mirror of https://github.com/actions/actions-runner-controller.git
synced 2025-12-10 11:41:27 +00:00

Compare commits: actions-ru… → v0.27.3
19 Commits
| SHA1 |
|---|
| 3e0bc3f7be |
| ba1ac0990b |
| 76fe43e8e0 |
| 8869ad28bb |
| b86af190f7 |
| 1a491cbfe5 |
| 087f20fd5d |
| a880114e57 |
| e80bc21fa5 |
| 56754094ea |
| 8fa4520376 |
| a804bf8b00 |
| 5dea6db412 |
| 2a0b770a63 |
| a7ef871248 |
| e45e4c53f1 |
| a608abd124 |
| 02d9add322 |
| f5ac134787 |
.github/workflows/e2e-test-linux-vm.yaml (vendored): 164 changed lines
@@ -5,7 +5,7 @@ on:
     branches:
       - master
   pull_request:
     branches:
       - master
   workflow_dispatch:
@@ -21,6 +21,7 @@ env:
 jobs:
   default-setup:
     runs-on: ubuntu-latest
     timeout-minutes: 20
+    if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.id == github.repository_id
     env:
       WORKFLOW_FILE: "arc-test-workflow.yaml"
@@ -55,11 +56,12 @@ jobs:
             echo "Pod found: $POD_NAME"
             break
           fi
-          if [ "$count" -ge 10 ]; then
+          if [ "$count" -ge 60 ]; then
             echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-runner-scale-set-controller"
             exit 1
           fi
           sleep 1
           count=$((count+1))
         done
         kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller
         kubectl get pod -n arc-systems
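Every job in this file gets the same two changes: an `if:` guard that skips pull requests from forks, and a pod-wait retry budget raised from 10 to 60 one-second polls. The wait-with-timeout pattern, restated as a small Go sketch for illustration (the `waitForPod` helper and the fake `lookup` callback are assumptions for this example, not repository code; `lookup` stands in for the kubectl query in the shell loop above):

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// waitForPod polls lookup() once per second until it reports a pod,
// giving up after maxAttempts tries, mirroring the shell loop above.
func waitForPod(ctx context.Context, lookup func() (string, error), maxAttempts int) (string, error) {
	for count := 0; ; count++ {
		name, err := lookup()
		if err == nil && name != "" {
			return name, nil // pod found
		}
		if count >= maxAttempts {
			return "", fmt.Errorf("timeout after %d attempts", maxAttempts)
		}
		select {
		case <-ctx.Done():
			return "", ctx.Err()
		case <-time.After(1 * time.Second):
		}
	}
}

func main() {
	attempts := 0
	// Fake lookup that succeeds on the third try, for demonstration.
	lookup := func() (string, error) {
		attempts++
		if attempts < 3 {
			return "", nil
		}
		return "pod/arc-controller-abc123", nil
	}
	name, err := waitForPod(context.Background(), lookup, 60)
	fmt.Println(name, err)
}
```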
@@ -84,11 +86,12 @@ jobs:
             echo "Pod found: $POD_NAME"
             break
           fi
-          if [ "$count" -ge 10 ]; then
+          if [ "$count" -ge 60 ]; then
             echo "Timeout waiting for listener pod with label actions.github.com/scale-set-name=$ARC_NAME"
             exit 1
           fi
           sleep 1
           count=$((count+1))
         done
         kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME
         kubectl get pod -n arc-systems

@@ -107,6 +110,7 @@ jobs:

   single-namespace-setup:
     runs-on: ubuntu-latest
     timeout-minutes: 20
+    if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.id == github.repository_id
     env:
       WORKFLOW_FILE: "arc-test-workflow.yaml"

@@ -143,11 +147,12 @@ jobs:
             echo "Pod found: $POD_NAME"
             break
           fi
-          if [ "$count" -ge 10 ]; then
+          if [ "$count" -ge 60 ]; then
             echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-runner-scale-set-controller"
             exit 1
           fi
           sleep 1
           count=$((count+1))
         done
         kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller
         kubectl get pod -n arc-systems

@@ -172,11 +177,12 @@ jobs:
             echo "Pod found: $POD_NAME"
             break
           fi
-          if [ "$count" -ge 10 ]; then
+          if [ "$count" -ge 60 ]; then
             echo "Timeout waiting for listener pod with label actions.github.com/scale-set-name=$ARC_NAME"
             exit 1
           fi
           sleep 1
           count=$((count+1))
         done
         kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME
         kubectl get pod -n arc-systems

@@ -195,6 +201,7 @@ jobs:

   dind-mode-setup:
     runs-on: ubuntu-latest
     timeout-minutes: 20
+    if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.id == github.repository_id
     env:
       WORKFLOW_FILE: arc-test-dind-workflow.yaml

@@ -229,11 +236,12 @@ jobs:
             echo "Pod found: $POD_NAME"
             break
           fi
-          if [ "$count" -ge 10 ]; then
+          if [ "$count" -ge 60 ]; then
             echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-runner-scale-set-controller"
             exit 1
           fi
           sleep 1
           count=$((count+1))
         done
         kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller
         kubectl get pod -n arc-systems

@@ -259,11 +267,12 @@ jobs:
             echo "Pod found: $POD_NAME"
             break
           fi
-          if [ "$count" -ge 10 ]; then
+          if [ "$count" -ge 60 ]; then
             echo "Timeout waiting for listener pod with label actions.github.com/scale-set-name=$ARC_NAME"
             exit 1
           fi
           sleep 1
           count=$((count+1))
         done
         kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME
         kubectl get pod -n arc-systems

@@ -282,6 +291,7 @@ jobs:

   kubernetes-mode-setup:
     runs-on: ubuntu-latest
     timeout-minutes: 20
+    if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.id == github.repository_id
     env:
       WORKFLOW_FILE: "arc-test-kubernetes-workflow.yaml"

@@ -321,11 +331,12 @@ jobs:
             echo "Pod found: $POD_NAME"
             break
           fi
-          if [ "$count" -ge 10 ]; then
+          if [ "$count" -ge 60 ]; then
             echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-runner-scale-set-controller"
             exit 1
           fi
           sleep 1
           count=$((count+1))
         done
         kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller
         kubectl get pod -n arc-systems

@@ -355,11 +366,12 @@ jobs:
             echo "Pod found: $POD_NAME"
             break
           fi
-          if [ "$count" -ge 10 ]; then
+          if [ "$count" -ge 60 ]; then
             echo "Timeout waiting for listener pod with label actions.github.com/scale-set-name=$ARC_NAME"
             exit 1
           fi
           sleep 1
           count=$((count+1))
         done
         kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME
         kubectl get pod -n arc-systems

@@ -378,6 +390,7 @@ jobs:

   auth-proxy-setup:
     runs-on: ubuntu-latest
     timeout-minutes: 20
+    if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.id == github.repository_id
     env:
       WORKFLOW_FILE: "arc-test-workflow.yaml"

@@ -412,11 +425,12 @@ jobs:
             echo "Pod found: $POD_NAME"
             break
           fi
-          if [ "$count" -ge 10 ]; then
+          if [ "$count" -ge 60 ]; then
             echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-runner-scale-set-controller"
             exit 1
           fi
           sleep 1
           count=$((count+1))
         done
         kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller
         kubectl get pod -n arc-systems

@@ -453,11 +467,12 @@ jobs:
             echo "Pod found: $POD_NAME"
             break
           fi
-          if [ "$count" -ge 10 ]; then
+          if [ "$count" -ge 60 ]; then
             echo "Timeout waiting for listener pod with label actions.github.com/scale-set-name=$ARC_NAME"
             exit 1
           fi
           sleep 1
           count=$((count+1))
         done
         kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME
         kubectl get pod -n arc-systems

@@ -476,6 +491,7 @@ jobs:

   anonymous-proxy-setup:
     runs-on: ubuntu-latest
     timeout-minutes: 20
+    if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.id == github.repository_id
     env:
       WORKFLOW_FILE: "arc-test-workflow.yaml"

@@ -510,11 +526,12 @@ jobs:
             echo "Pod found: $POD_NAME"
             break
           fi
-          if [ "$count" -ge 10 ]; then
+          if [ "$count" -ge 60 ]; then
             echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-runner-scale-set-controller"
             exit 1
           fi
           sleep 1
           count=$((count+1))
         done
         kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller
         kubectl get pod -n arc-systems

@@ -545,11 +562,132 @@ jobs:
             echo "Pod found: $POD_NAME"
             break
           fi
-          if [ "$count" -ge 10 ]; then
+          if [ "$count" -ge 60 ]; then
             echo "Timeout waiting for listener pod with label actions.github.com/scale-set-name=$ARC_NAME"
             exit 1
           fi
           sleep 1
           count=$((count+1))
         done
         kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME
         kubectl get pod -n arc-systems

+    - name: Test ARC E2E
+      uses: ./.github/actions/execute-assert-arc-e2e
+      timeout-minutes: 10
+      with:
+        auth-token: ${{ steps.setup.outputs.token }}
+        repo-owner: ${{ env.TARGET_ORG }}
+        repo-name: ${{env.TARGET_REPO}}
+        workflow-file: ${{env.WORKFLOW_FILE}}
+        arc-name: ${{steps.install_arc.outputs.ARC_NAME}}
+        arc-namespace: "arc-runners"
+        arc-controller-namespace: "arc-systems"
+
+  self-signed-ca-setup:
+    runs-on: ubuntu-latest
+    timeout-minutes: 20
+    if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.id == github.repository_id
+    env:
+      WORKFLOW_FILE: "arc-test-workflow.yaml"
+    steps:
+      - uses: actions/checkout@v3
+        with:
+          ref: ${{github.head_ref}}
+
+      - uses: ./.github/actions/setup-arc-e2e
+        id: setup
+        with:
+          app-id: ${{secrets.E2E_TESTS_ACCESS_APP_ID}}
+          app-pk: ${{secrets.E2E_TESTS_ACCESS_PK}}
+          image-name: ${{env.IMAGE_NAME}}
+          image-tag: ${{env.IMAGE_VERSION}}
+          target-org: ${{env.TARGET_ORG}}
+
+      - name: Install gha-runner-scale-set-controller
+        id: install_arc_controller
+        run: |
+          helm install arc \
+            --namespace "arc-systems" \
+            --create-namespace \
+            --set image.repository=${{ env.IMAGE_NAME }} \
+            --set image.tag=${{ env.IMAGE_VERSION }} \
+            ./charts/gha-runner-scale-set-controller \
+            --debug
+          count=0
+          while true; do
+            POD_NAME=$(kubectl get pods -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller -o name)
+            if [ -n "$POD_NAME" ]; then
+              echo "Pod found: $POD_NAME"
+              break
+            fi
+            if [ "$count" -ge 60 ]; then
+              echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-runner-scale-set-controller"
+              exit 1
+            fi
+            sleep 1
+            count=$((count+1))
+          done
+          kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller
+          kubectl get pod -n arc-systems
+          kubectl describe deployment arc-gha-runner-scale-set-controller -n arc-systems
+
+      - name: Install gha-runner-scale-set
+        id: install_arc
+        run: |
+          docker run -d \
+            --rm \
+            --name mitmproxy \
+            --publish 8080:8080 \
+            -v ${{ github.workspace }}/mitmproxy:/home/mitmproxy/.mitmproxy \
+            mitmproxy/mitmproxy:latest \
+            mitmdump
+          count=0
+          while true; do
+            if [ -f "${{ github.workspace }}/mitmproxy/mitmproxy-ca-cert.pem" ]; then
+              echo "CA cert generated"
+              cat ${{ github.workspace }}/mitmproxy/mitmproxy-ca-cert.pem
+              break
+            fi
+            if [ "$count" -ge 60 ]; then
+              echo "Timeout waiting for mitmproxy generate its CA cert"
+              exit 1
+            fi
+            sleep 1
+            count=$((count+1))
+          done
+          sudo cp ${{ github.workspace }}/mitmproxy/mitmproxy-ca-cert.pem ${{ github.workspace }}/mitmproxy/mitmproxy-ca-cert.crt
+          sudo chown runner ${{ github.workspace }}/mitmproxy/mitmproxy-ca-cert.crt
+          kubectl create namespace arc-runners
+          kubectl -n arc-runners create configmap ca-cert --from-file="${{ github.workspace }}/mitmproxy/mitmproxy-ca-cert.crt"
+          kubectl -n arc-runners get configmap ca-cert -o yaml
+          ARC_NAME=${{github.job}}-$(date +'%M%S')$((($RANDOM + 100) % 100 + 1))
+          helm install "$ARC_NAME" \
+            --namespace "arc-runners" \
+            --create-namespace \
+            --set githubConfigUrl="https://github.com/${{ env.TARGET_ORG }}/${{env.TARGET_REPO}}" \
+            --set githubConfigSecret.github_token="${{ steps.setup.outputs.token }}" \
+            --set proxy.https.url="http://host.minikube.internal:8080" \
+            --set "proxy.noProxy[0]=10.96.0.1:443" \
+            --set "githubServerTLS.certificateFrom.configMapKeyRef.name=ca-cert" \
+            --set "githubServerTLS.certificateFrom.configMapKeyRef.key=mitmproxy-ca-cert.crt" \
+            --set "githubServerTLS.runnerMountPath=/usr/local/share/ca-certificates/" \
+            ./charts/gha-runner-scale-set \
+            --debug
+          echo "ARC_NAME=$ARC_NAME" >> $GITHUB_OUTPUT
+          count=0
+          while true; do
+            POD_NAME=$(kubectl get pods -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME -o name)
+            if [ -n "$POD_NAME" ]; then
+              echo "Pod found: $POD_NAME"
+              break
+            fi
+            if [ "$count" -ge 60 ]; then
+              echo "Timeout waiting for listener pod with label actions.github.com/scale-set-name=$ARC_NAME"
+              exit 1
+            fi
+            sleep 1
+            count=$((count+1))
+          done
+          kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME
+          kubectl get pod -n arc-systems
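The new self-signed-ca-setup job routes runner traffic through a mitmproxy instance and mounts the proxy's CA certificate via the chart's githubServerTLS values, so the runner can verify the intercepted TLS connection. A minimal Go sketch of the client-side idea, assuming a PEM file at the path used above (the path and this program are illustrative, not repository code):

```go
package main

import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
	"net/http"
	"os"
)

func main() {
	// Load the proxy's self-signed CA and add it to a cert pool so TLS
	// verification succeeds for certificates the proxy mints on the fly.
	pem, err := os.ReadFile("mitmproxy/mitmproxy-ca-cert.pem")
	if err != nil {
		panic(err)
	}
	pool := x509.NewCertPool()
	if !pool.AppendCertsFromPEM(pem) {
		panic("no certificates parsed from PEM")
	}
	client := &http.Client{
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{RootCAs: pool},
		},
	}
	resp, err := client.Get("https://github.com")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
```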
.github/workflows/publish-chart.yaml (vendored): 300 changed lines
@@ -5,22 +5,28 @@ name: Publish Helm Chart
 on:
   push:
     branches:
       - master
     paths:
       - 'charts/**'
       - '.github/workflows/publish-chart.yaml'
       - '!charts/actions-runner-controller/docs/**'
       - '!charts/gha-runner-scale-set-controller/**'
       - '!charts/gha-runner-scale-set/**'
       - '!**.md'
   workflow_dispatch:
+    inputs:
+      force:
+        description: 'Force publish even if the chart version is not bumped'
+        type: boolean
+        required: true
+        default: false

 env:
   KUBE_SCORE_VERSION: 1.10.0
   HELM_VERSION: v3.8.0

 permissions:
-  contents: read
+  contents: write

 jobs:
   lint-chart:
@@ -29,91 +35,86 @@ jobs:
     outputs:
       publish-chart: ${{ steps.publish-chart-step.outputs.publish }}
     steps:
       - name: Checkout
         uses: actions/checkout@v3
         with:
           fetch-depth: 0

       - name: Set up Helm
         uses: azure/setup-helm@v3.4
         with:
           version: ${{ env.HELM_VERSION }}

       - name: Set up kube-score
         run: |
           wget https://github.com/zegl/kube-score/releases/download/v${{ env.KUBE_SCORE_VERSION }}/kube-score_${{ env.KUBE_SCORE_VERSION }}_linux_amd64 -O kube-score
           chmod 755 kube-score

       - name: Kube-score generated manifests
-        run: helm template --values charts/.ci/values-kube-score.yaml charts/* | ./kube-score score -
-          --ignore-test pod-networkpolicy
-          --ignore-test deployment-has-poddisruptionbudget
-          --ignore-test deployment-has-host-podantiaffinity
-          --ignore-test container-security-context
-          --ignore-test pod-probes
-          --ignore-test container-image-tag
-          --enable-optional-test container-security-context-privileged
-          --enable-optional-test container-security-context-readonlyrootfilesystem
+        run: helm template --values charts/.ci/values-kube-score.yaml charts/* | ./kube-score score - --ignore-test pod-networkpolicy --ignore-test deployment-has-poddisruptionbudget --ignore-test deployment-has-host-podantiaffinity --ignore-test container-security-context --ignore-test pod-probes --ignore-test container-image-tag --enable-optional-test container-security-context-privileged --enable-optional-test container-security-context-readonlyrootfilesystem

       # python is a requirement for the chart-testing action below (supports yamllint among other tests)
       - uses: actions/setup-python@v4
         with:
-          python-version: '3.7'
+          python-version: '3.11'

       - name: Set up chart-testing
         uses: helm/chart-testing-action@v2.3.1

       - name: Run chart-testing (list-changed)
         id: list-changed
         run: |
           changed=$(ct list-changed --config charts/.ci/ct-config.yaml)
           if [[ -n "$changed" ]]; then
             echo "::set-output name=changed::true"
           fi

       - name: Run chart-testing (lint)
         run: |
           ct lint --config charts/.ci/ct-config.yaml

       - name: Create kind cluster
         if: steps.list-changed.outputs.changed == 'true'
         uses: helm/kind-action@v1.4.0

       # We need cert-manager already installed in the cluster because we assume the CRDs exist
       - name: Install cert-manager
         if: steps.list-changed.outputs.changed == 'true'
         run: |
           helm repo add jetstack https://charts.jetstack.io --force-update
           helm install cert-manager jetstack/cert-manager --set installCRDs=true --wait

       - name: Run chart-testing (install)
         if: steps.list-changed.outputs.changed == 'true'
         run: ct install --config charts/.ci/ct-config.yaml

       # WARNING: This relies on the latest release being at the top of the JSON from GitHub and a clean chart.yaml
       - name: Check if Chart Publish is Needed
         id: publish-chart-step
         run: |
           CHART_TEXT=$(curl -fs https://raw.githubusercontent.com/${{ github.repository }}/master/charts/actions-runner-controller/Chart.yaml)
           NEW_CHART_VERSION=$(echo "$CHART_TEXT" | grep version: | cut -d ' ' -f 2)
           RELEASE_LIST=$(curl -fs https://api.github.com/repos/${{ github.repository }}/releases | jq .[].tag_name | grep actions-runner-controller | cut -d '"' -f 2 | cut -d '-' -f 4)
           LATEST_RELEASED_CHART_VERSION=$(echo $RELEASE_LIST | cut -d ' ' -f 1)
           echo "CHART_VERSION_IN_MASTER=$NEW_CHART_VERSION" >> $GITHUB_ENV
           echo "LATEST_CHART_VERSION=$LATEST_RELEASED_CHART_VERSION" >> $GITHUB_ENV
-          if [[ $NEW_CHART_VERSION != $LATEST_RELEASED_CHART_VERSION ]]; then
+          # Always publish if force is true
+          if [[ $NEW_CHART_VERSION != $LATEST_RELEASED_CHART_VERSION || "${{ inputs.force }}" == "true" ]]; then
             echo "publish=true" >> $GITHUB_OUTPUT
           else
             echo "publish=false" >> $GITHUB_OUTPUT
           fi

       - name: Job summary
         run: |
           echo "Chart linting has been completed." >> $GITHUB_STEP_SUMMARY
           echo "" >> $GITHUB_STEP_SUMMARY
           echo "**Status:**" >> $GITHUB_STEP_SUMMARY
           echo "- chart version in master: ${{ env.CHART_VERSION_IN_MASTER }}" >> $GITHUB_STEP_SUMMARY
           echo "- latest chart version: ${{ env.LATEST_CHART_VERSION }}" >> $GITHUB_STEP_SUMMARY
           echo "- publish new chart: ${{ steps.publish-chart-step.outputs.publish }}" >> $GITHUB_STEP_SUMMARY

   publish-chart:
     if: needs.lint-chart.outputs.publish-chart == 'true'
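The reworked check publishes when the chart version in master differs from the latest released version, or unconditionally when the new force input is set. The decision restated as a tiny Go sketch for clarity (the function is illustrative, not repository code):

```go
package main

import "fmt"

// shouldPublish mirrors the workflow's check: publish when the chart
// version in master differs from the latest released version, or when
// the workflow_dispatch "force" input is set.
func shouldPublish(masterVersion, latestReleased string, force bool) bool {
	return masterVersion != latestReleased || force
}

func main() {
	fmt.Println(shouldPublish("0.23.1", "0.23.0", false)) // true: version bumped
	fmt.Println(shouldPublish("0.23.0", "0.23.0", false)) // false: nothing new
	fmt.Println(shouldPublish("0.23.0", "0.23.0", true))  // true: forced
}
```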
@@ -121,85 +122,86 @@ jobs:
     name: Publish Chart
     runs-on: ubuntu-latest
     permissions:
       contents: write # for helm/chart-releaser-action to push chart release and create a release
     env:
       CHART_TARGET_ORG: actions-runner-controller
       CHART_TARGET_REPO: actions-runner-controller.github.io
       CHART_TARGET_BRANCH: master

     steps:
       - name: Checkout
         uses: actions/checkout@v3
         with:
           fetch-depth: 0

       - name: Configure Git
         run: |
           git config user.name "$GITHUB_ACTOR"
           git config user.email "$GITHUB_ACTOR@users.noreply.github.com"

       - name: Get Token
         id: get_workflow_token
         uses: peter-murray/workflow-application-token-action@8e1ba3bf1619726336414f1014e37f17fbadf1db
         with:
           application_id: ${{ secrets.ACTIONS_ACCESS_APP_ID }}
           application_private_key: ${{ secrets.ACTIONS_ACCESS_PK }}
           organization: ${{ env.CHART_TARGET_ORG }}

       - name: Install chart-releaser
         uses: helm/chart-releaser-action@v1.4.1
         with:
           install_only: true
           install_dir: ${{ github.workspace }}/bin

       - name: Package and upload release assets
         run: |
           cr package \
             ${{ github.workspace }}/charts/actions-runner-controller/ \
             --package-path .cr-release-packages

           cr upload \
             --owner "$(echo ${{ github.repository }} | cut -d '/' -f 1)" \
             --git-repo "$(echo ${{ github.repository }} | cut -d '/' -f 2)" \
             --package-path .cr-release-packages \
             --token ${{ secrets.GITHUB_TOKEN }}

       - name: Generate updated index.yaml
         run: |
           cr index \
             --owner "$(echo ${{ github.repository }} | cut -d '/' -f 1)" \
             --git-repo "$(echo ${{ github.repository }} | cut -d '/' -f 2)" \
             --index-path ${{ github.workspace }}/index.yaml \
+            --push \
             --pages-branch 'gh-pages' \
             --pages-index-path 'index.yaml'

       # Chart Release was never intended to publish to a different repo
       # this workaround is intended to move the index.yaml to the target repo
       # where the github pages are hosted
-      - name: Checkout pages repository
+      - name: Checkout target repository
         uses: actions/checkout@v3
         with:
           repository: ${{ env.CHART_TARGET_ORG }}/${{ env.CHART_TARGET_REPO }}
           path: ${{ env.CHART_TARGET_REPO }}
           ref: ${{ env.CHART_TARGET_BRANCH }}
           token: ${{ steps.get_workflow_token.outputs.token }}

       - name: Copy index.yaml
         run: |
           cp ${{ github.workspace }}/index.yaml ${{ env.CHART_TARGET_REPO }}/actions-runner-controller/index.yaml

-      - name: Commit and push
+      - name: Commit and push to target repository
         run: |
           git config user.name "$GITHUB_ACTOR"
           git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
           git add .
           git commit -m "Update index.yaml"
           git push
         working-directory: ${{ github.workspace }}/${{ env.CHART_TARGET_REPO }}

       - name: Job summary
         run: |
           echo "New helm chart has been published" >> $GITHUB_STEP_SUMMARY
           echo "" >> $GITHUB_STEP_SUMMARY
           echo "**Status:**" >> $GITHUB_STEP_SUMMARY
           echo "- New [index.yaml](https://github.com/${{ env.CHART_TARGET_ORG }}/${{ env.CHART_TARGET_REPO }}/tree/main/actions-runner-controller) pushed" >> $GITHUB_STEP_SUMMARY
.github/workflows/validate-gha-chart.yaml (vendored): 4 changed lines
@@ -71,7 +71,7 @@ jobs:
         git clone https://github.com/helm/chart-testing
         cd chart-testing
         unset CT_CONFIG_DIR
-        goreleaser build --clean --skip-validate
+        goreleaser build --clean --skip-validate
         ./dist/chart-testing_linux_amd64_v1/ct version
         echo 'Adding ct directory to PATH...'
         echo "$RUNNER_TEMP/chart-testing/dist/chart-testing_linux_amd64_v1" >> "$GITHUB_PATH"
@@ -107,7 +107,7 @@ jobs:
         load: true
         build-args: |
           DOCKER_IMAGE_NAME=test-arc
-          VERSION=dev
+          VERSION=dev
         tags: |
           test-arc:dev
         cache-from: type=gha
@@ -52,6 +52,9 @@ type AutoscalingListenerSpec struct {
 	// Required
 	Image string `json:"image,omitempty"`

+	// Required
+	ImagePullPolicy corev1.PullPolicy `json:"imagePullPolicy,omitempty"`
+
 	// Required
 	ImagePullSecrets []corev1.LocalObjectReference `json:"imagePullSecrets,omitempty"`
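The listener spec gains an ImagePullPolicy field alongside the existing Image and ImagePullSecrets. A sketch of how a resource builder might apply it when constructing the listener container (a hypothetical helper; the actual wiring lives in controllers/actions.github.com/resourcebuilder.go, which is referenced by the test comments below but not shown in this diff):

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// listenerContainer builds a listener container from the spec fields
// above, defaulting the pull policy when it is unset. Illustrative
// sketch only; not the repository's resourcebuilder code.
func listenerContainer(image string, policy corev1.PullPolicy) corev1.Container {
	if policy == "" {
		policy = corev1.PullIfNotPresent // default, matching the chart tests
	}
	return corev1.Container{
		Name:            "listener",
		Image:           image,
		ImagePullPolicy: policy,
	}
}

func main() {
	c := listenerContainer("ghcr.io/actions/gha-runner-scale-set-controller:0.4.0", "")
	fmt.Println(c.Image, c.ImagePullPolicy)
}
```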
@@ -15,10 +15,10 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.23.0
+version: 0.23.1

 # Used as the default manager tag value when no tag property is provided in the values.yaml
-appVersion: 0.27.1
+appVersion: 0.27.2

 home: https://github.com/actions/actions-runner-controller
@@ -15,13 +15,13 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.3.0
+version: 0.4.0

 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to
 # follow Semantic Versioning. They should reflect the version the application is using.
 # It is recommended to use it with quotes.
-appVersion: "0.3.0"
+appVersion: "0.4.0"

 home: https://github.com/actions/actions-runner-controller
@@ -80,6 +80,9 @@ spec:
               image:
                 description: Required
                 type: string
+              imagePullPolicy:
+                description: Required
+                type: string
               imagePullSecrets:
                 description: Required
                 items:
@@ -68,14 +68,11 @@ spec:
           valueFrom:
             fieldRef:
               fieldPath: metadata.namespace
+        - name: CONTROLLER_MANAGER_LISTENER_IMAGE_PULL_POLICY
+          value: "{{ .Values.image.pullPolicy | default "IfNotPresent" }}"
         {{- with .Values.env }}
-        {{- if kindIs "slice" .Values.env }}
-        {{- toYaml .Values.env | nindent 8 }}
-        {{- else }}
-        {{- range $key, $val := .Values.env }}
-        - name: {{ $key }}
-          value: {{ $val | quote }}
-        {{- end }}
+        {{- if kindIs "slice" . }}
+        {{- toYaml . | nindent 8 }}
+        {{- end }}
         {{- end }}
         {{- with .Values.resources }}
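After this change the chart only renders env when it is a YAML list (the kindIs "slice" guard), matching the Kubernetes container env schema rather than the earlier key/value map form. A sketch of exercising the list form with terratest's helm package, modeled on the chart tests later in this diff (the chart path, release name, and namespace are illustrative):

```go
package tests

import (
	"testing"

	"github.com/gruntwork-io/terratest/modules/helm"
	"github.com/gruntwork-io/terratest/modules/k8s"
)

// Renders the controller deployment with list-form env values, the only
// shape the template supports after this change.
func TestListFormEnvRenders(t *testing.T) {
	options := &helm.Options{
		SetValues: map[string]string{
			"env[0].name":  "ENV_VAR_NAME_1",
			"env[0].value": "ENV_VAR_VALUE_1",
		},
		KubectlOptions: k8s.NewKubectlOptions("", "", "test-ns"),
	}
	// Chart path is illustrative; the repository's tests use a relative
	// path to charts/gha-runner-scale-set-controller.
	output := helm.RenderTemplate(t, options, "../../gha-runner-scale-set-controller",
		"test-arc", []string{"templates/deployment.yaml"})
	if output == "" {
		t.Fatal("expected rendered manifest")
	}
}
```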
@@ -78,6 +78,13 @@ rules:
   - get
   - patch
   - update
+- apiGroups:
+  - actions.github.com
+  resources:
+  - ephemeralrunnersets/finalizers
+  verbs:
+  - patch
+  - update
 - apiGroups:
   - actions.github.com
   resources:

@@ -133,4 +140,5 @@ rules:
   verbs:
   - list
   - watch
+  - patch
 {{- end }}
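The manager role gains patch/update on ephemeralrunnersets/finalizers, which is what lets the controller manage its own finalizers during resource cleanup. The added rule, expressed as the equivalent Go structure for reference (a sketch, not code from this repository):

```go
package main

import (
	"fmt"

	rbacv1 "k8s.io/api/rbac/v1"
)

func main() {
	// Equivalent of the rule added to the manager cluster role above.
	rule := rbacv1.PolicyRule{
		APIGroups: []string{"actions.github.com"},
		Resources: []string{"ephemeralrunnersets/finalizers"},
		Verbs:     []string{"patch", "update"},
	}
	fmt.Printf("%+v\n", rule)
}
```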
@@ -81,4 +81,4 @@ rules:
   verbs:
   - list
   - watch
-{{- end }}
+{{- end }}
@@ -52,6 +52,13 @@ rules:
   - get
   - patch
   - update
+- apiGroups:
+  - actions.github.com
+  resources:
+  - ephemeralrunnersets/finalizers
+  verbs:
+  - patch
+  - update
 - apiGroups:
   - actions.github.com
   resources:

@@ -114,4 +121,5 @@ rules:
   verbs:
   - list
   - watch
+  - patch
 {{- end }}
@@ -169,7 +169,7 @@ func TestTemplate_CreateManagerClusterRole(t *testing.T) {

 	assert.Empty(t, managerClusterRole.Namespace, "ClusterRole should not have a namespace")
 	assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-cluster-role", managerClusterRole.Name)
-	assert.Equal(t, 15, len(managerClusterRole.Rules))
+	assert.Equal(t, 16, len(managerClusterRole.Rules))

 	_, err = helm.RenderTemplateE(t, options, helmChartPath, releaseName, []string{"templates/manager_single_namespace_controller_role.yaml"})
 	assert.ErrorContains(t, err, "could not find template templates/manager_single_namespace_controller_role.yaml in chart", "We should get an error because the template should be skipped")

@@ -349,13 +349,16 @@ func TestTemplate_ControllerDeployment_Defaults(t *testing.T) {
 	assert.Equal(t, "--auto-scaling-runner-set-only", deployment.Spec.Template.Spec.Containers[0].Args[0])
 	assert.Equal(t, "--log-level=debug", deployment.Spec.Template.Spec.Containers[0].Args[1])

-	assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Env, 2)
+	assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Env, 3)
 	assert.Equal(t, "CONTROLLER_MANAGER_CONTAINER_IMAGE", deployment.Spec.Template.Spec.Containers[0].Env[0].Name)
 	assert.Equal(t, managerImage, deployment.Spec.Template.Spec.Containers[0].Env[0].Value)

 	assert.Equal(t, "CONTROLLER_MANAGER_POD_NAMESPACE", deployment.Spec.Template.Spec.Containers[0].Env[1].Name)
 	assert.Equal(t, "metadata.namespace", deployment.Spec.Template.Spec.Containers[0].Env[1].ValueFrom.FieldRef.FieldPath)

+	assert.Equal(t, "CONTROLLER_MANAGER_LISTENER_IMAGE_PULL_POLICY", deployment.Spec.Template.Spec.Containers[0].Env[2].Name)
+	assert.Equal(t, "IfNotPresent", deployment.Spec.Template.Spec.Containers[0].Env[2].Value) // default value. Needs to align with controllers/actions.github.com/resourcebuilder.go
+
 	assert.Empty(t, deployment.Spec.Template.Spec.Containers[0].Resources)
 	assert.Nil(t, deployment.Spec.Template.Spec.Containers[0].SecurityContext)
 	assert.Len(t, deployment.Spec.Template.Spec.Containers[0].VolumeMounts, 1)

@@ -390,6 +393,8 @@ func TestTemplate_ControllerDeployment_Customize(t *testing.T) {
 			"imagePullSecrets[0].name": "dockerhub",
 			"nameOverride":             "gha-runner-scale-set-controller-override",
 			"fullnameOverride":         "gha-runner-scale-set-controller-fullname-override",
+			"env[0].name":              "ENV_VAR_NAME_1",
+			"env[0].value":             "ENV_VAR_VALUE_1",
 			"serviceAccount.name":      "gha-runner-scale-set-controller-sa",
 			"podAnnotations.foo":       "bar",
 			"podSecurityContext.fsGroup": "1000",

@@ -432,6 +437,9 @@ func TestTemplate_ControllerDeployment_Customize(t *testing.T) {
 	assert.Equal(t, "bar", deployment.Spec.Template.Annotations["foo"])
 	assert.Equal(t, "manager", deployment.Spec.Template.Annotations["kubectl.kubernetes.io/default-container"])

+	assert.Equal(t, "ENV_VAR_NAME_1", deployment.Spec.Template.Spec.Containers[0].Env[3].Name)
+	assert.Equal(t, "ENV_VAR_VALUE_1", deployment.Spec.Template.Spec.Containers[0].Env[3].Value)
+
 	assert.Len(t, deployment.Spec.Template.Spec.ImagePullSecrets, 1)
 	assert.Equal(t, "dockerhub", deployment.Spec.Template.Spec.ImagePullSecrets[0].Name)
 	assert.Equal(t, "gha-runner-scale-set-controller-sa", deployment.Spec.Template.Spec.ServiceAccountName)

@@ -467,10 +475,16 @@ func TestTemplate_ControllerDeployment_Customize(t *testing.T) {
 	assert.Equal(t, "--auto-scaler-image-pull-secrets=dockerhub", deployment.Spec.Template.Spec.Containers[0].Args[1])
 	assert.Equal(t, "--log-level=debug", deployment.Spec.Template.Spec.Containers[0].Args[2])

-	assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Env, 2)
+	assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Env, 4)
 	assert.Equal(t, "CONTROLLER_MANAGER_CONTAINER_IMAGE", deployment.Spec.Template.Spec.Containers[0].Env[0].Name)
 	assert.Equal(t, managerImage, deployment.Spec.Template.Spec.Containers[0].Env[0].Value)

+	assert.Equal(t, "CONTROLLER_MANAGER_LISTENER_IMAGE_PULL_POLICY", deployment.Spec.Template.Spec.Containers[0].Env[2].Name)
+	assert.Equal(t, "Always", deployment.Spec.Template.Spec.Containers[0].Env[2].Value) // default value. Needs to align with controllers/actions.github.com/resourcebuilder.go
+
+	assert.Equal(t, "ENV_VAR_NAME_1", deployment.Spec.Template.Spec.Containers[0].Env[3].Name)
+	assert.Equal(t, "ENV_VAR_VALUE_1", deployment.Spec.Template.Spec.Containers[0].Env[3].Value)
+
 	assert.Equal(t, "CONTROLLER_MANAGER_POD_NAMESPACE", deployment.Spec.Template.Spec.Containers[0].Env[1].Name)
 	assert.Equal(t, "metadata.namespace", deployment.Spec.Template.Spec.Containers[0].Env[1].ValueFrom.FieldRef.FieldPath)

@@ -690,13 +704,16 @@ func TestTemplate_ControllerDeployment_WatchSingleNamespace(t *testing.T) {
 	assert.Equal(t, "--log-level=debug", deployment.Spec.Template.Spec.Containers[0].Args[1])
 	assert.Equal(t, "--watch-single-namespace=demo", deployment.Spec.Template.Spec.Containers[0].Args[2])

-	assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Env, 2)
+	assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Env, 3)
 	assert.Equal(t, "CONTROLLER_MANAGER_CONTAINER_IMAGE", deployment.Spec.Template.Spec.Containers[0].Env[0].Name)
 	assert.Equal(t, managerImage, deployment.Spec.Template.Spec.Containers[0].Env[0].Value)

 	assert.Equal(t, "CONTROLLER_MANAGER_POD_NAMESPACE", deployment.Spec.Template.Spec.Containers[0].Env[1].Name)
 	assert.Equal(t, "metadata.namespace", deployment.Spec.Template.Spec.Containers[0].Env[1].ValueFrom.FieldRef.FieldPath)

+	assert.Equal(t, "CONTROLLER_MANAGER_LISTENER_IMAGE_PULL_POLICY", deployment.Spec.Template.Spec.Containers[0].Env[2].Name)
+	assert.Equal(t, "IfNotPresent", deployment.Spec.Template.Spec.Containers[0].Env[2].Value) // default value. Needs to align with controllers/actions.github.com/resourcebuilder.go
+
 	assert.Empty(t, deployment.Spec.Template.Spec.Containers[0].Resources)
 	assert.Nil(t, deployment.Spec.Template.Spec.Containers[0].SecurityContext)
 	assert.Len(t, deployment.Spec.Template.Spec.Containers[0].VolumeMounts, 1)

@@ -704,6 +721,52 @@ func TestTemplate_ControllerDeployment_WatchSingleNamespace(t *testing.T) {
 	assert.Equal(t, "/tmp", deployment.Spec.Template.Spec.Containers[0].VolumeMounts[0].MountPath)
 }

+func TestTemplate_ControllerContainerEnvironmentVariables(t *testing.T) {
+	t.Parallel()
+
+	// Path to the helm chart we will test
+	helmChartPath, err := filepath.Abs("../../gha-runner-scale-set-controller")
+	require.NoError(t, err)
+
+	releaseName := "test-arc"
+	namespaceName := "test-" + strings.ToLower(random.UniqueId())
+
+	options := &helm.Options{
+		SetValues: map[string]string{
+			"env[0].Name":                            "ENV_VAR_NAME_1",
+			"env[0].Value":                           "ENV_VAR_VALUE_1",
+			"env[1].Name":                            "ENV_VAR_NAME_2",
+			"env[1].ValueFrom.SecretKeyRef.Key":      "ENV_VAR_NAME_2",
+			"env[1].ValueFrom.SecretKeyRef.Name":     "secret-name",
+			"env[1].ValueFrom.SecretKeyRef.Optional": "true",
+			"env[2].Name":                            "ENV_VAR_NAME_3",
+			"env[2].Value":                           "",
+			"env[3].Name":                            "ENV_VAR_NAME_4",
+		},
+		KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
+	}
+
+	output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/deployment.yaml"})
+
+	var deployment appsv1.Deployment
+	helm.UnmarshalK8SYaml(t, output, &deployment)
+
+	assert.Equal(t, namespaceName, deployment.Namespace)
+	assert.Equal(t, "test-arc-gha-runner-scale-set-controller", deployment.Name)
+
+	assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Env, 7)
+	assert.Equal(t, "ENV_VAR_NAME_1", deployment.Spec.Template.Spec.Containers[0].Env[3].Name)
+	assert.Equal(t, "ENV_VAR_VALUE_1", deployment.Spec.Template.Spec.Containers[0].Env[3].Value)
+	assert.Equal(t, "ENV_VAR_NAME_2", deployment.Spec.Template.Spec.Containers[0].Env[4].Name)
+	assert.Equal(t, "secret-name", deployment.Spec.Template.Spec.Containers[0].Env[4].ValueFrom.SecretKeyRef.Name)
+	assert.Equal(t, "ENV_VAR_NAME_2", deployment.Spec.Template.Spec.Containers[0].Env[4].ValueFrom.SecretKeyRef.Key)
+	assert.True(t, *deployment.Spec.Template.Spec.Containers[0].Env[4].ValueFrom.SecretKeyRef.Optional)
+	assert.Equal(t, "ENV_VAR_NAME_3", deployment.Spec.Template.Spec.Containers[0].Env[5].Name)
+	assert.Empty(t, deployment.Spec.Template.Spec.Containers[0].Env[5].Value)
+	assert.Equal(t, "ENV_VAR_NAME_4", deployment.Spec.Template.Spec.Containers[0].Env[6].Name)
+	assert.Empty(t, deployment.Spec.Template.Spec.Containers[0].Env[6].ValueFrom)
+}
+
 func TestTemplate_WatchSingleNamespace_NotCreateManagerClusterRole(t *testing.T) {
 	t.Parallel()

@@ -780,7 +843,7 @@ func TestTemplate_CreateManagerSingleNamespaceRole(t *testing.T) {

 	assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-single-namespace-role", managerSingleNamespaceWatchRole.Name)
 	assert.Equal(t, "demo", managerSingleNamespaceWatchRole.Namespace)
-	assert.Equal(t, 13, len(managerSingleNamespaceWatchRole.Rules))
+	assert.Equal(t, 14, len(managerSingleNamespaceWatchRole.Rules))
 }

 func TestTemplate_ManagerSingleNamespaceRoleBinding(t *testing.T) {
@@ -18,6 +18,17 @@ imagePullSecrets: []
 nameOverride: ""
 fullnameOverride: ""

+env:
+  ## Define environment variables for the controller pod
+  # - name: "ENV_VAR_NAME_1"
+  #   value: "ENV_VAR_VALUE_1"
+  # - name: "ENV_VAR_NAME_2"
+  #   valueFrom:
+  #     secretKeyRef:
+  #       key: ENV_VAR_NAME_2
+  #       name: secret-name
+  #       optional: true
+
 serviceAccount:
   # Specifies whether a service account should be created for running the controller pod
   create: true

@@ -31,27 +42,27 @@ serviceAccount:
 podAnnotations: {}

 podSecurityContext: {}
-  # fsGroup: 2000
+# fsGroup: 2000

 securityContext: {}
-  # capabilities:
-  #   drop:
-  #     - ALL
-  # readOnlyRootFilesystem: true
-  # runAsNonRoot: true
-  # runAsUser: 1000
+# capabilities:
+#   drop:
+#     - ALL
+# readOnlyRootFilesystem: true
+# runAsNonRoot: true
+# runAsUser: 1000

 resources: {}
-  # We usually recommend not to specify default resources and to leave this as a conscious
-  # choice for the user. This also increases chances charts run on environments with little
-  # resources, such as Minikube. If you do want to specify resources, uncomment the following
-  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
-  # limits:
-  #   cpu: 100m
-  #   memory: 128Mi
-  # requests:
-  #   cpu: 100m
-  #   memory: 128Mi
+## We usually recommend not to specify default resources and to leave this as a conscious
+## choice for the user. This also increases chances charts run on environments with little
+## resources, such as Minikube. If you do want to specify resources, uncomment the following
+## lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+# limits:
+#   cpu: 100m
+#   memory: 128Mi
+# requests:
+#   cpu: 100m
+#   memory: 128Mi

 nodeSelector: {}

@@ -69,6 +80,6 @@ flags:
   # Defaults to "debug".
   logLevel: "debug"

-  # Restricts the controller to only watch resources in the desired namespace.
-  # Defaults to watch all namespaces when unset.
-  # watchSingleNamespace: ""
+  ## Restricts the controller to only watch resources in the desired namespace.
+  ## Defaults to watch all namespaces when unset.
+  # watchSingleNamespace: ""
@@ -15,13 +15,13 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.3.0
+version: 0.4.0

 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to
 # follow Semantic Versioning. They should reflect the version the application is using.
 # It is recommended to use it with quotes.
-appVersion: "0.3.0"
+appVersion: "0.4.0"

 home: https://github.com/actions/dev-arc
@@ -11,17 +11,9 @@ We truncate at 63 chars because some Kubernetes name fields are limited to this
 If release name contains chart name it will be used as a full name.
 */}}
 {{- define "gha-runner-scale-set.fullname" -}}
-{{- if .Values.fullnameOverride }}
-{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
-{{- else }}
-{{- $name := default .Chart.Name .Values.nameOverride }}
-{{- if contains $name .Release.Name }}
 {{- .Release.Name | trunc 63 | trimSuffix "-" }}
-{{- else }}
-{{- $name := default .Chart.Name }}
-{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
-{{- end }}
-{{- end }}
 {{- end }}

 {{/*
 Create chart name and version as used by the chart label.

@@ -41,6 +33,8 @@ app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
 {{- end }}
 app.kubernetes.io/managed-by: {{ .Release.Service }}
 app.kubernetes.io/part-of: gha-runner-scale-set
+actions.github.com/scale-set-name: {{ .Release.Name }}
+actions.github.com/scale-set-namespace: {{ .Release.Namespace }}
 {{- end }}

 {{/*

@@ -71,6 +65,10 @@ app.kubernetes.io/instance: {{ .Release.Name }}
 {{- include "gha-runner-scale-set.fullname" . }}-kube-mode-role
 {{- end }}

+{{- define "gha-runner-scale-set.kubeModeRoleBindingName" -}}
+{{- include "gha-runner-scale-set.fullname" . }}-kube-mode-role-binding
+{{- end }}
+
 {{- define "gha-runner-scale-set.kubeModeServiceAccountName" -}}
 {{- include "gha-runner-scale-set.fullname" . }}-kube-mode-service-account
 {{- end }}

@@ -433,7 +431,7 @@ volumeMounts:
 {{- include "gha-runner-scale-set.fullname" . }}-manager-role
 {{- end }}

-{{- define "gha-runner-scale-set.managerRoleBinding" -}}
+{{- define "gha-runner-scale-set.managerRoleBindingName" -}}
 {{- include "gha-runner-scale-set.fullname" . }}-manager-role-binding
 {{- end }}
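The fullname helper now derives resource names directly from the release name, truncated to Kubernetes' 63-character name limit. The truncation rule in Go, for reference (an illustrative helper, not repository code):

```go
package main

import (
	"fmt"
	"strings"
)

// fullname mirrors the simplified Helm helper: take the release name,
// cap it at 63 characters (the Kubernetes name-length limit), and trim
// a trailing "-" left over from truncation.
func fullname(releaseName string) string {
	name := releaseName
	if len(name) > 63 {
		name = name[:63]
	}
	return strings.TrimSuffix(name, "-")
}

func main() {
	fmt.Println(fullname("test-runners"))
	fmt.Println(fullname(strings.Repeat("a", 62) + "-b")) // truncated, no trailing dash
}
```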
@@ -12,6 +12,21 @@ metadata:
   labels:
     app.kubernetes.io/component: "autoscaling-runner-set"
     {{- include "gha-runner-scale-set.labels" . | nindent 4 }}
+  annotations:
+    {{- $containerMode := .Values.containerMode }}
+    {{- if not (kindIs "string" .Values.githubConfigSecret) }}
+    actions.github.com/cleanup-github-secret-name: {{ include "gha-runner-scale-set.githubsecret" . }}
+    {{- end }}
+    actions.github.com/cleanup-manager-role-binding: {{ include "gha-runner-scale-set.managerRoleBindingName" . }}
+    actions.github.com/cleanup-manager-role-name: {{ include "gha-runner-scale-set.managerRoleName" . }}
+    {{- if and $containerMode (eq $containerMode.type "kubernetes") (not .Values.template.spec.serviceAccountName) }}
+    actions.github.com/cleanup-kubernetes-mode-role-binding-name: {{ include "gha-runner-scale-set.kubeModeRoleBindingName" . }}
+    actions.github.com/cleanup-kubernetes-mode-role-name: {{ include "gha-runner-scale-set.kubeModeRoleName" . }}
+    actions.github.com/cleanup-kubernetes-mode-service-account-name: {{ include "gha-runner-scale-set.kubeModeServiceAccountName" . }}
+    {{- end }}
+    {{- if and (ne $containerMode.type "kubernetes") (not .Values.template.spec.serviceAccountName) }}
+    actions.github.com/cleanup-no-permission-service-account-name: {{ include "gha-runner-scale-set.noPermissionServiceAccountName" . }}
+    {{- end }}
 spec:
   githubConfigUrl: {{ required ".Values.githubConfigUrl is required" (trimSuffix "/" .Values.githubConfigUrl) }}
   githubConfigSecret: {{ include "gha-runner-scale-set.githubsecret" . }}
@@ -7,7 +7,7 @@ metadata:
   labels:
     {{- include "gha-runner-scale-set.labels" . | nindent 4 }}
   finalizers:
-    - actions.github.com/secret-protection
+    - actions.github.com/cleanup-protection
 data:
   {{- $hasToken := false }}
   {{- $hasAppId := false }}

@@ -36,4 +36,4 @@ data:
   {{- if and $hasAppId (or (not $hasInstallationId) (not $hasPrivateKey)) }}
   {{- fail "A valid .Values.githubConfigSecret is required for setting auth with GitHub server, provide .Values.githubConfigSecret.github_app_installation_id and .Values.githubConfigSecret.github_app_private_key." }}
   {{- end }}
-{{- end}}
+{{- end}}
@@ -6,6 +6,8 @@ kind: Role
 metadata:
   name: {{ include "gha-runner-scale-set.kubeModeRoleName" . }}
   namespace: {{ .Release.Namespace }}
+  finalizers:
+    - actions.github.com/cleanup-protection
 rules:
 - apiGroups: [""]
   resources: ["pods"]

@@ -3,8 +3,10 @@
 apiVersion: rbac.authorization.k8s.io/v1
 kind: RoleBinding
 metadata:
-  name: {{ include "gha-runner-scale-set.kubeModeRoleName" . }}
+  name: {{ include "gha-runner-scale-set.kubeModeRoleBindingName" . }}
   namespace: {{ .Release.Namespace }}
+  finalizers:
+    - actions.github.com/cleanup-protection
 roleRef:
   apiGroup: rbac.authorization.k8s.io
   kind: Role

@@ -5,6 +5,8 @@ kind: ServiceAccount
 metadata:
   name: {{ include "gha-runner-scale-set.kubeModeServiceAccountName" . }}
   namespace: {{ .Release.Namespace }}
+  finalizers:
+    - actions.github.com/cleanup-protection
   labels:
     {{- include "gha-runner-scale-set.labels" . | nindent 4 }}
 {{- end }}

@@ -3,6 +3,11 @@ kind: Role
 metadata:
   name: {{ include "gha-runner-scale-set.managerRoleName" . }}
   namespace: {{ .Release.Namespace }}
+  labels:
+    {{- include "gha-runner-scale-set.labels" . | nindent 4 }}
+    app.kubernetes.io/component: manager-role
+  finalizers:
+    - actions.github.com/cleanup-protection
 rules:
 - apiGroups:
   - ""

@@ -29,6 +34,17 @@ rules:
   - list
   - patch
   - update
+- apiGroups:
+  - ""
+  resources:
+  - serviceaccounts
+  verbs:
+  - create
+  - delete
+  - get
+  - list
+  - patch
+  - update
 - apiGroups:
   - rbac.authorization.k8s.io
   resources:

@@ -56,4 +72,4 @@ rules:
   - configmaps
   verbs:
   - get
-{{- end }}
+{{- end }}

@@ -1,8 +1,13 @@
 apiVersion: rbac.authorization.k8s.io/v1
 kind: RoleBinding
 metadata:
-  name: {{ include "gha-runner-scale-set.managerRoleBinding" . }}
+  name: {{ include "gha-runner-scale-set.managerRoleBindingName" . }}
   namespace: {{ .Release.Namespace }}
+  labels:
+    {{- include "gha-runner-scale-set.labels" . | nindent 4 }}
+    app.kubernetes.io/component: manager-role-binding
+  finalizers:
+    - actions.github.com/cleanup-protection
 roleRef:
   apiGroup: rbac.authorization.k8s.io
   kind: Role

@@ -10,4 +15,4 @@ roleRef:
 subjects:
 - kind: ServiceAccount
   name: {{ include "gha-runner-scale-set.managerServiceAccountName" . | nindent 4 }}
-  namespace: {{ include "gha-runner-scale-set.managerServiceAccountNamespace" . | nindent 4 }}
+  namespace: {{ include "gha-runner-scale-set.managerServiceAccountNamespace" . | nindent 4 }}

@@ -7,4 +7,6 @@ metadata:
   namespace: {{ .Release.Namespace }}
   labels:
     {{- include "gha-runner-scale-set.labels" . | nindent 4 }}
+  finalizers:
+    - actions.github.com/cleanup-protection
 {{- end }}
@@ -1,11 +1,13 @@
package tests

import (
	"fmt"
	"path/filepath"
	"strings"
	"testing"

	v1alpha1 "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
	actionsgithubcom "github.com/actions/actions-runner-controller/controllers/actions.github.com"
	"github.com/gruntwork-io/terratest/modules/helm"
	"github.com/gruntwork-io/terratest/modules/k8s"
	"github.com/gruntwork-io/terratest/modules/random"
@@ -43,7 +45,7 @@ func TestTemplateRenderedGitHubSecretWithGitHubToken(t *testing.T) {
	assert.Equal(t, namespaceName, githubSecret.Namespace)
	assert.Equal(t, "test-runners-gha-runner-scale-set-github-secret", githubSecret.Name)
	assert.Equal(t, "gh_token12345", string(githubSecret.Data["github_token"]))
	assert.Equal(t, "actions.github.com/secret-protection", githubSecret.Finalizers[0])
	assert.Equal(t, "actions.github.com/cleanup-protection", githubSecret.Finalizers[0])
}

func TestTemplateRenderedGitHubSecretWithGitHubApp(t *testing.T) {
@@ -188,6 +190,7 @@ func TestTemplateRenderedSetServiceAccountToNoPermission(t *testing.T) {
	helm.UnmarshalK8SYaml(t, output, &ars)

	assert.Equal(t, "test-runners-gha-runner-scale-set-no-permission-service-account", ars.Spec.Template.Spec.ServiceAccountName)
	assert.Empty(t, ars.Annotations[actionsgithubcom.AnnotationKeyKubernetesModeServiceAccountName]) // no finalizer protections in place
}

func TestTemplateRenderedSetServiceAccountToKubeMode(t *testing.T) {
@@ -217,6 +220,7 @@ func TestTemplateRenderedSetServiceAccountToKubeMode(t *testing.T) {

	assert.Equal(t, namespaceName, serviceAccount.Namespace)
	assert.Equal(t, "test-runners-gha-runner-scale-set-kube-mode-service-account", serviceAccount.Name)
	assert.Equal(t, "actions.github.com/cleanup-protection", serviceAccount.Finalizers[0])

	output = helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/kube_mode_role.yaml"})
	var role rbacv1.Role
@@ -224,6 +228,9 @@ func TestTemplateRenderedSetServiceAccountToKubeMode(t *testing.T) {

	assert.Equal(t, namespaceName, role.Namespace)
	assert.Equal(t, "test-runners-gha-runner-scale-set-kube-mode-role", role.Name)

	assert.Equal(t, "actions.github.com/cleanup-protection", role.Finalizers[0])

	assert.Len(t, role.Rules, 5, "kube mode role should have 5 rules")
	assert.Equal(t, "pods", role.Rules[0].Resources[0])
	assert.Equal(t, "pods/exec", role.Rules[1].Resources[0])
@@ -236,18 +243,21 @@ func TestTemplateRenderedSetServiceAccountToKubeMode(t *testing.T) {
	helm.UnmarshalK8SYaml(t, output, &roleBinding)

	assert.Equal(t, namespaceName, roleBinding.Namespace)
	assert.Equal(t, "test-runners-gha-runner-scale-set-kube-mode-role", roleBinding.Name)
	assert.Equal(t, "test-runners-gha-runner-scale-set-kube-mode-role-binding", roleBinding.Name)
	assert.Len(t, roleBinding.Subjects, 1)
	assert.Equal(t, "test-runners-gha-runner-scale-set-kube-mode-service-account", roleBinding.Subjects[0].Name)
	assert.Equal(t, namespaceName, roleBinding.Subjects[0].Namespace)
	assert.Equal(t, "test-runners-gha-runner-scale-set-kube-mode-role", roleBinding.RoleRef.Name)
	assert.Equal(t, "Role", roleBinding.RoleRef.Kind)
	assert.Equal(t, "actions.github.com/cleanup-protection", serviceAccount.Finalizers[0])

	output = helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"})
	var ars v1alpha1.AutoscalingRunnerSet
	helm.UnmarshalK8SYaml(t, output, &ars)

	assert.Equal(t, "test-runners-gha-runner-scale-set-kube-mode-service-account", ars.Spec.Template.Spec.ServiceAccountName)
	expectedServiceAccountName := "test-runners-gha-runner-scale-set-kube-mode-service-account"
	assert.Equal(t, expectedServiceAccountName, ars.Spec.Template.Spec.ServiceAccountName)
	assert.Equal(t, expectedServiceAccountName, ars.Annotations[actionsgithubcom.AnnotationKeyKubernetesModeServiceAccountName])
}

func TestTemplateRenderedUserProvideSetServiceAccount(t *testing.T) {
@@ -279,6 +289,7 @@ func TestTemplateRenderedUserProvideSetServiceAccount(t *testing.T) {
	helm.UnmarshalK8SYaml(t, output, &ars)

	assert.Equal(t, "test-service-account", ars.Spec.Template.Spec.ServiceAccountName)
	assert.Empty(t, ars.Annotations[actionsgithubcom.AnnotationKeyKubernetesModeServiceAccountName])
}

func TestTemplateRenderedAutoScalingRunnerSet(t *testing.T) {
@@ -1458,7 +1469,11 @@ func TestTemplate_CreateManagerRole(t *testing.T) {

	assert.Equal(t, namespaceName, managerRole.Namespace, "namespace should match the namespace of the Helm release")
	assert.Equal(t, "test-runners-gha-runner-scale-set-manager-role", managerRole.Name)
	assert.Equal(t, 5, len(managerRole.Rules))
	assert.Equal(t, "actions.github.com/cleanup-protection", managerRole.Finalizers[0])
	assert.Equal(t, 6, len(managerRole.Rules))

	var ars v1alpha1.AutoscalingRunnerSet
	helm.UnmarshalK8SYaml(t, output, &ars)
}

func TestTemplate_CreateManagerRole_UseConfigMaps(t *testing.T) {
@@ -1489,8 +1504,9 @@ func TestTemplate_CreateManagerRole_UseConfigMaps(t *testing.T) {

	assert.Equal(t, namespaceName, managerRole.Namespace, "namespace should match the namespace of the Helm release")
	assert.Equal(t, "test-runners-gha-runner-scale-set-manager-role", managerRole.Name)
	assert.Equal(t, 6, len(managerRole.Rules))
	assert.Equal(t, "configmaps", managerRole.Rules[5].Resources[0])
	assert.Equal(t, "actions.github.com/cleanup-protection", managerRole.Finalizers[0])
	assert.Equal(t, 7, len(managerRole.Rules))
	assert.Equal(t, "configmaps", managerRole.Rules[6].Resources[0])
}

func TestTemplate_CreateManagerRoleBinding(t *testing.T) {
@@ -1521,6 +1537,7 @@ func TestTemplate_CreateManagerRoleBinding(t *testing.T) {
	assert.Equal(t, namespaceName, managerRoleBinding.Namespace, "namespace should match the namespace of the Helm release")
	assert.Equal(t, "test-runners-gha-runner-scale-set-manager-role-binding", managerRoleBinding.Name)
	assert.Equal(t, "test-runners-gha-runner-scale-set-manager-role", managerRoleBinding.RoleRef.Name)
	assert.Equal(t, "actions.github.com/cleanup-protection", managerRoleBinding.Finalizers[0])
	assert.Equal(t, "arc", managerRoleBinding.Subjects[0].Name)
	assert.Equal(t, "arc-system", managerRoleBinding.Subjects[0].Namespace)
}
@@ -1692,3 +1709,103 @@ func TestTemplateRenderedAutoScalingRunnerSet_KubeModeMergePodSpec(t *testing.T)
	assert.Equal(t, "others", ars.Spec.Template.Spec.Containers[0].VolumeMounts[1].Name, "VolumeMount name should be others")
	assert.Equal(t, "/others", ars.Spec.Template.Spec.Containers[0].VolumeMounts[1].MountPath, "VolumeMount mountPath should be /others")
}

func TestTemplateRenderedAutoscalingRunnerSetAnnotation_GitHubSecret(t *testing.T) {
	t.Parallel()

	// Path to the helm chart we will test
	helmChartPath, err := filepath.Abs("../../gha-runner-scale-set")
	require.NoError(t, err)

	releaseName := "test-runners"
	namespaceName := "test-" + strings.ToLower(random.UniqueId())

	annotationExpectedTests := map[string]*helm.Options{
		"GitHub token": {
			SetValues: map[string]string{
				"githubConfigUrl":                    "https://github.com/actions",
				"githubConfigSecret.github_token":    "gh_token12345",
				"controllerServiceAccount.name":      "arc",
				"controllerServiceAccount.namespace": "arc-system",
			},
			KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
		},
		"GitHub app": {
			SetValues: map[string]string{
				"githubConfigUrl":                                "https://github.com/actions",
				"githubConfigSecret.github_app_id":               "10",
				"githubConfigSecret.github_app_installation_id": "100",
				"githubConfigSecret.github_app_private_key":     "private_key",
				"controllerServiceAccount.name":                 "arc",
				"controllerServiceAccount.namespace":            "arc-system",
			},
			KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
		},
	}

	for name, options := range annotationExpectedTests {
		t.Run("Annotation set: "+name, func(t *testing.T) {
			output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"})
			var autoscalingRunnerSet v1alpha1.AutoscalingRunnerSet
			helm.UnmarshalK8SYaml(t, output, &autoscalingRunnerSet)

			assert.NotEmpty(t, autoscalingRunnerSet.Annotations[actionsgithubcom.AnnotationKeyGitHubSecretName])
		})
	}

	t.Run("Annotation should not be set", func(t *testing.T) {
		options := &helm.Options{
			SetValues: map[string]string{
				"githubConfigUrl":                    "https://github.com/actions",
				"githubConfigSecret":                 "pre-defined-secret",
				"controllerServiceAccount.name":      "arc",
				"controllerServiceAccount.namespace": "arc-system",
			},
			KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
		}
		output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"})
		var autoscalingRunnerSet v1alpha1.AutoscalingRunnerSet
		helm.UnmarshalK8SYaml(t, output, &autoscalingRunnerSet)

		assert.Empty(t, autoscalingRunnerSet.Annotations[actionsgithubcom.AnnotationKeyGitHubSecretName])
	})
}

func TestTemplateRenderedAutoscalingRunnerSetAnnotation_KubernetesModeCleanup(t *testing.T) {
	t.Parallel()

	// Path to the helm chart we will test
	helmChartPath, err := filepath.Abs("../../gha-runner-scale-set")
	require.NoError(t, err)

	releaseName := "test-runners"
	namespaceName := "test-" + strings.ToLower(random.UniqueId())

	options := &helm.Options{
		SetValues: map[string]string{
			"githubConfigUrl":                    "https://github.com/actions",
			"githubConfigSecret.github_token":    "gh_token12345",
			"controllerServiceAccount.name":      "arc",
			"controllerServiceAccount.namespace": "arc-system",
			"containerMode.type":                 "kubernetes",
		},
		KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
	}

	output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"})
	var autoscalingRunnerSet v1alpha1.AutoscalingRunnerSet
	helm.UnmarshalK8SYaml(t, output, &autoscalingRunnerSet)

	annotationValues := map[string]string{
		actionsgithubcom.AnnotationKeyGitHubSecretName:                 "test-runners-gha-runner-scale-set-github-secret",
		actionsgithubcom.AnnotationKeyManagerRoleName:                  "test-runners-gha-runner-scale-set-manager-role",
		actionsgithubcom.AnnotationKeyManagerRoleBindingName:           "test-runners-gha-runner-scale-set-manager-role-binding",
		actionsgithubcom.AnnotationKeyKubernetesModeServiceAccountName: "test-runners-gha-runner-scale-set-kube-mode-service-account",
		actionsgithubcom.AnnotationKeyKubernetesModeRoleName:           "test-runners-gha-runner-scale-set-kube-mode-role",
		actionsgithubcom.AnnotationKeyKubernetesModeRoleBindingName:    "test-runners-gha-runner-scale-set-kube-mode-role-binding",
	}

	for annotation, value := range annotationValues {
		assert.Equal(t, value, autoscalingRunnerSet.Annotations[annotation], fmt.Sprintf("Annotation %q does not match the expected value", annotation))
	}
}
@@ -65,7 +65,7 @@ githubConfigSecret:
  # certificateFrom:
  #   configMapKeyRef:
  #     name: config-map-name
  #     key: ca.pem
  #     key: ca.crt
  # runnerMountPath: /usr/local/share/ca-certificates/

# containerMode:
@@ -80,6 +80,9 @@ spec:
      image:
        description: Required
        type: string
      imagePullPolicy:
        description: Required
        type: string
      imagePullSecrets:
        description: Required
        items:

@@ -56,6 +56,8 @@ spec:
            valueFrom:
              fieldRef:
                fieldPath: metadata.namespace
          - name: CONTROLLER_MANAGER_LISTENER_IMAGE_PULL_POLICY
            value: IfNotPresent
          volumeMounts:
          - name: controller-manager
            mountPath: "/etc/actions-runner-controller"
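The CONTROLLER_MANAGER_LISTENER_IMAGE_PULL_POLICY environment variable above feeds the image pull policy used for listener pods, with IfNotPresent as the shipped default (see DefaultScaleSetListenerImagePullPolicy further down in this changeset). A minimal sketch of how such a value can be resolved and validated; the function name and the fall-back-on-unrecognized-value behavior are assumptions for illustration, not the repository's exact code:

package sketch

import (
	"os"

	corev1 "k8s.io/api/core/v1"
)

// defaultPullPolicy mirrors DefaultScaleSetListenerImagePullPolicy.
const defaultPullPolicy = corev1.PullIfNotPresent

// listenerImagePullPolicy reads CONTROLLER_MANAGER_LISTENER_IMAGE_PULL_POLICY
// and falls back to the default when the value is unset or unrecognized.
func listenerImagePullPolicy() corev1.PullPolicy {
	switch p := corev1.PullPolicy(os.Getenv("CONTROLLER_MANAGER_LISTENER_IMAGE_PULL_POLICY")); p {
	case corev1.PullAlways, corev1.PullNever, corev1.PullIfNotPresent:
		return p
	default:
		return defaultPullPolicy
	}
}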
@@ -102,6 +102,13 @@ rules:
  - patch
  - update
  - watch
- apiGroups:
  - actions.github.com
  resources:
  - ephemeralrunnersets/finalizers
  verbs:
  - patch
  - update
- apiGroups:
  - actions.github.com
  resources:
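A note on the new ephemeralrunnersets/finalizers rule above: with the cleanup finalizers in play, the controller must be allowed to update and patch the finalizers subresource of its own custom resources; on clusters running the OwnerReferencesPermissionEnforcement admission plugin, this permission is also what allows setting blockOwnerDeletion on owner references. In a kubebuilder project such rules are generated from markers on the reconciler, like the one that appears later in this changeset:

// +kubebuilder:rbac:groups=actions.github.com,resources=ephemeralrunnersets/finalizers,verbs=update;patch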
@@ -41,7 +41,6 @@ import (

const (
	autoscalingListenerContainerName = "autoscaler"
	autoscalingListenerOwnerKey      = ".metadata.controller"
	autoscalingListenerFinalizerName = "autoscalinglistener.actions.github.com/finalizer"
)

@@ -246,65 +245,6 @@ func (r *AutoscalingListenerReconciler) Reconcile(ctx context.Context, req ctrl.
	return ctrl.Result{}, nil
}

// SetupWithManager sets up the controller with the Manager.
func (r *AutoscalingListenerReconciler) SetupWithManager(mgr ctrl.Manager) error {
	groupVersionIndexer := func(rawObj client.Object) []string {
		groupVersion := v1alpha1.GroupVersion.String()
		owner := metav1.GetControllerOf(rawObj)
		if owner == nil {
			return nil
		}

		// ...make sure it is owned by this controller
		if owner.APIVersion != groupVersion || owner.Kind != "AutoscalingListener" {
			return nil
		}

		// ...and if so, return it
		return []string{owner.Name}
	}

	if err := mgr.GetFieldIndexer().IndexField(context.Background(), &corev1.Pod{}, autoscalingListenerOwnerKey, groupVersionIndexer); err != nil {
		return err
	}

	if err := mgr.GetFieldIndexer().IndexField(context.Background(), &corev1.ServiceAccount{}, autoscalingListenerOwnerKey, groupVersionIndexer); err != nil {
		return err
	}

	labelBasedWatchFunc := func(obj client.Object) []reconcile.Request {
		var requests []reconcile.Request
		labels := obj.GetLabels()
		namespace, ok := labels["auto-scaling-listener-namespace"]
		if !ok {
			return nil
		}

		name, ok := labels["auto-scaling-listener-name"]
		if !ok {
			return nil
		}
		requests = append(requests,
			reconcile.Request{
				NamespacedName: types.NamespacedName{
					Name:      name,
					Namespace: namespace,
				},
			},
		)
		return requests
	}

	return ctrl.NewControllerManagedBy(mgr).
		For(&v1alpha1.AutoscalingListener{}).
		Owns(&corev1.Pod{}).
		Owns(&corev1.ServiceAccount{}).
		Watches(&source.Kind{Type: &rbacv1.Role{}}, handler.EnqueueRequestsFromMapFunc(labelBasedWatchFunc)).
		Watches(&source.Kind{Type: &rbacv1.RoleBinding{}}, handler.EnqueueRequestsFromMapFunc(labelBasedWatchFunc)).
		WithEventFilter(predicate.ResourceVersionChangedPredicate{}).
		Complete(r)
}

func (r *AutoscalingListenerReconciler) cleanupResources(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, logger logr.Logger) (done bool, err error) {
	logger.Info("Cleaning up the listener pod")
	listenerPod := new(corev1.Pod)
@@ -615,3 +555,62 @@ func (r *AutoscalingListenerReconciler) createRoleBindingForListener(ctx context
		"serviceAccount", serviceAccount.Name)
	return ctrl.Result{Requeue: true}, nil
}

// SetupWithManager sets up the controller with the Manager.
func (r *AutoscalingListenerReconciler) SetupWithManager(mgr ctrl.Manager) error {
	groupVersionIndexer := func(rawObj client.Object) []string {
		groupVersion := v1alpha1.GroupVersion.String()
		owner := metav1.GetControllerOf(rawObj)
		if owner == nil {
			return nil
		}

		// ...make sure it is owned by this controller
		if owner.APIVersion != groupVersion || owner.Kind != "AutoscalingListener" {
			return nil
		}

		// ...and if so, return it
		return []string{owner.Name}
	}

	if err := mgr.GetFieldIndexer().IndexField(context.Background(), &corev1.Pod{}, resourceOwnerKey, groupVersionIndexer); err != nil {
		return err
	}

	if err := mgr.GetFieldIndexer().IndexField(context.Background(), &corev1.ServiceAccount{}, resourceOwnerKey, groupVersionIndexer); err != nil {
		return err
	}

	labelBasedWatchFunc := func(obj client.Object) []reconcile.Request {
		var requests []reconcile.Request
		labels := obj.GetLabels()
		namespace, ok := labels["auto-scaling-listener-namespace"]
		if !ok {
			return nil
		}

		name, ok := labels["auto-scaling-listener-name"]
		if !ok {
			return nil
		}
		requests = append(requests,
			reconcile.Request{
				NamespacedName: types.NamespacedName{
					Name:      name,
					Namespace: namespace,
				},
			},
		)
		return requests
	}

	return ctrl.NewControllerManagedBy(mgr).
		For(&v1alpha1.AutoscalingListener{}).
		Owns(&corev1.Pod{}).
		Owns(&corev1.ServiceAccount{}).
		Watches(&source.Kind{Type: &rbacv1.Role{}}, handler.EnqueueRequestsFromMapFunc(labelBasedWatchFunc)).
		Watches(&source.Kind{Type: &rbacv1.RoleBinding{}}, handler.EnqueueRequestsFromMapFunc(labelBasedWatchFunc)).
		WithEventFilter(predicate.ResourceVersionChangedPredicate{}).
		Complete(r)
}
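Two details of SetupWithManager above are worth calling out: the field index on .metadata.controller lets the controller list Pods and ServiceAccounts by owner name, while the Roles and RoleBindings the listener needs (which may live in a namespace other than the listener's, where owner references cannot be used) are found through the auto-scaling-listener-name/-namespace label pair and mapped back to reconcile requests. A sketch of the label-stamping side, with a hypothetical constructor:

package sketch

import (
	rbacv1 "k8s.io/api/rbac/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// newListenerRole is a hypothetical constructor showing how a Role carries
// the labels that labelBasedWatchFunc maps back to its AutoscalingListener.
func newListenerRole(listenerName, listenerNamespace, roleNamespace string) *rbacv1.Role {
	return &rbacv1.Role{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: listenerName + "-role-",
			Namespace:    roleNamespace,
			Labels: map[string]string{
				"auto-scaling-listener-name":      listenerName,
				"auto-scaling-listener-namespace": listenerNamespace,
			},
		},
	}
}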
@@ -213,7 +213,7 @@ var _ = Describe("Test AutoScalingListener controller", func() {
		Eventually(
			func() error {
				podList := new(corev1.PodList)
				err := k8sClient.List(ctx, podList, client.InNamespace(autoscalingListener.Namespace), client.MatchingFields{autoscalingRunnerSetOwnerKey: autoscalingListener.Name})
				err := k8sClient.List(ctx, podList, client.InNamespace(autoscalingListener.Namespace), client.MatchingFields{resourceOwnerKey: autoscalingListener.Name})
				if err != nil {
					return err
				}
@@ -231,7 +231,7 @@ var _ = Describe("Test AutoScalingListener controller", func() {
		Eventually(
			func() error {
				serviceAccountList := new(corev1.ServiceAccountList)
				err := k8sClient.List(ctx, serviceAccountList, client.InNamespace(autoscalingListener.Namespace), client.MatchingFields{autoscalingRunnerSetOwnerKey: autoscalingListener.Name})
				err := k8sClient.List(ctx, serviceAccountList, client.InNamespace(autoscalingListener.Namespace), client.MatchingFields{resourceOwnerKey: autoscalingListener.Name})
				if err != nil {
					return err
				}
@@ -27,6 +27,7 @@ import (
	"github.com/actions/actions-runner-controller/github/actions"
	"github.com/go-logr/logr"
	corev1 "k8s.io/api/core/v1"
	rbacv1 "k8s.io/api/rbac/v1"
	kerrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
@@ -41,9 +42,7 @@ import (
)

const (
	// TODO: Replace with shared image.
	autoscalingRunnerSetOwnerKey      = ".metadata.controller"
	LabelKeyRunnerSpecHash            = "runner-spec-hash"
	labelKeyRunnerSpecHash            = "runner-spec-hash"
	autoscalingRunnerSetFinalizerName = "autoscalingrunnerset.actions.github.com/finalizer"
	runnerScaleSetIdAnnotationKey     = "runner-scale-set-id"
	runnerScaleSetNameAnnotationKey   = "runner-scale-set-name"
@@ -113,6 +112,17 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl
		return ctrl.Result{}, err
	}

	requeue, err := r.removeFinalizersFromDependentResources(ctx, autoscalingRunnerSet, log)
	if err != nil {
		log.Error(err, "Failed to remove finalizers on dependent resources")
		return ctrl.Result{}, err
	}

	if requeue {
		log.Info("Waiting for dependent resources to be deleted")
		return ctrl.Result{Requeue: true}, nil
	}

	log.Info("Removing finalizer")
	err = patch(ctx, r.Client, autoscalingRunnerSet, func(obj *v1alpha1.AutoscalingRunnerSet) {
		controllerutil.RemoveFinalizer(obj, autoscalingRunnerSetFinalizerName)
@@ -188,10 +198,10 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl

	desiredSpecHash := autoscalingRunnerSet.RunnerSetSpecHash()
	for _, runnerSet := range existingRunnerSets.all() {
		log.Info("Find existing ephemeral runner set", "name", runnerSet.Name, "specHash", runnerSet.Labels[LabelKeyRunnerSpecHash])
		log.Info("Find existing ephemeral runner set", "name", runnerSet.Name, "specHash", runnerSet.Labels[labelKeyRunnerSpecHash])
	}

	if desiredSpecHash != latestRunnerSet.Labels[LabelKeyRunnerSpecHash] {
	if desiredSpecHash != latestRunnerSet.Labels[labelKeyRunnerSpecHash] {
		log.Info("Latest runner set spec hash does not match the current autoscaling runner set. Creating a new runner set")
		return r.createEphemeralRunnerSet(ctx, autoscalingRunnerSet, log)
	}
@@ -219,7 +229,7 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl
	}

	// Our listener pod is out of date, so we need to delete it so that it gets recreated.
	if listener.Labels[LabelKeyRunnerSpecHash] != autoscalingRunnerSet.ListenerSpecHash() {
	if listener.Labels[labelKeyRunnerSpecHash] != autoscalingRunnerSet.ListenerSpecHash() {
		log.Info("RunnerScaleSetListener is out of date. Deleting it so that it is recreated", "name", listener.Name)
		if err := r.Delete(ctx, listener); err != nil {
			if kerrors.IsNotFound(err) {
@@ -305,6 +315,29 @@ func (r *AutoscalingRunnerSetReconciler) deleteEphemeralRunnerSets(ctx context.C
	return nil
}

func (r *AutoscalingRunnerSetReconciler) removeFinalizersFromDependentResources(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) (requeue bool, err error) {
	c := autoscalingRunnerSetFinalizerDependencyCleaner{
		client:               r.Client,
		autoscalingRunnerSet: autoscalingRunnerSet,
		logger:               logger,
	}

	c.removeKubernetesModeRoleBindingFinalizer(ctx)
	c.removeKubernetesModeRoleFinalizer(ctx)
	c.removeKubernetesModeServiceAccountFinalizer(ctx)
	c.removeNoPermissionServiceAccountFinalizer(ctx)
	c.removeGitHubSecretFinalizer(ctx)
	c.removeManagerRoleBindingFinalizer(ctx)
	c.removeManagerRoleFinalizer(ctx)

	requeue, err = c.result()
	if err != nil {
		logger.Error(err, "Failed to cleanup finalizer from dependent resource")
		return true, err
	}
	return requeue, nil
}
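removeFinalizersFromDependentResources drives the cleaner as a chain: every remove* step no-ops once requeue or err has been set, so at most one dependent resource is patched per reconcile pass and the reconciler requeues between steps. The pattern in isolation (a minimal sketch; names are illustrative):

package sketch

type stepChain struct {
	requeue bool
	err     error
}

// step runs fn only while the chain is still clean; the first step that
// mutates state or fails stops every later step, mirroring the cleaner.
func (c *stepChain) step(fn func() (requeue bool, err error)) {
	if c.requeue || c.err != nil {
		return
	}
	c.requeue, c.err = fn()
}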
func (r *AutoscalingRunnerSetReconciler) createRunnerScaleSet(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) (ctrl.Result, error) {
	logger.Info("Creating a new runner scale set")
	actionsClient, err := r.actionsClientFor(ctx, autoscalingRunnerSet)
@@ -467,12 +500,28 @@ func (r *AutoscalingRunnerSetReconciler) updateRunnerScaleSetName(ctx context.Co
}

func (r *AutoscalingRunnerSetReconciler) deleteRunnerScaleSet(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) error {
	scaleSetId, ok := autoscalingRunnerSet.Annotations[runnerScaleSetIdAnnotationKey]
	if !ok {
		// The annotation can be missing in three scenarios:
		// 1. The scale set was never created.
		//    In this case, we don't need to fetch the actions client to delete a scale set that does not exist.
		//
		// 2. The scale set has already been deleted by the controller.
		//    In that case, the controller cleans up the annotation because the scale set no longer exists.
		//    Removing the scale set id is also useful because permission cleanup will eventually revoke the permission
		//    assigned to it on the GitHub secret, so an actions client built from that secret would hit permission denied.
		//
		// 3. The annotation was removed manually.
		//    In this case, the controller treats the scale set as already removed from the Actions service,
		//    and manual deletion of the scale set is then required.
		return nil
	}
	logger.Info("Deleting the runner scale set from Actions service")
	runnerScaleSetId, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdAnnotationKey])
	runnerScaleSetId, err := strconv.Atoi(scaleSetId)
	if err != nil {
		// If the annotation is not set correctly, or if it does not exist, we would get stuck in a loop trying to parse the scale set id.
		// If the configuration is invalid (the secret does not exist, for example), we never get to the point of creating the runner set. But then, manual cleanup
		// would get stuck finalizing the resource, trying to parse the annotation indefinitely.
		// If the annotation is not set correctly, we would get stuck in a loop trying to parse the scale set id.
		// If the configuration is invalid (the secret does not exist, for example), we never got to the point of creating the runner set.
		// But then, manual cleanup would get stuck finalizing the resource, trying to parse the annotation indefinitely.
		logger.Info("autoscaling runner set does not have annotation describing scale set id. Skip deletion", "err", err.Error())
		return nil
	}
@@ -489,6 +538,14 @@ func (r *AutoscalingRunnerSetReconciler) deleteRunnerScaleSet(ctx context.Contex
		return err
	}

	err = patch(ctx, r.Client, autoscalingRunnerSet, func(obj *v1alpha1.AutoscalingRunnerSet) {
		delete(obj.Annotations, runnerScaleSetIdAnnotationKey)
	})
	if err != nil {
		logger.Error(err, "Failed to patch autoscaling runner set with annotation removed", "annotation", runnerScaleSetIdAnnotationKey)
		return err
	}

	logger.Info("Deleted the runner scale set from Actions service")
	return nil
}
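The net effect of the guards above is an idempotent delete: a missing annotation skips the Actions service call entirely, an unparsable id is logged and skipped rather than retried forever, and a successful delete patches the id annotation away so later passes fall into the first case even if the GitHub credentials have since lost access. Condensed into a sketch (a hypothetical helper; the annotation key is the one defined above):

package sketch

import "strconv"

// shouldDeleteScaleSet mirrors the guard above: only a present, parsable
// annotation value triggers a call to the Actions service.
func shouldDeleteScaleSet(annotations map[string]string) (id int, ok bool) {
	raw, present := annotations["runner-scale-set-id"]
	if !present {
		return 0, false // never created, already deleted, or manually detached
	}
	id, err := strconv.Atoi(raw)
	if err != nil {
		return 0, false // malformed id: skip instead of looping forever
	}
	return id, true
}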
@@ -541,7 +598,7 @@ func (r *AutoscalingRunnerSetReconciler) createAutoScalingListenerForRunnerSet(c

func (r *AutoscalingRunnerSetReconciler) listEphemeralRunnerSets(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet) (*EphemeralRunnerSets, error) {
	list := new(v1alpha1.EphemeralRunnerSetList)
	if err := r.List(ctx, list, client.InNamespace(autoscalingRunnerSet.Namespace), client.MatchingFields{autoscalingRunnerSetOwnerKey: autoscalingRunnerSet.Name}); err != nil {
	if err := r.List(ctx, list, client.InNamespace(autoscalingRunnerSet.Namespace), client.MatchingFields{resourceOwnerKey: autoscalingRunnerSet.Name}); err != nil {
		return nil, fmt.Errorf("failed to list ephemeral runner sets: %v", err)
	}

@@ -634,7 +691,7 @@ func (r *AutoscalingRunnerSetReconciler) SetupWithManager(mgr ctrl.Manager) erro
		return []string{owner.Name}
	}

	if err := mgr.GetFieldIndexer().IndexField(context.Background(), &v1alpha1.EphemeralRunnerSet{}, autoscalingRunnerSetOwnerKey, groupVersionIndexer); err != nil {
	if err := mgr.GetFieldIndexer().IndexField(context.Background(), &v1alpha1.EphemeralRunnerSet{}, resourceOwnerKey, groupVersionIndexer); err != nil {
		return err
	}

@@ -658,6 +715,328 @@ func (r *AutoscalingRunnerSetReconciler) SetupWithManager(mgr ctrl.Manager) erro
		Complete(r)
}
type autoscalingRunnerSetFinalizerDependencyCleaner struct {
	// configuration fields
	client               client.Client
	autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet
	logger               logr.Logger

	// fields to operate on
	requeue bool
	err     error
}

func (c *autoscalingRunnerSetFinalizerDependencyCleaner) result() (requeue bool, err error) {
	return c.requeue, c.err
}

func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeKubernetesModeRoleBindingFinalizer(ctx context.Context) {
	if c.requeue || c.err != nil {
		return
	}

	roleBindingName, ok := c.autoscalingRunnerSet.Annotations[AnnotationKeyKubernetesModeRoleBindingName]
	if !ok {
		c.logger.Info(
			"Skipping cleaning up kubernetes mode role binding",
			"reason",
			fmt.Sprintf("annotation key %q not present", AnnotationKeyKubernetesModeRoleBindingName),
		)
		return
	}

	c.logger.Info("Removing finalizer from container mode kubernetes role binding", "name", roleBindingName)

	roleBinding := new(rbacv1.RoleBinding)
	err := c.client.Get(ctx, types.NamespacedName{Name: roleBindingName, Namespace: c.autoscalingRunnerSet.Namespace}, roleBinding)
	switch {
	case err == nil:
		if !controllerutil.ContainsFinalizer(roleBinding, AutoscalingRunnerSetCleanupFinalizerName) {
			c.logger.Info("Kubernetes mode role binding finalizer has already been removed", "name", roleBindingName)
			return
		}
		err = patch(ctx, c.client, roleBinding, func(obj *rbacv1.RoleBinding) {
			controllerutil.RemoveFinalizer(obj, AutoscalingRunnerSetCleanupFinalizerName)
		})
		if err != nil {
			c.err = fmt.Errorf("failed to patch kubernetes mode role binding without finalizer: %w", err)
			return
		}
		c.requeue = true
		c.logger.Info("Removed finalizer from container mode kubernetes role binding", "name", roleBindingName)
		return
	case err != nil && !kerrors.IsNotFound(err):
		c.err = fmt.Errorf("failed to fetch kubernetes mode role binding: %w", err)
		return
	default:
		c.logger.Info("Container mode kubernetes role binding has already been deleted", "name", roleBindingName)
		return
	}
}

func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeKubernetesModeRoleFinalizer(ctx context.Context) {
	if c.requeue || c.err != nil {
		return
	}

	roleName, ok := c.autoscalingRunnerSet.Annotations[AnnotationKeyKubernetesModeRoleName]
	if !ok {
		c.logger.Info(
			"Skipping cleaning up kubernetes mode role",
			"reason",
			fmt.Sprintf("annotation key %q not present", AnnotationKeyKubernetesModeRoleName),
		)
		return
	}

	c.logger.Info("Removing finalizer from container mode kubernetes role", "name", roleName)
	role := new(rbacv1.Role)
	err := c.client.Get(ctx, types.NamespacedName{Name: roleName, Namespace: c.autoscalingRunnerSet.Namespace}, role)
	switch {
	case err == nil:
		if !controllerutil.ContainsFinalizer(role, AutoscalingRunnerSetCleanupFinalizerName) {
			c.logger.Info("Kubernetes mode role finalizer has already been removed", "name", roleName)
			return
		}
		err = patch(ctx, c.client, role, func(obj *rbacv1.Role) {
			controllerutil.RemoveFinalizer(obj, AutoscalingRunnerSetCleanupFinalizerName)
		})
		if err != nil {
			c.err = fmt.Errorf("failed to patch kubernetes mode role without finalizer: %w", err)
			return
		}
		c.requeue = true
		c.logger.Info("Removed finalizer from container mode kubernetes role")
		return
	case err != nil && !kerrors.IsNotFound(err):
		c.err = fmt.Errorf("failed to fetch kubernetes mode role: %w", err)
		return
	default:
		c.logger.Info("Container mode kubernetes role has already been deleted", "name", roleName)
		return
	}
}

func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeKubernetesModeServiceAccountFinalizer(ctx context.Context) {
	if c.requeue || c.err != nil {
		return
	}

	serviceAccountName, ok := c.autoscalingRunnerSet.Annotations[AnnotationKeyKubernetesModeServiceAccountName]
	if !ok {
		c.logger.Info(
			"Skipping cleaning up kubernetes mode service account",
			"reason",
			fmt.Sprintf("annotation key %q not present", AnnotationKeyKubernetesModeServiceAccountName),
		)
		return
	}

	c.logger.Info("Removing finalizer from container mode kubernetes service account", "name", serviceAccountName)

	serviceAccount := new(corev1.ServiceAccount)
	err := c.client.Get(ctx, types.NamespacedName{Name: serviceAccountName, Namespace: c.autoscalingRunnerSet.Namespace}, serviceAccount)
	switch {
	case err == nil:
		if !controllerutil.ContainsFinalizer(serviceAccount, AutoscalingRunnerSetCleanupFinalizerName) {
			c.logger.Info("Kubernetes mode service account finalizer has already been removed", "name", serviceAccountName)
			return
		}
		err = patch(ctx, c.client, serviceAccount, func(obj *corev1.ServiceAccount) {
			controllerutil.RemoveFinalizer(obj, AutoscalingRunnerSetCleanupFinalizerName)
		})
		if err != nil {
			c.err = fmt.Errorf("failed to patch kubernetes mode service account without finalizer: %w", err)
			return
		}
		c.requeue = true
		c.logger.Info("Removed finalizer from container mode kubernetes service account")
		return
	case err != nil && !kerrors.IsNotFound(err):
		c.err = fmt.Errorf("failed to fetch kubernetes mode service account: %w", err)
		return
	default:
		c.logger.Info("Container mode kubernetes service account has already been deleted", "name", serviceAccountName)
		return
	}
}

func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeNoPermissionServiceAccountFinalizer(ctx context.Context) {
	if c.requeue || c.err != nil {
		return
	}

	serviceAccountName, ok := c.autoscalingRunnerSet.Annotations[AnnotationKeyNoPermissionServiceAccountName]
	if !ok {
		c.logger.Info(
			"Skipping cleaning up no permission service account",
			"reason",
			fmt.Sprintf("annotation key %q not present", AnnotationKeyNoPermissionServiceAccountName),
		)
		return
	}

	c.logger.Info("Removing finalizer from no permission service account", "name", serviceAccountName)

	serviceAccount := new(corev1.ServiceAccount)
	err := c.client.Get(ctx, types.NamespacedName{Name: serviceAccountName, Namespace: c.autoscalingRunnerSet.Namespace}, serviceAccount)
	switch {
	case err == nil:
		if !controllerutil.ContainsFinalizer(serviceAccount, AutoscalingRunnerSetCleanupFinalizerName) {
			c.logger.Info("No permission service account finalizer has already been removed", "name", serviceAccountName)
			return
		}
		err = patch(ctx, c.client, serviceAccount, func(obj *corev1.ServiceAccount) {
			controllerutil.RemoveFinalizer(obj, AutoscalingRunnerSetCleanupFinalizerName)
		})
		if err != nil {
			c.err = fmt.Errorf("failed to patch service account without finalizer: %w", err)
			return
		}
		c.requeue = true
		c.logger.Info("Removed finalizer from no permission service account", "name", serviceAccountName)
		return
	case err != nil && !kerrors.IsNotFound(err):
		c.err = fmt.Errorf("failed to fetch service account: %w", err)
		return
	default:
		c.logger.Info("No permission service account has already been deleted", "name", serviceAccountName)
		return
	}
}

func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeGitHubSecretFinalizer(ctx context.Context) {
	if c.requeue || c.err != nil {
		return
	}

	githubSecretName, ok := c.autoscalingRunnerSet.Annotations[AnnotationKeyGitHubSecretName]
	if !ok {
		c.logger.Info(
			"Skipping cleaning up GitHub secret",
			"reason",
			fmt.Sprintf("annotation key %q not present", AnnotationKeyGitHubSecretName),
		)
		return
	}

	c.logger.Info("Removing finalizer from GitHub secret", "name", githubSecretName)

	githubSecret := new(corev1.Secret)
	err := c.client.Get(ctx, types.NamespacedName{Name: githubSecretName, Namespace: c.autoscalingRunnerSet.Namespace}, githubSecret)
	switch {
	case err == nil:
		if !controllerutil.ContainsFinalizer(githubSecret, AutoscalingRunnerSetCleanupFinalizerName) {
			c.logger.Info("GitHub secret finalizer has already been removed", "name", githubSecretName)
			return
		}
		err = patch(ctx, c.client, githubSecret, func(obj *corev1.Secret) {
			controllerutil.RemoveFinalizer(obj, AutoscalingRunnerSetCleanupFinalizerName)
		})
		if err != nil {
			c.err = fmt.Errorf("failed to patch GitHub secret without finalizer: %w", err)
			return
		}
		c.requeue = true
		c.logger.Info("Removed finalizer from GitHub secret", "name", githubSecretName)
		return
	case err != nil && !kerrors.IsNotFound(err) && !kerrors.IsForbidden(err):
		c.err = fmt.Errorf("failed to fetch GitHub secret: %w", err)
		return
	default:
		c.logger.Info("GitHub secret has already been deleted", "name", githubSecretName)
		return
	}
}

func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeManagerRoleBindingFinalizer(ctx context.Context) {
	if c.requeue || c.err != nil {
		return
	}

	managerRoleBindingName, ok := c.autoscalingRunnerSet.Annotations[AnnotationKeyManagerRoleBindingName]
	if !ok {
		c.logger.Info(
			"Skipping cleaning up manager role binding",
			"reason",
			fmt.Sprintf("annotation key %q not present", AnnotationKeyManagerRoleBindingName),
		)
		return
	}

	c.logger.Info("Removing finalizer from manager role binding", "name", managerRoleBindingName)

	roleBinding := new(rbacv1.RoleBinding)
	err := c.client.Get(ctx, types.NamespacedName{Name: managerRoleBindingName, Namespace: c.autoscalingRunnerSet.Namespace}, roleBinding)
	switch {
	case err == nil:
		if !controllerutil.ContainsFinalizer(roleBinding, AutoscalingRunnerSetCleanupFinalizerName) {
			c.logger.Info("Manager role binding finalizer has already been removed", "name", managerRoleBindingName)
			return
		}
		err = patch(ctx, c.client, roleBinding, func(obj *rbacv1.RoleBinding) {
			controllerutil.RemoveFinalizer(obj, AutoscalingRunnerSetCleanupFinalizerName)
		})
		if err != nil {
			c.err = fmt.Errorf("failed to patch manager role binding without finalizer: %w", err)
			return
		}
		c.requeue = true
		c.logger.Info("Removed finalizer from manager role binding", "name", managerRoleBindingName)
		return
	case err != nil && !kerrors.IsNotFound(err):
		c.err = fmt.Errorf("failed to fetch manager role binding: %w", err)
		return
	default:
		c.logger.Info("Manager role binding has already been deleted", "name", managerRoleBindingName)
		return
	}
}

func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeManagerRoleFinalizer(ctx context.Context) {
	if c.requeue || c.err != nil {
		return
	}

	managerRoleName, ok := c.autoscalingRunnerSet.Annotations[AnnotationKeyManagerRoleName]
	if !ok {
		c.logger.Info(
			"Skipping cleaning up manager role",
			"reason",
			fmt.Sprintf("annotation key %q not present", AnnotationKeyManagerRoleName),
		)
		return
	}

	c.logger.Info("Removing finalizer from manager role", "name", managerRoleName)

	role := new(rbacv1.Role)
	err := c.client.Get(ctx, types.NamespacedName{Name: managerRoleName, Namespace: c.autoscalingRunnerSet.Namespace}, role)
	switch {
	case err == nil:
		if !controllerutil.ContainsFinalizer(role, AutoscalingRunnerSetCleanupFinalizerName) {
			c.logger.Info("Manager role finalizer has already been removed", "name", managerRoleName)
			return
		}
		err = patch(ctx, c.client, role, func(obj *rbacv1.Role) {
			controllerutil.RemoveFinalizer(obj, AutoscalingRunnerSetCleanupFinalizerName)
		})
		if err != nil {
			c.err = fmt.Errorf("failed to patch manager role without finalizer: %w", err)
			return
		}
		c.requeue = true
		c.logger.Info("Removed finalizer from manager role", "name", managerRoleName)
		return
	case err != nil && !kerrors.IsNotFound(err):
		c.err = fmt.Errorf("failed to fetch manager role: %w", err)
		return
	default:
		c.logger.Info("Manager role has already been deleted", "name", managerRoleName)
		return
	}
}

// NOTE: if this logic should be used for other resources,
// consider using generics
type EphemeralRunnerSets struct {
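On the NOTE above: a generic helper is one way to collapse the seven near-identical remove* methods. A minimal sketch of what that could look like (requires Go 1.18+; the helper is an illustration, not code from this changeset):

package sketch

import (
	"context"

	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)

// removeFinalizer works for any client.Object, so one helper could replace
// the per-kind methods on the cleaner.
func removeFinalizer[T client.Object](ctx context.Context, c client.Client, obj T, finalizer string) (changed bool, err error) {
	if !controllerutil.ContainsFinalizer(obj, finalizer) {
		return false, nil // nothing to do
	}
	controllerutil.RemoveFinalizer(obj, finalizer)
	if err := c.Update(ctx, obj); err != nil {
		return false, err
	}
	return true, nil
}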
@@ -13,6 +13,7 @@ import (
	"time"

	corev1 "k8s.io/api/core/v1"
	rbacv1 "k8s.io/api/rbac/v1"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
@@ -23,6 +24,7 @@ import (
	. "github.com/onsi/gomega"
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"

	"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
	"github.com/actions/actions-runner-controller/github/actions"
@@ -278,10 +280,10 @@ var _ = Describe("Test AutoScalingRunnerSet controller", func() {
					return "", fmt.Errorf("We should have only 1 EphemeralRunnerSet, but got %v", len(runnerSetList.Items))
				}

				return runnerSetList.Items[0].Labels[LabelKeyRunnerSpecHash], nil
				return runnerSetList.Items[0].Labels[labelKeyRunnerSpecHash], nil
			},
			autoscalingRunnerSetTestTimeout,
			autoscalingRunnerSetTestInterval).ShouldNot(BeEquivalentTo(runnerSet.Labels[LabelKeyRunnerSpecHash]), "New EphemeralRunnerSet should be created")
			autoscalingRunnerSetTestInterval).ShouldNot(BeEquivalentTo(runnerSet.Labels[labelKeyRunnerSpecHash]), "New EphemeralRunnerSet should be created")

		// We should create a new listener
		Eventually(
@@ -571,6 +573,7 @@ var _ = Describe("Test AutoScalingController updates", func() {

			update := autoscalingRunnerSet.DeepCopy()
			update.Spec.RunnerScaleSetName = "testset_update"

			err = k8sClient.Patch(ctx, update, client.MergeFrom(autoscalingRunnerSet))
			Expect(err).NotTo(HaveOccurred(), "failed to update AutoScalingRunnerSet")

@@ -1036,7 +1039,7 @@ var _ = Describe("Test Client optional configuration", func() {
				g.Expect(listener.Spec.GitHubServerTLS).To(BeEquivalentTo(autoscalingRunnerSet.Spec.GitHubServerTLS), "listener does not have TLS config")
			},
			autoscalingRunnerSetTestTimeout,
			autoscalingListenerTestInterval,
			autoscalingRunnerSetTestInterval,
		).Should(Succeed(), "tls config is incorrect")
	})

@@ -1093,8 +1096,372 @@ var _ = Describe("Test Client optional configuration", func() {
				g.Expect(runnerSet.Spec.EphemeralRunnerSpec.GitHubServerTLS).To(BeEquivalentTo(autoscalingRunnerSet.Spec.GitHubServerTLS), "EphemeralRunnerSpec does not have TLS config")
			},
			autoscalingRunnerSetTestTimeout,
			autoscalingListenerTestInterval,
			autoscalingRunnerSetTestInterval,
		).Should(Succeed())
	})
})
})
var _ = Describe("Test external permissions cleanup", func() {
	It("Should clean up kubernetes mode permissions", func() {
		ctx := context.Background()
		autoscalingNS, mgr := createNamespace(GinkgoT(), k8sClient)

		configSecret := createDefaultSecret(GinkgoT(), k8sClient, autoscalingNS.Name)

		controller := &AutoscalingRunnerSetReconciler{
			Client:                             mgr.GetClient(),
			Scheme:                             mgr.GetScheme(),
			Log:                                logf.Log,
			ControllerNamespace:                autoscalingNS.Name,
			DefaultRunnerScaleSetListenerImage: "ghcr.io/actions/arc",
			ActionsClient:                      fake.NewMultiClient(),
		}
		err := controller.SetupWithManager(mgr)
		Expect(err).NotTo(HaveOccurred(), "failed to setup controller")

		startManagers(GinkgoT(), mgr)

		min := 1
		max := 10
		autoscalingRunnerSet := &v1alpha1.AutoscalingRunnerSet{
			ObjectMeta: metav1.ObjectMeta{
				Name:      "test-asrs",
				Namespace: autoscalingNS.Name,
				Labels: map[string]string{
					"app.kubernetes.io/name": "gha-runner-scale-set",
				},
				Annotations: map[string]string{
					AnnotationKeyKubernetesModeRoleBindingName:    "kube-mode-role-binding",
					AnnotationKeyKubernetesModeRoleName:           "kube-mode-role",
					AnnotationKeyKubernetesModeServiceAccountName: "kube-mode-service-account",
				},
			},
			Spec: v1alpha1.AutoscalingRunnerSetSpec{
				GitHubConfigUrl:    "https://github.com/owner/repo",
				GitHubConfigSecret: configSecret.Name,
				MaxRunners:         &max,
				MinRunners:         &min,
				RunnerGroup:        "testgroup",
				Template: corev1.PodTemplateSpec{
					Spec: corev1.PodSpec{
						Containers: []corev1.Container{
							{
								Name:  "runner",
								Image: "ghcr.io/actions/runner",
							},
						},
					},
				},
			},
		}

		role := &rbacv1.Role{
			ObjectMeta: metav1.ObjectMeta{
				Name:       autoscalingRunnerSet.Annotations[AnnotationKeyKubernetesModeRoleName],
				Namespace:  autoscalingRunnerSet.Namespace,
				Finalizers: []string{AutoscalingRunnerSetCleanupFinalizerName},
			},
		}

		err = k8sClient.Create(ctx, role)
		Expect(err).NotTo(HaveOccurred(), "failed to create kubernetes mode role")

		serviceAccount := &corev1.ServiceAccount{
			ObjectMeta: metav1.ObjectMeta{
				Name:       autoscalingRunnerSet.Annotations[AnnotationKeyKubernetesModeServiceAccountName],
				Namespace:  autoscalingRunnerSet.Namespace,
				Finalizers: []string{AutoscalingRunnerSetCleanupFinalizerName},
			},
		}

		err = k8sClient.Create(ctx, serviceAccount)
		Expect(err).NotTo(HaveOccurred(), "failed to create kubernetes mode service account")

		roleBinding := &rbacv1.RoleBinding{
			ObjectMeta: metav1.ObjectMeta{
				Name:       autoscalingRunnerSet.Annotations[AnnotationKeyKubernetesModeRoleBindingName],
				Namespace:  autoscalingRunnerSet.Namespace,
				Finalizers: []string{AutoscalingRunnerSetCleanupFinalizerName},
			},
			Subjects: []rbacv1.Subject{
				{
					Kind:      "ServiceAccount",
					Name:      serviceAccount.Name,
					Namespace: serviceAccount.Namespace,
				},
			},
			RoleRef: rbacv1.RoleRef{
				APIGroup: rbacv1.GroupName,
				// Kind is the type of resource being referenced
				Kind: "Role",
				Name: role.Name,
			},
		}

		err = k8sClient.Create(ctx, roleBinding)
		Expect(err).NotTo(HaveOccurred(), "failed to create kubernetes mode role binding")

		err = k8sClient.Create(ctx, autoscalingRunnerSet)
		Expect(err).NotTo(HaveOccurred(), "failed to create AutoScalingRunnerSet")

		Eventually(
			func() (string, error) {
				created := new(v1alpha1.AutoscalingRunnerSet)
				err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingRunnerSet.Name, Namespace: autoscalingRunnerSet.Namespace}, created)
				if err != nil {
					return "", err
				}
				if len(created.Finalizers) == 0 {
					return "", nil
				}
				return created.Finalizers[0], nil
			},
			autoscalingRunnerSetTestTimeout,
			autoscalingRunnerSetTestInterval,
		).Should(BeEquivalentTo(autoscalingRunnerSetFinalizerName), "AutoScalingRunnerSet should have a finalizer")

		err = k8sClient.Delete(ctx, autoscalingRunnerSet)
		Expect(err).NotTo(HaveOccurred(), "failed to delete autoscaling runner set")

		err = k8sClient.Delete(ctx, roleBinding)
		Expect(err).NotTo(HaveOccurred(), "failed to delete kubernetes mode role binding")

		err = k8sClient.Delete(ctx, role)
		Expect(err).NotTo(HaveOccurred(), "failed to delete kubernetes mode role")

		err = k8sClient.Delete(ctx, serviceAccount)
		Expect(err).NotTo(HaveOccurred(), "failed to delete kubernetes mode service account")

		Eventually(
			func() bool {
				r := new(rbacv1.RoleBinding)
				err := k8sClient.Get(ctx, types.NamespacedName{
					Name:      roleBinding.Name,
					Namespace: roleBinding.Namespace,
				}, r)

				return errors.IsNotFound(err)
			},
			autoscalingRunnerSetTestTimeout,
			autoscalingRunnerSetTestInterval,
		).Should(BeTrue(), "Expected role binding to be cleaned up")

		Eventually(
			func() bool {
				r := new(rbacv1.Role)
				err := k8sClient.Get(ctx, types.NamespacedName{
					Name:      role.Name,
					Namespace: role.Namespace,
				}, r)

				return errors.IsNotFound(err)
			},
			autoscalingRunnerSetTestTimeout,
			autoscalingRunnerSetTestInterval,
		).Should(BeTrue(), "Expected role to be cleaned up")
	})
	It("Should clean up manager permissions and no-permission service account", func() {
		ctx := context.Background()
		autoscalingNS, mgr := createNamespace(GinkgoT(), k8sClient)

		controller := &AutoscalingRunnerSetReconciler{
			Client:                             mgr.GetClient(),
			Scheme:                             mgr.GetScheme(),
			Log:                                logf.Log,
			ControllerNamespace:                autoscalingNS.Name,
			DefaultRunnerScaleSetListenerImage: "ghcr.io/actions/arc",
			ActionsClient:                      fake.NewMultiClient(),
		}
		err := controller.SetupWithManager(mgr)
		Expect(err).NotTo(HaveOccurred(), "failed to setup controller")

		startManagers(GinkgoT(), mgr)

		min := 1
		max := 10
		autoscalingRunnerSet := &v1alpha1.AutoscalingRunnerSet{
			ObjectMeta: metav1.ObjectMeta{
				Name:      "test-asrs",
				Namespace: autoscalingNS.Name,
				Labels: map[string]string{
					"app.kubernetes.io/name": "gha-runner-scale-set",
				},
				Annotations: map[string]string{
					AnnotationKeyManagerRoleName:                "manager-role",
					AnnotationKeyManagerRoleBindingName:         "manager-role-binding",
					AnnotationKeyGitHubSecretName:               "gh-secret-name",
					AnnotationKeyNoPermissionServiceAccountName: "no-permission-sa",
				},
			},
			Spec: v1alpha1.AutoscalingRunnerSetSpec{
				GitHubConfigUrl: "https://github.com/owner/repo",
				MaxRunners:      &max,
				MinRunners:      &min,
				RunnerGroup:     "testgroup",
				Template: corev1.PodTemplateSpec{
					Spec: corev1.PodSpec{
						Containers: []corev1.Container{
							{
								Name:  "runner",
								Image: "ghcr.io/actions/runner",
							},
						},
					},
				},
			},
		}

		secret := &corev1.Secret{
			ObjectMeta: metav1.ObjectMeta{
				Name:       autoscalingRunnerSet.Annotations[AnnotationKeyGitHubSecretName],
				Namespace:  autoscalingRunnerSet.Namespace,
				Finalizers: []string{AutoscalingRunnerSetCleanupFinalizerName},
			},
			Data: map[string][]byte{
				"github_token": []byte(defaultGitHubToken),
			},
		}

		err = k8sClient.Create(context.Background(), secret)
		Expect(err).NotTo(HaveOccurred(), "failed to create github secret")

		autoscalingRunnerSet.Spec.GitHubConfigSecret = secret.Name

		role := &rbacv1.Role{
			ObjectMeta: metav1.ObjectMeta{
				Name:       autoscalingRunnerSet.Annotations[AnnotationKeyManagerRoleName],
				Namespace:  autoscalingRunnerSet.Namespace,
				Finalizers: []string{AutoscalingRunnerSetCleanupFinalizerName},
			},
		}

		err = k8sClient.Create(ctx, role)
		Expect(err).NotTo(HaveOccurred(), "failed to create manager role")

		roleBinding := &rbacv1.RoleBinding{
			ObjectMeta: metav1.ObjectMeta{
				Name:       autoscalingRunnerSet.Annotations[AnnotationKeyManagerRoleBindingName],
				Namespace:  autoscalingRunnerSet.Namespace,
				Finalizers: []string{AutoscalingRunnerSetCleanupFinalizerName},
			},
			RoleRef: rbacv1.RoleRef{
				APIGroup: rbacv1.GroupName,
				Kind:     "Role",
				Name:     role.Name,
			},
		}

		err = k8sClient.Create(ctx, roleBinding)
		Expect(err).NotTo(HaveOccurred(), "failed to create manager role binding")

		noPermissionServiceAccount := &corev1.ServiceAccount{
			ObjectMeta: metav1.ObjectMeta{
				Name:       autoscalingRunnerSet.Annotations[AnnotationKeyNoPermissionServiceAccountName],
				Namespace:  autoscalingRunnerSet.Namespace,
				Finalizers: []string{AutoscalingRunnerSetCleanupFinalizerName},
			},
		}

		err = k8sClient.Create(ctx, noPermissionServiceAccount)
		Expect(err).NotTo(HaveOccurred(), "failed to create no permission service account")

		err = k8sClient.Create(ctx, autoscalingRunnerSet)
		Expect(err).NotTo(HaveOccurred(), "failed to create AutoScalingRunnerSet")

		Eventually(
			func() (string, error) {
				created := new(v1alpha1.AutoscalingRunnerSet)
				err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingRunnerSet.Name, Namespace: autoscalingRunnerSet.Namespace}, created)
				if err != nil {
					return "", err
				}
				if len(created.Finalizers) == 0 {
					return "", nil
				}
				return created.Finalizers[0], nil
			},
			autoscalingRunnerSetTestTimeout,
			autoscalingRunnerSetTestInterval,
		).Should(BeEquivalentTo(autoscalingRunnerSetFinalizerName), "AutoScalingRunnerSet should have a finalizer")

		err = k8sClient.Delete(ctx, autoscalingRunnerSet)
		Expect(err).NotTo(HaveOccurred(), "failed to delete autoscaling runner set")

		err = k8sClient.Delete(ctx, noPermissionServiceAccount)
		Expect(err).NotTo(HaveOccurred(), "failed to delete no permission service account")

		err = k8sClient.Delete(ctx, secret)
		Expect(err).NotTo(HaveOccurred(), "failed to delete GitHub secret")

		err = k8sClient.Delete(ctx, roleBinding)
		Expect(err).NotTo(HaveOccurred(), "failed to delete manager role binding")

		err = k8sClient.Delete(ctx, role)
		Expect(err).NotTo(HaveOccurred(), "failed to delete manager role")

		Eventually(
			func() bool {
				r := new(corev1.ServiceAccount)
				err := k8sClient.Get(
					ctx,
					types.NamespacedName{
						Name:      noPermissionServiceAccount.Name,
						Namespace: noPermissionServiceAccount.Namespace,
					},
					r,
				)
				return errors.IsNotFound(err)
			},
			autoscalingRunnerSetTestTimeout,
			autoscalingRunnerSetTestInterval,
		).Should(BeTrue(), "Expected no permission service account to be cleaned up")

		Eventually(
			func() bool {
				r := new(corev1.Secret)
				err := k8sClient.Get(ctx, types.NamespacedName{
					Name:      secret.Name,
					Namespace: secret.Namespace,
				}, r)

				return errors.IsNotFound(err)
			},
			autoscalingRunnerSetTestTimeout,
			autoscalingRunnerSetTestInterval,
		).Should(BeTrue(), "Expected GitHub secret to be cleaned up")

		Eventually(
			func() bool {
				r := new(rbacv1.RoleBinding)
				err := k8sClient.Get(ctx, types.NamespacedName{
					Name:      roleBinding.Name,
					Namespace: roleBinding.Namespace,
				}, r)

				return errors.IsNotFound(err)
			},
			autoscalingRunnerSetTestTimeout,
			autoscalingRunnerSetTestInterval,
		).Should(BeTrue(), "Expected role binding to be cleaned up")

		Eventually(
			func() bool {
				r := new(rbacv1.Role)
				err := k8sClient.Get(
					ctx,
					types.NamespacedName{
						Name:      role.Name,
						Namespace: role.Namespace,
					},
					r,
				)

				return errors.IsNotFound(err)
			},
			autoscalingRunnerSetTestTimeout,
			autoscalingRunnerSetTestInterval,
		).Should(BeTrue(), "Expected role to be cleaned up")
	})
})
@@ -1,5 +1,7 @@
package actionsgithubcom

import corev1 "k8s.io/api/core/v1"

const (
	LabelKeyRunnerTemplateHash = "runner-template-hash"
	LabelKeyPodTemplateHash    = "pod-template-hash"
@@ -16,3 +18,47 @@ const (
	EnvVarHTTPSProxy = "https_proxy"
	EnvVarNoProxy    = "no_proxy"
)

// Labels applied to resources
const (
	// Kubernetes labels
	LabelKeyKubernetesPartOf    = "app.kubernetes.io/part-of"
	LabelKeyKubernetesComponent = "app.kubernetes.io/component"
	LabelKeyKubernetesVersion   = "app.kubernetes.io/version"

	// GitHub labels
	LabelKeyGitHubScaleSetName      = "actions.github.com/scale-set-name"
	LabelKeyGitHubScaleSetNamespace = "actions.github.com/scale-set-namespace"
	LabelKeyGitHubEnterprise        = "actions.github.com/enterprise"
	LabelKeyGitHubOrganization      = "actions.github.com/organization"
	LabelKeyGitHubRepository        = "actions.github.com/repository"
)

// Finalizer used to protect resources from deletion while the AutoscalingRunnerSet is running
const AutoscalingRunnerSetCleanupFinalizerName = "actions.github.com/cleanup-protection"

const AnnotationKeyGitHubRunnerGroupName = "actions.github.com/runner-group-name"

// Labels applied to listener roles
const (
	labelKeyListenerName      = "auto-scaling-listener-name"
	labelKeyListenerNamespace = "auto-scaling-listener-namespace"
)

// Annotations applied for later cleanup of resources
const (
	AnnotationKeyManagerRoleBindingName           = "actions.github.com/cleanup-manager-role-binding"
	AnnotationKeyManagerRoleName                  = "actions.github.com/cleanup-manager-role-name"
	AnnotationKeyKubernetesModeRoleName           = "actions.github.com/cleanup-kubernetes-mode-role-name"
	AnnotationKeyKubernetesModeRoleBindingName    = "actions.github.com/cleanup-kubernetes-mode-role-binding-name"
	AnnotationKeyKubernetesModeServiceAccountName = "actions.github.com/cleanup-kubernetes-mode-service-account-name"
	AnnotationKeyGitHubSecretName                 = "actions.github.com/cleanup-github-secret-name"
	AnnotationKeyNoPermissionServiceAccountName   = "actions.github.com/cleanup-no-permission-service-account-name"
)

// DefaultScaleSetListenerImagePullPolicy is the default pull policy applied
// to the listener when ImagePullPolicy is not specified
const DefaultScaleSetListenerImagePullPolicy = corev1.PullIfNotPresent

// resourceOwnerKey is the field selector matching the owner name of a particular resource
const resourceOwnerKey = ".metadata.controller"
@@ -40,8 +40,7 @@ import (
)

const (
	ephemeralRunnerSetReconcilerOwnerKey = ".metadata.controller"
	ephemeralRunnerSetFinalizerName      = "ephemeralrunner.actions.github.com/finalizer"
	ephemeralRunnerSetFinalizerName = "ephemeralrunner.actions.github.com/finalizer"
)

// EphemeralRunnerSetReconciler reconciles an EphemeralRunnerSet object
@@ -56,6 +55,7 @@ type EphemeralRunnerSetReconciler struct {

//+kubebuilder:rbac:groups=actions.github.com,resources=ephemeralrunnersets,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=actions.github.com,resources=ephemeralrunnersets/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=actions.github.com,resources=ephemeralrunnersets/finalizers,verbs=update;patch
//+kubebuilder:rbac:groups=actions.github.com,resources=ephemeralrunners,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=actions.github.com,resources=ephemeralrunners/status,verbs=get

@@ -146,7 +146,7 @@ func (r *EphemeralRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl.R
		ctx,
		ephemeralRunnerList,
		client.InNamespace(req.Namespace),
		client.MatchingFields{ephemeralRunnerSetReconcilerOwnerKey: req.Name},
		client.MatchingFields{resourceOwnerKey: req.Name},
	)
	if err != nil {
		log.Error(err, "Unable to list child ephemeral runners")
@@ -242,7 +242,7 @@ func (r *EphemeralRunnerSetReconciler) cleanUpProxySecret(ctx context.Context, e

func (r *EphemeralRunnerSetReconciler) cleanUpEphemeralRunners(ctx context.Context, ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet, log logr.Logger) (bool, error) {
	ephemeralRunnerList := new(v1alpha1.EphemeralRunnerList)
	err := r.List(ctx, ephemeralRunnerList, client.InNamespace(ephemeralRunnerSet.Namespace), client.MatchingFields{ephemeralRunnerSetReconcilerOwnerKey: ephemeralRunnerSet.Name})
	err := r.List(ctx, ephemeralRunnerList, client.InNamespace(ephemeralRunnerSet.Namespace), client.MatchingFields{resourceOwnerKey: ephemeralRunnerSet.Name})
	if err != nil {
		return false, fmt.Errorf("failed to list child ephemeral runners: %v", err)
	}
@@ -521,7 +521,7 @@ func (r *EphemeralRunnerSetReconciler) actionsClientOptionsFor(ctx context.Conte
// SetupWithManager sets up the controller with the Manager.
func (r *EphemeralRunnerSetReconciler) SetupWithManager(mgr ctrl.Manager) error {
	// Index EphemeralRunner owned by EphemeralRunnerSet so we can perform faster look ups.
	if err := mgr.GetFieldIndexer().IndexField(context.Background(), &v1alpha1.EphemeralRunner{}, ephemeralRunnerSetReconcilerOwnerKey, func(rawObj client.Object) []string {
	if err := mgr.GetFieldIndexer().IndexField(context.Background(), &v1alpha1.EphemeralRunner{}, resourceOwnerKey, func(rawObj client.Object) []string {
		groupVersion := v1alpha1.GroupVersion.String()

		// grab the job object, extract the owner...

@@ -20,29 +20,6 @@ const (
	jitTokenKey = "jitToken"
)

// Labels applied to resources
const (
	// Kubernetes labels
	LabelKeyKubernetesPartOf    = "app.kubernetes.io/part-of"
	LabelKeyKubernetesComponent = "app.kubernetes.io/component"
	LabelKeyKubernetesVersion   = "app.kubernetes.io/version"

	// Github labels
	LabelKeyGitHubScaleSetName      = "actions.github.com/scale-set-name"
	LabelKeyGitHubScaleSetNamespace = "actions.github.com/scale-set-namespace"
	LabelKeyGitHubEnterprise        = "actions.github.com/enterprise"
	LabelKeyGitHubOrganization      = "actions.github.com/organization"
	LabelKeyGitHubRepository        = "actions.github.com/repository"
)

const AnnotationKeyGitHubRunnerGroupName = "actions.github.com/runner-group-name"

// Labels applied to listener roles
const (
	labelKeyListenerName      = "auto-scaling-listener-name"
	labelKeyListenerNamespace = "auto-scaling-listener-namespace"
)

var commonLabelKeys = [...]string{
	LabelKeyKubernetesPartOf,
	LabelKeyKubernetesComponent,
@@ -56,8 +33,77 @@ var commonLabelKeys = [...]string{

const labelValueKubernetesPartOf = "gha-runner-scale-set"

// scaleSetListenerImagePullPolicy is applied to all listeners
var scaleSetListenerImagePullPolicy = DefaultScaleSetListenerImagePullPolicy

func SetListenerImagePullPolicy(pullPolicy string) bool {
	switch p := corev1.PullPolicy(pullPolicy); p {
	case corev1.PullAlways, corev1.PullNever, corev1.PullIfNotPresent:
		scaleSetListenerImagePullPolicy = p
		return true
	default:
		return false
	}
}
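
For context, a hypothetical call site for `SetListenerImagePullPolicy` might look like the following sketch; the flag name and fatal-exit handling are illustrative assumptions, not the repository's actual wiring:

```go
// A hypothetical call site for SetListenerImagePullPolicy; the flag name and
// fatal-exit behavior are illustrative assumptions. Assumes this code lives
// in (or imports) the package that defines SetListenerImagePullPolicy.
package main

import (
	"flag"
	"log"
)

func main() {
	pullPolicy := flag.String("listener-image-pull-policy", "IfNotPresent", "pull policy applied to listener pods")
	flag.Parse()

	// SetListenerImagePullPolicy only accepts Always, Never, or IfNotPresent
	// and reports whether the value was applied.
	if !SetListenerImagePullPolicy(*pullPolicy) {
		log.Fatalf("invalid listener image pull policy: %q", *pullPolicy)
	}
}
```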

type resourceBuilder struct{}

func (b *resourceBuilder) newAutoScalingListener(autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet, namespace, image string, imagePullSecrets []corev1.LocalObjectReference) (*v1alpha1.AutoscalingListener, error) {
	runnerScaleSetId, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdAnnotationKey])
	if err != nil {
		return nil, err
	}

	effectiveMinRunners := 0
	effectiveMaxRunners := math.MaxInt32
	if autoscalingRunnerSet.Spec.MaxRunners != nil {
		effectiveMaxRunners = *autoscalingRunnerSet.Spec.MaxRunners
	}
	if autoscalingRunnerSet.Spec.MinRunners != nil {
		effectiveMinRunners = *autoscalingRunnerSet.Spec.MinRunners
	}

	githubConfig, err := actions.ParseGitHubConfigFromURL(autoscalingRunnerSet.Spec.GitHubConfigUrl)
	if err != nil {
		return nil, fmt.Errorf("failed to parse github config from url: %v", err)
	}

	autoscalingListener := &v1alpha1.AutoscalingListener{
		ObjectMeta: metav1.ObjectMeta{
			Name:      scaleSetListenerName(autoscalingRunnerSet),
			Namespace: namespace,
			Labels: map[string]string{
				LabelKeyGitHubScaleSetNamespace: autoscalingRunnerSet.Namespace,
				LabelKeyGitHubScaleSetName:      autoscalingRunnerSet.Name,
				LabelKeyKubernetesPartOf:        labelValueKubernetesPartOf,
				LabelKeyKubernetesComponent:     "runner-scale-set-listener",
				LabelKeyKubernetesVersion:       autoscalingRunnerSet.Labels[LabelKeyKubernetesVersion],
				LabelKeyGitHubEnterprise:        githubConfig.Enterprise,
				LabelKeyGitHubOrganization:      githubConfig.Organization,
				LabelKeyGitHubRepository:        githubConfig.Repository,
				labelKeyRunnerSpecHash:          autoscalingRunnerSet.ListenerSpecHash(),
			},
		},
		Spec: v1alpha1.AutoscalingListenerSpec{
			GitHubConfigUrl:               autoscalingRunnerSet.Spec.GitHubConfigUrl,
			GitHubConfigSecret:            autoscalingRunnerSet.Spec.GitHubConfigSecret,
			RunnerScaleSetId:              runnerScaleSetId,
			AutoscalingRunnerSetNamespace: autoscalingRunnerSet.Namespace,
			AutoscalingRunnerSetName:      autoscalingRunnerSet.Name,
			EphemeralRunnerSetName:        ephemeralRunnerSet.Name,
			MinRunners:                    effectiveMinRunners,
			MaxRunners:                    effectiveMaxRunners,
			Image:                         image,
			ImagePullPolicy:               scaleSetListenerImagePullPolicy,
			ImagePullSecrets:              imagePullSecrets,
			Proxy:                         autoscalingRunnerSet.Spec.Proxy,
			GitHubServerTLS:               autoscalingRunnerSet.Spec.GitHubServerTLS,
		},
	}

	return autoscalingListener, nil
}

func (b *resourceBuilder) newScaleSetListenerPod(autoscalingListener *v1alpha1.AutoscalingListener, serviceAccount *corev1.ServiceAccount, secret *corev1.Secret, envs ...corev1.EnvVar) *corev1.Pod {
	listenerEnv := []corev1.EnvVar{
		{
@@ -150,7 +196,7 @@ func (b *resourceBuilder) newScaleSetListenerPod(autoscalingListener *v1alpha1.A
			Name:            autoscalingListenerContainerName,
			Image:           autoscalingListener.Spec.Image,
			Env:             listenerEnv,
			ImagePullPolicy: corev1.PullIfNotPresent,
			ImagePullPolicy: autoscalingListener.Spec.ImagePullPolicy,
			Command: []string{
				"/github-runnerscaleset-listener",
			},
@@ -181,54 +227,6 @@ func (b *resourceBuilder) newScaleSetListenerPod(autoscalingListener *v1alpha1.A
	return newRunnerScaleSetListenerPod
}

func (b *resourceBuilder) newEphemeralRunnerSet(autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet) (*v1alpha1.EphemeralRunnerSet, error) {
	runnerScaleSetId, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdAnnotationKey])
	if err != nil {
		return nil, err
	}
	runnerSpecHash := autoscalingRunnerSet.RunnerSetSpecHash()

	newLabels := map[string]string{
		LabelKeyRunnerSpecHash:          runnerSpecHash,
		LabelKeyKubernetesPartOf:        labelValueKubernetesPartOf,
		LabelKeyKubernetesComponent:     "runner-set",
		LabelKeyKubernetesVersion:       autoscalingRunnerSet.Labels[LabelKeyKubernetesVersion],
		LabelKeyGitHubScaleSetName:      autoscalingRunnerSet.Name,
		LabelKeyGitHubScaleSetNamespace: autoscalingRunnerSet.Namespace,
	}

	if err := applyGitHubURLLabels(autoscalingRunnerSet.Spec.GitHubConfigUrl, newLabels); err != nil {
		return nil, fmt.Errorf("failed to apply GitHub URL labels: %v", err)
	}

	newAnnotations := map[string]string{
		AnnotationKeyGitHubRunnerGroupName: autoscalingRunnerSet.Annotations[AnnotationKeyGitHubRunnerGroupName],
	}

	newEphemeralRunnerSet := &v1alpha1.EphemeralRunnerSet{
		TypeMeta: metav1.TypeMeta{},
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: autoscalingRunnerSet.ObjectMeta.Name + "-",
			Namespace:    autoscalingRunnerSet.ObjectMeta.Namespace,
			Labels:       newLabels,
			Annotations:  newAnnotations,
		},
		Spec: v1alpha1.EphemeralRunnerSetSpec{
			Replicas: 0,
			EphemeralRunnerSpec: v1alpha1.EphemeralRunnerSpec{
				RunnerScaleSetId:   runnerScaleSetId,
				GitHubConfigUrl:    autoscalingRunnerSet.Spec.GitHubConfigUrl,
				GitHubConfigSecret: autoscalingRunnerSet.Spec.GitHubConfigSecret,
				Proxy:              autoscalingRunnerSet.Spec.Proxy,
				GitHubServerTLS:    autoscalingRunnerSet.Spec.GitHubServerTLS,
				PodTemplateSpec:    autoscalingRunnerSet.Spec.Template,
			},
		},
	}

	return newEphemeralRunnerSet, nil
}

func (b *resourceBuilder) newScaleSetListenerServiceAccount(autoscalingListener *v1alpha1.AutoscalingListener) *corev1.ServiceAccount {
	return &corev1.ServiceAccount{
		ObjectMeta: metav1.ObjectMeta{
@@ -318,59 +316,52 @@ func (b *resourceBuilder) newScaleSetListenerSecretMirror(autoscalingListener *v
	return newListenerSecret
}

func (b *resourceBuilder) newAutoScalingListener(autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet, namespace, image string, imagePullSecrets []corev1.LocalObjectReference) (*v1alpha1.AutoscalingListener, error) {
func (b *resourceBuilder) newEphemeralRunnerSet(autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet) (*v1alpha1.EphemeralRunnerSet, error) {
	runnerScaleSetId, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdAnnotationKey])
	if err != nil {
		return nil, err
	}
	runnerSpecHash := autoscalingRunnerSet.RunnerSetSpecHash()

	effectiveMinRunners := 0
	effectiveMaxRunners := math.MaxInt32
	if autoscalingRunnerSet.Spec.MaxRunners != nil {
		effectiveMaxRunners = *autoscalingRunnerSet.Spec.MaxRunners
	}
	if autoscalingRunnerSet.Spec.MinRunners != nil {
		effectiveMinRunners = *autoscalingRunnerSet.Spec.MinRunners
	newLabels := map[string]string{
		labelKeyRunnerSpecHash:          runnerSpecHash,
		LabelKeyKubernetesPartOf:        labelValueKubernetesPartOf,
		LabelKeyKubernetesComponent:     "runner-set",
		LabelKeyKubernetesVersion:       autoscalingRunnerSet.Labels[LabelKeyKubernetesVersion],
		LabelKeyGitHubScaleSetName:      autoscalingRunnerSet.Name,
		LabelKeyGitHubScaleSetNamespace: autoscalingRunnerSet.Namespace,
	}

	githubConfig, err := actions.ParseGitHubConfigFromURL(autoscalingRunnerSet.Spec.GitHubConfigUrl)
	if err != nil {
		return nil, fmt.Errorf("failed to parse github config from url: %v", err)
	if err := applyGitHubURLLabels(autoscalingRunnerSet.Spec.GitHubConfigUrl, newLabels); err != nil {
		return nil, fmt.Errorf("failed to apply GitHub URL labels: %v", err)
	}

	autoscalingListener := &v1alpha1.AutoscalingListener{
	newAnnotations := map[string]string{
		AnnotationKeyGitHubRunnerGroupName: autoscalingRunnerSet.Annotations[AnnotationKeyGitHubRunnerGroupName],
	}

	newEphemeralRunnerSet := &v1alpha1.EphemeralRunnerSet{
		TypeMeta: metav1.TypeMeta{},
		ObjectMeta: metav1.ObjectMeta{
			Name:      scaleSetListenerName(autoscalingRunnerSet),
			Namespace: namespace,
			Labels: map[string]string{
				LabelKeyGitHubScaleSetNamespace: autoscalingRunnerSet.Namespace,
				LabelKeyGitHubScaleSetName:      autoscalingRunnerSet.Name,
				LabelKeyKubernetesPartOf:        labelValueKubernetesPartOf,
				LabelKeyKubernetesComponent:     "runner-scale-set-listener",
				LabelKeyKubernetesVersion:       autoscalingRunnerSet.Labels[LabelKeyKubernetesVersion],
				LabelKeyGitHubEnterprise:        githubConfig.Enterprise,
				LabelKeyGitHubOrganization:      githubConfig.Organization,
				LabelKeyGitHubRepository:        githubConfig.Repository,
				LabelKeyRunnerSpecHash:          autoscalingRunnerSet.ListenerSpecHash(),
			GenerateName: autoscalingRunnerSet.ObjectMeta.Name + "-",
			Namespace:    autoscalingRunnerSet.ObjectMeta.Namespace,
			Labels:       newLabels,
			Annotations:  newAnnotations,
		},
		Spec: v1alpha1.EphemeralRunnerSetSpec{
			Replicas: 0,
			EphemeralRunnerSpec: v1alpha1.EphemeralRunnerSpec{
				RunnerScaleSetId:   runnerScaleSetId,
				GitHubConfigUrl:    autoscalingRunnerSet.Spec.GitHubConfigUrl,
				GitHubConfigSecret: autoscalingRunnerSet.Spec.GitHubConfigSecret,
				Proxy:              autoscalingRunnerSet.Spec.Proxy,
				GitHubServerTLS:    autoscalingRunnerSet.Spec.GitHubServerTLS,
				PodTemplateSpec:    autoscalingRunnerSet.Spec.Template,
			},
		},
		Spec: v1alpha1.AutoscalingListenerSpec{
			GitHubConfigUrl:               autoscalingRunnerSet.Spec.GitHubConfigUrl,
			GitHubConfigSecret:            autoscalingRunnerSet.Spec.GitHubConfigSecret,
			RunnerScaleSetId:              runnerScaleSetId,
			AutoscalingRunnerSetNamespace: autoscalingRunnerSet.Namespace,
			AutoscalingRunnerSetName:      autoscalingRunnerSet.Name,
			EphemeralRunnerSetName:        ephemeralRunnerSet.Name,
			MinRunners:                    effectiveMinRunners,
			MaxRunners:                    effectiveMaxRunners,
			Image:                         image,
			ImagePullSecrets:              imagePullSecrets,
			Proxy:                         autoscalingRunnerSet.Spec.Proxy,
			GitHubServerTLS:               autoscalingRunnerSet.Spec.GitHubServerTLS,
		},
	}

	return autoscalingListener, nil
	return newEphemeralRunnerSet, nil
}

func (b *resourceBuilder) newEphemeralRunner(ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet) *v1alpha1.EphemeralRunner {

@@ -36,7 +36,7 @@ func TestLabelPropagation(t *testing.T) {
	assert.Equal(t, labelValueKubernetesPartOf, ephemeralRunnerSet.Labels[LabelKeyKubernetesPartOf])
	assert.Equal(t, "runner-set", ephemeralRunnerSet.Labels[LabelKeyKubernetesComponent])
	assert.Equal(t, autoscalingRunnerSet.Labels[LabelKeyKubernetesVersion], ephemeralRunnerSet.Labels[LabelKeyKubernetesVersion])
	assert.NotEmpty(t, ephemeralRunnerSet.Labels[LabelKeyRunnerSpecHash])
	assert.NotEmpty(t, ephemeralRunnerSet.Labels[labelKeyRunnerSpecHash])
	assert.Equal(t, autoscalingRunnerSet.Name, ephemeralRunnerSet.Labels[LabelKeyGitHubScaleSetName])
	assert.Equal(t, autoscalingRunnerSet.Namespace, ephemeralRunnerSet.Labels[LabelKeyGitHubScaleSetNamespace])
	assert.Equal(t, "", ephemeralRunnerSet.Labels[LabelKeyGitHubEnterprise])
@@ -49,7 +49,7 @@ func TestLabelPropagation(t *testing.T) {
	assert.Equal(t, labelValueKubernetesPartOf, listener.Labels[LabelKeyKubernetesPartOf])
	assert.Equal(t, "runner-scale-set-listener", listener.Labels[LabelKeyKubernetesComponent])
	assert.Equal(t, autoscalingRunnerSet.Labels[LabelKeyKubernetesVersion], listener.Labels[LabelKeyKubernetesVersion])
	assert.NotEmpty(t, ephemeralRunnerSet.Labels[LabelKeyRunnerSpecHash])
	assert.NotEmpty(t, ephemeralRunnerSet.Labels[labelKeyRunnerSpecHash])
	assert.Equal(t, autoscalingRunnerSet.Name, listener.Labels[LabelKeyGitHubScaleSetName])
	assert.Equal(t, autoscalingRunnerSet.Namespace, listener.Labels[LabelKeyGitHubScaleSetNamespace])
	assert.Equal(t, "", listener.Labels[LabelKeyGitHubEnterprise])

@@ -105,12 +105,14 @@ func SetupIntegrationTest(ctx2 context.Context) *testEnvironment {
		Log:          logf.Log,
		Recorder:     mgr.GetEventRecorderFor("runnerreplicaset-controller"),
		GitHubClient: multiClient,
		RunnerImage:  "example/runner:test",
		DockerImage:  "example/docker:test",
		Name:         controllerName("runner"),
		RegistrationRecheckInterval: time.Millisecond * 100,
		RegistrationRecheckJitter:   time.Millisecond * 10,
		UnregistrationRetryDelay:    1 * time.Second,
		RunnerPodDefaults: RunnerPodDefaults{
			RunnerImage: "example/runner:test",
			DockerImage: "example/docker:test",
		},
	}
	err = runnerController.SetupWithManager(mgr)
	Expect(err).NotTo(HaveOccurred(), "failed to setup runner controller")

@@ -285,16 +285,20 @@ func secretDataToGitHubClientConfig(data map[string][]byte) (*github.Config, err

	appID := string(data["github_app_id"])

	conf.AppID, err = strconv.ParseInt(appID, 10, 64)
	if err != nil {
		return nil, err
	if appID != "" {
		conf.AppID, err = strconv.ParseInt(appID, 10, 64)
		if err != nil {
			return nil, err
		}
	}

	instID := string(data["github_app_installation_id"])

	conf.AppInstallationID, err = strconv.ParseInt(instID, 10, 64)
	if err != nil {
		return nil, err
	if instID != "" {
		conf.AppInstallationID, err = strconv.ParseInt(instID, 10, 64)
		if err != nil {
			return nil, err
		}
	}

	conf.AppPrivateKey = string(data["github_app_private_key"])

@@ -15,6 +15,21 @@ import (
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func newRunnerPod(template corev1.Pod, runnerSpec arcv1alpha1.RunnerConfig, githubBaseURL string, d RunnerPodDefaults) (corev1.Pod, error) {
	return newRunnerPodWithContainerMode("", template, runnerSpec, githubBaseURL, d)
}

func setEnv(c *corev1.Container, name, value string) {
	for j := range c.Env {
		e := &c.Env[j]

		if e.Name == name {
			e.Value = value
			return
		}
	}
}

func newWorkGenericEphemeralVolume(t *testing.T, storageReq string) corev1.Volume {
	GBs, err := resource.ParseQuantity(storageReq)
	if err != nil {
@@ -171,7 +186,7 @@ func TestNewRunnerPod(t *testing.T) {
			Env: []corev1.EnvVar{
				{
					Name:  "DOCKER_GROUP_GID",
					Value: "121",
					Value: "1234",
				},
			},
			VolumeMounts: []corev1.VolumeMount{
@@ -397,6 +412,50 @@ func TestNewRunnerPod(t *testing.T) {
			config: arcv1alpha1.RunnerConfig{},
			want:   newTestPod(base, nil),
		},
		{
			description: "it should respect DOCKER_GROUP_GID of the dockerd sidecar container",
			template: corev1.Pod{
				Spec: corev1.PodSpec{
					Containers: []corev1.Container{
						{
							Name: "docker",
							Env: []corev1.EnvVar{
								{
									Name:  "DOCKER_GROUP_GID",
									Value: "2345",
								},
							},
						},
					},
				},
			},
			config: arcv1alpha1.RunnerConfig{},
			want: newTestPod(base, func(p *corev1.Pod) {
				setEnv(&p.Spec.Containers[1], "DOCKER_GROUP_GID", "2345")
			}),
		},
		{
			description: "it should add DOCKER_GROUP_GID=1001 to the dockerd sidecar container for Ubuntu 20.04 runners",
			template:    corev1.Pod{},
			config: arcv1alpha1.RunnerConfig{
				Image: "ghcr.io/summerwind/actions-runner:ubuntu-20.04-20210726-1",
			},
			want: newTestPod(base, func(p *corev1.Pod) {
				setEnv(&p.Spec.Containers[1], "DOCKER_GROUP_GID", "1001")
				p.Spec.Containers[0].Image = "ghcr.io/summerwind/actions-runner:ubuntu-20.04-20210726-1"
			}),
		},
		{
			description: "it should add DOCKER_GROUP_GID=121 to the dockerd sidecar container for Ubuntu 22.04 runners",
			template:    corev1.Pod{},
			config: arcv1alpha1.RunnerConfig{
				Image: "ghcr.io/summerwind/actions-runner:ubuntu-22.04-20210726-1",
			},
			want: newTestPod(base, func(p *corev1.Pod) {
				setEnv(&p.Spec.Containers[1], "DOCKER_GROUP_GID", "121")
				p.Spec.Containers[0].Image = "ghcr.io/summerwind/actions-runner:ubuntu-22.04-20210726-1"
			}),
		},
		{
			description: "dockerdWithinRunnerContainer=true should set privileged=true and omit the dind sidecar container",
			template:    corev1.Pod{},
@@ -552,7 +611,14 @@ func TestNewRunnerPod(t *testing.T) {
	for i := range testcases {
		tc := testcases[i]
		t.Run(tc.description, func(t *testing.T) {
			got, err := newRunnerPod(tc.template, tc.config, defaultRunnerImage, defaultRunnerImagePullSecrets, defaultDockerImage, defaultDockerRegistryMirror, githubBaseURL, false)
			got, err := newRunnerPod(tc.template, tc.config, githubBaseURL, RunnerPodDefaults{
				RunnerImage:               defaultRunnerImage,
				RunnerImagePullSecrets:    defaultRunnerImagePullSecrets,
				DockerImage:               defaultDockerImage,
				DockerRegistryMirror:      defaultDockerRegistryMirror,
				DockerGID:                 "1234",
				UseRunnerStatusUpdateHook: false,
			})
			require.NoError(t, err)
			require.Equal(t, tc.want, got)
		})
@@ -713,7 +779,7 @@ func TestNewRunnerPodFromRunnerController(t *testing.T) {
			Env: []corev1.EnvVar{
				{
					Name:  "DOCKER_GROUP_GID",
					Value: "121",
					Value: "1234",
				},
			},
			VolumeMounts: []corev1.VolumeMount{
@@ -1171,6 +1237,7 @@ func TestNewRunnerPodFromRunnerController(t *testing.T) {
		defaultRunnerImage            = "default-runner-image"
		defaultRunnerImagePullSecrets = []string{}
		defaultDockerImage            = "default-docker-image"
		defaultDockerGID              = "1234"
		defaultDockerRegistryMirror   = ""
		githubBaseURL                 = "api.github.com"
	)
@@ -1190,12 +1257,15 @@ func TestNewRunnerPodFromRunnerController(t *testing.T) {

	t.Run(tc.description, func(t *testing.T) {
		r := &RunnerReconciler{
			RunnerImage:            defaultRunnerImage,
			RunnerImagePullSecrets: defaultRunnerImagePullSecrets,
			DockerImage:            defaultDockerImage,
			DockerRegistryMirror:   defaultDockerRegistryMirror,
			GitHubClient:           multiClient,
			Scheme:                 scheme,
			GitHubClient: multiClient,
			Scheme:       scheme,
			RunnerPodDefaults: RunnerPodDefaults{
				RunnerImage:            defaultRunnerImage,
				RunnerImagePullSecrets: defaultRunnerImagePullSecrets,
				DockerImage:            defaultDockerImage,
				DockerRegistryMirror:   defaultDockerRegistryMirror,
				DockerGID:              defaultDockerGID,
			},
		}
		got, err := r.newPod(tc.runner)
		require.NoError(t, err)

@@ -68,15 +68,24 @@ type RunnerReconciler struct {
	Recorder record.EventRecorder
	Scheme   *runtime.Scheme
	GitHubClient *MultiGitHubClient
	RunnerImage string
	RunnerImagePullSecrets []string
	DockerImage string
	DockerRegistryMirror string
	Name string
	RegistrationRecheckInterval time.Duration
	RegistrationRecheckJitter   time.Duration
	UseRunnerStatusUpdateHook bool
	UnregistrationRetryDelay time.Duration

	RunnerPodDefaults RunnerPodDefaults
}

type RunnerPodDefaults struct {
	RunnerImage            string
	RunnerImagePullSecrets []string
	DockerImage            string
	DockerRegistryMirror   string
	// The default Docker group ID to use for the dockerd sidecar container.
	// Ubuntu 20.04 runner images assume 1001 and the 22.04 variant assumes 121 by default.
	DockerGID string

	UseRunnerStatusUpdateHook bool
}

// +kubebuilder:rbac:groups=actions.summerwind.dev,resources=runners,verbs=get;list;watch;create;update;patch;delete
@@ -145,7 +154,7 @@ func (r *RunnerReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr

	ready := runnerPodReady(&pod)

	if (runner.Status.Phase != phase || runner.Status.Ready != ready) && !r.UseRunnerStatusUpdateHook || runner.Status.Phase == "" && r.UseRunnerStatusUpdateHook {
	if (runner.Status.Phase != phase || runner.Status.Ready != ready) && !r.RunnerPodDefaults.UseRunnerStatusUpdateHook || runner.Status.Phase == "" && r.RunnerPodDefaults.UseRunnerStatusUpdateHook {
		if pod.Status.Phase == corev1.PodRunning {
			// Seeing this message, you can expect the runner to become `Running` soon.
			log.V(1).Info(
@@ -292,7 +301,7 @@ func (r *RunnerReconciler) processRunnerCreation(ctx context.Context, runner v1a
		return ctrl.Result{}, err
	}

	needsServiceAccount := runner.Spec.ServiceAccountName == "" && (r.UseRunnerStatusUpdateHook || runner.Spec.ContainerMode == "kubernetes")
	needsServiceAccount := runner.Spec.ServiceAccountName == "" && (r.RunnerPodDefaults.UseRunnerStatusUpdateHook || runner.Spec.ContainerMode == "kubernetes")
	if needsServiceAccount {
		serviceAccount := &corev1.ServiceAccount{
			ObjectMeta: metav1.ObjectMeta{
@@ -306,7 +315,7 @@ func (r *RunnerReconciler) processRunnerCreation(ctx context.Context, runner v1a

		rules := []rbacv1.PolicyRule{}

		if r.UseRunnerStatusUpdateHook {
		if r.RunnerPodDefaults.UseRunnerStatusUpdateHook {
			rules = append(rules, []rbacv1.PolicyRule{
				{
					APIGroups: []string{"actions.summerwind.dev"},
@@ -583,7 +592,7 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) {
		}
	}

	pod, err := newRunnerPodWithContainerMode(runner.Spec.ContainerMode, template, runner.Spec.RunnerConfig, r.RunnerImage, r.RunnerImagePullSecrets, r.DockerImage, r.DockerRegistryMirror, ghc.GithubBaseURL, r.UseRunnerStatusUpdateHook)
	pod, err := newRunnerPodWithContainerMode(runner.Spec.ContainerMode, template, runner.Spec.RunnerConfig, ghc.GithubBaseURL, r.RunnerPodDefaults)
	if err != nil {
		return pod, err
	}
@@ -634,7 +643,7 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) {

	if runnerSpec.ServiceAccountName != "" {
		pod.Spec.ServiceAccountName = runnerSpec.ServiceAccountName
	} else if r.UseRunnerStatusUpdateHook || runner.Spec.ContainerMode == "kubernetes" {
	} else if r.RunnerPodDefaults.UseRunnerStatusUpdateHook || runner.Spec.ContainerMode == "kubernetes" {
		pod.Spec.ServiceAccountName = runner.ObjectMeta.Name
	}

@@ -754,13 +763,19 @@ func runnerHookEnvs(pod *corev1.Pod) ([]corev1.EnvVar, error) {
	}, nil
}

func newRunnerPodWithContainerMode(containerMode string, template corev1.Pod, runnerSpec v1alpha1.RunnerConfig, defaultRunnerImage string, defaultRunnerImagePullSecrets []string, defaultDockerImage, defaultDockerRegistryMirror string, githubBaseURL string, useRunnerStatusUpdateHook bool) (corev1.Pod, error) {
func newRunnerPodWithContainerMode(containerMode string, template corev1.Pod, runnerSpec v1alpha1.RunnerConfig, githubBaseURL string, d RunnerPodDefaults) (corev1.Pod, error) {
	var (
		privileged                bool = true
		dockerdInRunner           bool = runnerSpec.DockerdWithinRunnerContainer != nil && *runnerSpec.DockerdWithinRunnerContainer
		dockerEnabled             bool = runnerSpec.DockerEnabled == nil || *runnerSpec.DockerEnabled
		ephemeral                 bool = runnerSpec.Ephemeral == nil || *runnerSpec.Ephemeral
		dockerdInRunnerPrivileged bool = dockerdInRunner

		defaultRunnerImage            = d.RunnerImage
		defaultRunnerImagePullSecrets = d.RunnerImagePullSecrets
		defaultDockerImage            = d.DockerImage
		defaultDockerRegistryMirror   = d.DockerRegistryMirror
		useRunnerStatusUpdateHook     = d.UseRunnerStatusUpdateHook
	)

	if containerMode == "kubernetes" {
@@ -1013,10 +1028,22 @@ func newRunnerPodWithContainerMode(containerMode string, template corev1.Pod, ru
	// for actions-runner-controller) so typically should not need to be
	// overridden
	if ok, _ := envVarPresent("DOCKER_GROUP_GID", dockerdContainer.Env); !ok {
		gid := d.DockerGID
		// We default to gid 121 for Ubuntu 22.04 images
		// See below for more details
		// - https://github.com/actions/actions-runner-controller/issues/2490#issuecomment-1501561923
		// - https://github.com/actions/actions-runner-controller/blob/8869ad28bb5a1daaedefe0e988571fe1fb36addd/runner/actions-runner.ubuntu-20.04.dockerfile#L14
		// - https://github.com/actions/actions-runner-controller/blob/8869ad28bb5a1daaedefe0e988571fe1fb36addd/runner/actions-runner.ubuntu-22.04.dockerfile#L12
		if strings.Contains(runnerContainer.Image, "22.04") {
			gid = "121"
		} else if strings.Contains(runnerContainer.Image, "20.04") {
			gid = "1001"
		}

		dockerdContainer.Env = append(dockerdContainer.Env,
			corev1.EnvVar{
				Name:  "DOCKER_GROUP_GID",
				Value: "121",
				Value: gid,
			})
	}
	dockerdContainer.Args = append(dockerdContainer.Args, "--group=$(DOCKER_GROUP_GID)")
@@ -1240,10 +1267,6 @@ func newRunnerPodWithContainerMode(containerMode string, template corev1.Pod, ru
	return *pod, nil
}

func newRunnerPod(template corev1.Pod, runnerSpec v1alpha1.RunnerConfig, defaultRunnerImage string, defaultRunnerImagePullSecrets []string, defaultDockerImage, defaultDockerRegistryMirror string, githubBaseURL string, useRunnerStatusUpdateHookEphemeralRole bool) (corev1.Pod, error) {
	return newRunnerPodWithContainerMode("", template, runnerSpec, defaultRunnerImage, defaultRunnerImagePullSecrets, defaultDockerImage, defaultDockerRegistryMirror, githubBaseURL, useRunnerStatusUpdateHookEphemeralRole)
}

func (r *RunnerReconciler) SetupWithManager(mgr ctrl.Manager) error {
	name := "runner-controller"
	if r.Name != "" {

@@ -45,13 +45,10 @@ type RunnerSetReconciler struct {
	Recorder record.EventRecorder
	Scheme   *runtime.Scheme

	CommonRunnerLabels []string
	GitHubClient *MultiGitHubClient
	RunnerImage string
	RunnerImagePullSecrets []string
	DockerImage string
	DockerRegistryMirror string
	UseRunnerStatusUpdateHook bool
	CommonRunnerLabels []string
	GitHubClient       *MultiGitHubClient

	RunnerPodDefaults RunnerPodDefaults
}

// +kubebuilder:rbac:groups=actions.summerwind.dev,resources=runnersets,verbs=get;list;watch;create;update;patch;delete
@@ -231,7 +228,7 @@ func (r *RunnerSetReconciler) newStatefulSet(ctx context.Context, runnerSet *v1a

	githubBaseURL := ghc.GithubBaseURL

	pod, err := newRunnerPodWithContainerMode(runnerSet.Spec.RunnerConfig.ContainerMode, template, runnerSet.Spec.RunnerConfig, r.RunnerImage, r.RunnerImagePullSecrets, r.DockerImage, r.DockerRegistryMirror, githubBaseURL, r.UseRunnerStatusUpdateHook)
	pod, err := newRunnerPodWithContainerMode(runnerSet.Spec.RunnerConfig.ContainerMode, template, runnerSet.Spec.RunnerConfig, githubBaseURL, r.RunnerPodDefaults)
	if err != nil {
		return nil, err
	}

@@ -86,4 +86,4 @@ Or for example if they're having problems specifically with runners:
This way users don't have to understand ARC moving parts but we still have a
way to target them specifically if we need to.

[^1]: Superseded by [ADR 2023-04-14](2023-04-14-adding-labels-k8s-resources.md)
[^1]: Superseded by [ADR 2023-03-14](2023-03-14-adding-labels-k8s-resources.md)

@@ -2,7 +2,7 @@

**Date**: 2023-02-10

**Status**: Done
**Status**: Superseded [^1]

## Context

@@ -136,3 +136,5 @@ The downside of this mode:

- When you have multiple controllers deployed, they will still use the same version of the CRD, so you need to make sure every controller you deploy is the same version as the others.
- You can't mix installations of `actions-runner-controller` in this mode (watchSingleNamespace) with the regular installation mode (watchAllClusterNamespaces) in the same cluster.

[^1]: Superseded by [ADR 2023-04-11](2023-04-11-limit-manager-role-permission.md)

167
docs/adrs/2023-04-11-limit-manager-role-permission.md
Normal file
@@ -0,0 +1,167 @@
# ADR 2023-04-11: Limit Permissions for Service Accounts in Actions-Runner-Controller

**Date**: 2023-04-11

**Status**: Done [^1]

## Context

- `actions-runner-controller` is a Kubernetes CRD (with controller) built using https://github.com/kubernetes-sigs/controller-runtime

- [controller-runtime](https://github.com/kubernetes-sigs/controller-runtime) provides a default cache-based k8s API client/Reader to make querying the k8s API server more efficient.

- The cache-based API client requires cluster-scope `list` and `watch` permissions for any resource the controller may query.

- This document is scoped to the AutoscalingRunnerSet CRD and its controller.

## Service accounts and their role binding in actions-runner-controller

There are 3 service accounts involved in a working `AutoscalingRunnerSet`-based `actions-runner-controller`:

1. Service account for each ephemeral runner Pod

   This should have the lowest privilege (no `RoleBinding` nor `ClusterRoleBinding`) by default. In the case of `containerMode=kubernetes`, it gets certain write permissions via a `RoleBinding` that limits them to a single namespace.

   > References:
   >
   > - ./charts/gha-runner-scale-set/templates/no_permission_serviceaccount.yaml
   > - ./charts/gha-runner-scale-set/templates/kube_mode_role.yaml
   > - ./charts/gha-runner-scale-set/templates/kube_mode_role_binding.yaml
   > - ./charts/gha-runner-scale-set/templates/kube_mode_serviceaccount.yaml

2. Service account for the AutoScalingListener Pod

   This has a `RoleBinding` to a single namespace with a `Role` that has permission to `PATCH` `EphemeralRunnerSet` and `EphemeralRunner`.

3. Service account for the controller manager

   Since the CRD controller is a singleton installed in the cluster that manages the CRD across multiple namespaces by default, the service account of the controller manager pod has a `ClusterRoleBinding` to a `ClusterRole` with broader permissions.

The current `ClusterRole` has the following permissions:

- Get/List/Create/Delete/Update/Patch/Watch on `AutoScalingRunnerSets` (with `Status` and `Finalizer` sub-resource)
- Get/List/Create/Delete/Update/Patch/Watch on `AutoScalingListeners` (with `Status` and `Finalizer` sub-resource)
- Get/List/Create/Delete/Update/Patch/Watch on `EphemeralRunnerSets` (with `Status` and `Finalizer` sub-resource)
- Get/List/Create/Delete/Update/Patch/Watch on `EphemeralRunners` (with `Status` and `Finalizer` sub-resource)

- Get/List/Create/Delete/Update/Patch/Watch on `Pods` (with `Status` sub-resource)
- **Get/List/Create/Delete/Update/Patch/Watch on `Secrets`**
- Get/List/Create/Delete/Update/Patch/Watch on `Roles`
- Get/List/Create/Delete/Update/Patch/Watch on `RoleBindings`
- Get/List/Create/Delete/Update/Patch/Watch on `ServiceAccounts`

> Full list can be found at: https://github.com/actions/actions-runner-controller/blob/facae69e0b189d3b5dd659f36df8a829516d2896/charts/actions-runner-controller-2/templates/manager_role.yaml

## Limit cluster role permission on Secrets

The cluster-scope `List Secrets` permission might be a blocker for adopting `actions-runner-controller` for certain customers, as their clusters may have restrictions that simply don't allow any service account to have cluster-scope `List Secrets` permission.

To help these customers and improve security for `actions-runner-controller` in general, we will limit the `ClusterRole` permissions of the controller manager's service account down to the following:

- Get/List/Create/Delete/Update/Patch/Watch on `AutoScalingRunnerSets` (with `Status` and `Finalizer` sub-resource)
- Get/List/Create/Delete/Update/Patch/Watch on `AutoScalingListeners` (with `Status` and `Finalizer` sub-resource)
- Get/List/Create/Delete/Update/Patch/Watch on `EphemeralRunnerSets` (with `Status` and `Finalizer` sub-resource)
- Get/List/Create/Delete/Update/Patch/Watch on `EphemeralRunners` (with `Status` and `Finalizer` sub-resource)

- List/Watch on `Pods`
- List/Watch/Patch on `Roles`
- List/Watch on `RoleBindings`
- List/Watch on `ServiceAccounts`

> We will change the default cache-based client to bypass the cache when reading `Secrets` and `ConfigMaps` (a ConfigMap is used when you configure `githubServerTLS`), so we can eliminate the need for cluster-scope `List` and `Watch` permissions on `Secrets`.
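
For illustration, a minimal sketch of such a cache bypass with controller-runtime, assuming a controller-runtime version that exposes `ClientDisableCacheFor` on the manager options; this is not necessarily the exact wiring ARC uses:

```go
// A minimal sketch of bypassing the informer cache for Secrets and ConfigMaps,
// assuming a controller-runtime version with manager.Options.ClientDisableCacheFor.
package main

import (
	corev1 "k8s.io/api/core/v1"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func newManager() (ctrl.Manager, error) {
	// Objects listed here are read directly from the API server instead of
	// the shared informer cache, so the manager's service account no longer
	// needs cluster-scope list/watch permission on them.
	return ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
		ClientDisableCacheFor: []client.Object{
			&corev1.Secret{},
			&corev1.ConfigMap{},
		},
	})
}
```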

Introduce a new `Role` for the controller and a `RoleBinding` binding the `Role` to the controller's `ServiceAccount` in the namespace the controller is deployed to. This role grants the controller's service account the permissions required to work with `AutoScalingListeners` in the controller namespace:

- Get/Create/Delete on `Pods`
- Get on `Pods/status`
- Get/Create/Delete/Update/Patch on `Secrets`
- Get/Create/Delete/Update/Patch on `ServiceAccounts`

The `Role` and `RoleBinding` creation will happen during `helm install demo oci://ghcr.io/actions/actions-runner-controller-charts/gha-runner-scale-set-controller`.

During `helm install demo oci://ghcr.io/actions/actions-runner-controller-charts/gha-runner-scale-set-controller`, we will store the controller's service account info as labels on the controller `Deployment`.
Ex:

```yaml
actions.github.com/controller-service-account-namespace: {{ .Release.Namespace }}
actions.github.com/controller-service-account-name: {{ include "gha-runner-scale-set-controller.serviceAccountName" . }}
```

Introduce a new `Role` per `AutoScalingRunnerSet` installation and a `RoleBinding` binding the `Role` to the controller's `ServiceAccount` in the namespace each `AutoScalingRunnerSet` is deployed to, with the following permissions:

- Get/Create/Delete/Update/Patch/List on `Secrets`
- Create/Delete on `Pods`
- Get on `Pods/status`
- Get/Create/Delete/Update/Patch on `Roles`
- Get/Create/Delete/Update/Patch on `RoleBindings`
- Get on `ConfigMaps`

The `Role` and `RoleBinding` creation will happen during `helm install demo oci://ghcr.io/actions/actions-runner-controller-charts/gha-runner-scale-set` to grant the controller's service account the permissions required to operate in the namespace the `AutoScalingRunnerSet` is deployed to.

The `gha-runner-scale-set` helm chart will try to find the `Deployment` of the controller using `helm lookup`, and get the service account info from the labels of the controller `Deployment` (`actions.github.com/controller-service-account-namespace` and `actions.github.com/controller-service-account-name`).

The `gha-runner-scale-set` helm chart will use this service account to properly render the `RoleBinding` template.

The `gha-runner-scale-set` helm chart will also allow customers to explicitly provide the controller service account info, in case `helm lookup` couldn't locate the right controller `Deployment`.

New sections in `values.yaml` of `gha-runner-scale-set`:

```yaml
## Optional controller service account that needs to have required Role and RoleBinding
## to operate this gha-runner-scale-set installation.
## The helm chart will try to find the controller deployment and its service account at installation time.
## In case the helm chart can't find the right service account, you can explicitly pass in the following value
## to help it render the RoleBinding with the right service account.
## Note: if your controller is installed to only watch a single namespace, you have to pass these values explicitly.
controllerServiceAccount:
  namespace: arc-system
  name: test-arc-gha-runner-scale-set-controller
```

## Install ARC to only watch/react to resources in a single namespace

In case the user doesn't want to have any `ClusterRole`, they can choose to install `actions-runner-controller` in a mode that only requires a `Role` with a `RoleBinding` in a particular namespace.

In this mode, `actions-runner-controller` will only be able to watch the `AutoScalingRunnerSet` resource in a single namespace.

If you want to deploy multiple `AutoScalingRunnerSets` into different namespaces, you will need to install `actions-runner-controller` in this mode multiple times as well, and have each installation watch the namespace into which you want to deploy an `AutoScalingRunnerSet`.

You will install `actions-runner-controller` with something like `helm install arc --namespace arc-system --set watchSingleNamespace=test-namespace oci://ghcr.io/actions/actions-runner-controller-charts/gha-runner-scale-set-controller` (the `test-namespace` namespace needs to be created first).

You will deploy the `AutoScalingRunnerSet` with something like `helm install demo --namespace TestNamespace oci://ghcr.io/actions/actions-runner-controller-charts/gha-runner-scale-set`.

In this mode, you will end up with a manager `Role` that has all Get/List/Create/Delete/Update/Patch/Watch permissions on the resources we need, and a `RoleBinding` binding the `Role` to the controller `ServiceAccount` in both the watched namespace and the controller namespace, e.g. `test-namespace` and `arc-system` in the above example.

The downside of this mode:

- When you have multiple controllers deployed, they will still use the same version of the CRD, so you need to make sure every controller you deploy is the same version as the others.
- You can't mix installations of `actions-runner-controller` in this mode (watchSingleNamespace) with the regular installation mode (watchAllClusterNamespaces) in the same cluster.

## Cleanup process

We will apply the following annotations during installation; they are used in the cleanup process (`helm uninstall`). If an annotation is not present, cleanup of that resource is skipped.

The cleanup only patches each resource to remove the `actions.github.com/cleanup-protection` finalizer; a sketch of such a patch is shown after the ordered list below. The client that created a resource is responsible for deleting it. Keep in mind that `helm uninstall` automatically deletes these resources, which allows the cleanup procedure to complete.

Annotations applied to the `AutoscalingRunnerSet` used in the cleanup procedure are:

- `actions.github.com/cleanup-github-secret-name`
- `actions.github.com/cleanup-manager-role-binding`
- `actions.github.com/cleanup-manager-role-name`
- `actions.github.com/cleanup-kubernetes-mode-role-binding-name`
- `actions.github.com/cleanup-kubernetes-mode-role-name`
- `actions.github.com/cleanup-kubernetes-mode-service-account-name`
- `actions.github.com/cleanup-no-permission-service-account-name`

The order in which resources are patched to remove finalizers:

1. Kubernetes mode `RoleBinding`
1. Kubernetes mode `Role`
1. Kubernetes mode `ServiceAccount`
1. No permission `ServiceAccount`
1. GitHub `Secret`
1. Manager `RoleBinding`
1. Manager `Role`
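
As a minimal illustration, the finalizer-removal patch could look like the following with a controller-runtime client. The function and variable names are illustrative; this is a sketch, not the chart's actual cleanup implementation:

```go
// A minimal sketch of removing the cleanup-protection finalizer from a Role,
// assuming a controller-runtime client `c`; names here are illustrative.
package main

import (
	"context"

	rbacv1 "k8s.io/api/rbac/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)

func removeCleanupFinalizer(ctx context.Context, c client.Client, role *rbacv1.Role) error {
	patch := client.MergeFrom(role.DeepCopy())
	controllerutil.RemoveFinalizer(role, "actions.github.com/cleanup-protection")
	// Once the finalizer is gone, a pending delete (e.g. issued by
	// helm uninstall) can complete.
	return c.Patch(ctx, role, patch)
}
```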

[^1]: Supersedes [ADR 2023-02-10](2023-02-10-limit-manager-role-permission.md)

@@ -36,7 +36,7 @@ https://user-images.githubusercontent.com/568794/212668313-8946ddc5-60c1-461f-a7
   --namespace "${NAMESPACE}" \
   --create-namespace \
   oci://ghcr.io/actions/actions-runner-controller-charts/gha-runner-scale-set-controller \
   --version 0.3.0
   --version 0.4.0
```

1. Generate a Personal Access Token (PAT) or create and install a GitHub App. See [Creating a personal access token](https://docs.github.com/en/github/authenticating-to-github/creating-a-personal-access-token) and [Creating a GitHub App](https://docs.github.com/en/developers/apps/creating-a-github-app).
@@ -57,7 +57,7 @@ https://user-images.githubusercontent.com/568794/212668313-8946ddc5-60c1-461f-a7
   --create-namespace \
   --set githubConfigUrl="${GITHUB_CONFIG_URL}" \
   --set githubConfigSecret.github_token="${GITHUB_PAT}" \
   oci://ghcr.io/actions/actions-runner-controller-charts/gha-runner-scale-set --version 0.3.0
   oci://ghcr.io/actions/actions-runner-controller-charts/gha-runner-scale-set --version 0.4.0
```

```bash
@@ -75,7 +75,7 @@ https://user-images.githubusercontent.com/568794/212668313-8946ddc5-60c1-461f-a7
   --set githubConfigSecret.github_app_id="${GITHUB_APP_ID}" \
   --set githubConfigSecret.github_app_installation_id="${GITHUB_APP_INSTALLATION_ID}" \
   --set githubConfigSecret.github_app_private_key="${GITHUB_APP_PRIVATE_KEY}" \
   oci://ghcr.io/actions/actions-runner-controller-charts/gha-runner-scale-set --version 0.3.0
   oci://ghcr.io/actions/actions-runner-controller-charts/gha-runner-scale-set --version 0.4.0
```

1. Check your installation. If everything went well, you should see the following:
@@ -84,8 +84,8 @@ https://user-images.githubusercontent.com/568794/212668313-8946ddc5-60c1-461f-a7
   $ helm list -n "${NAMESPACE}"

   NAME            NAMESPACE    REVISION  UPDATED                                  STATUS    CHART                                   APP VERSION
   arc             arc-systems  1         2023-01-18 10:03:36.610534934 +0000 UTC  deployed  gha-runner-scale-set-controller-0.3.0   preview
   arc-runner-set  arc-systems  1         2023-01-18 10:20:14.795285645 +0000 UTC  deployed  gha-runner-scale-set-0.3.0              0.3.0
   arc             arc-systems  1         2023-01-18 10:03:36.610534934 +0000 UTC  deployed  gha-runner-scale-set-controller-0.4.0   preview
   arc-runner-set  arc-systems  1         2023-01-18 10:20:14.795285645 +0000 UTC  deployed  gha-runner-scale-set-0.4.0              0.4.0
```

```bash
@@ -140,7 +140,7 @@ Upgrading actions-runner-controller requires a few extra steps because CRDs will

```bash
helm pull oci://ghcr.io/actions/actions-runner-controller-charts/gha-runner-scale-set-controller \
  --version 0.3.0 \
  --version 0.4.0 \
  --untar && \
  kubectl replace -f <PATH>/gha-runner-scale-set-controller/crds/
```
@@ -149,6 +149,24 @@ Upgrading actions-runner-controller requires a few extra steps because CRDs will

## Troubleshooting

### I'm using the charts from the `master` branch and the controller is not working

The `master` branch is highly unstable! We offer no guarantees that the charts in the `master` branch will work at any given time. If you're using the charts from the `master` branch, you should expect to encounter issues. Please use the latest release instead.

### Controller pod is running but the runner set listener pod is not

You need to inspect the logs of the controller first and see if there are any errors. If there are no errors, and the runner set listener pod is still not running, you need to make sure that the **controller pod has access to the Kubernetes API server in your cluster!**

You'll see something similar to the following in the logs of the controller pod:

```log
kubectl logs <controller_pod_name> -c manager
17:35:28.661069 1 request.go:690] Waited for 1.032376652s due to client-side throttling, not priority and fairness, request: GET:https://10.0.0.1:443/apis/monitoring.coreos.com/v1alpha1?timeout=32s
2023-03-15T17:35:29Z INFO starting manager
```

If you have a proxy configured or you're using a sidecar proxy that's automatically injected (think [Istio](https://istio.io/)), you need to make sure it's configured appropriately to allow traffic from the controller container (manager) to the Kubernetes API server.

### Check the logs

You can check the logs of the controller pod using the following command:
@@ -219,6 +237,35 @@ To fix this, you can either:

## Changelog

### v0.4.0

#### ⚠️ Warning

This release contains a major change related to the way permissions are applied to the manager ([#2276](https://github.com/actions/actions-runner-controller/pull/2276) and [#2363](https://github.com/actions/actions-runner-controller/pull/2363)).

Please evaluate these changes carefully before upgrading.

#### Major changes

1. Surface EphemeralRunnerSet stats to AutoscalingRunnerSet [#2382](https://github.com/actions/actions-runner-controller/pull/2382)
1. Improved security posture by removing list/watch secrets permission from the manager cluster role [#2276](https://github.com/actions/actions-runner-controller/pull/2276)
1. Improved security posture by delaying role/rolebinding creation to gha-runner-scale-set during installation [#2363](https://github.com/actions/actions-runner-controller/pull/2363)
1. Improved security posture by supporting watching a single namespace from the controller [#2374](https://github.com/actions/actions-runner-controller/pull/2374)
1. Added labels to AutoscalingRunnerSet subresources to allow easier inspection [#2391](https://github.com/actions/actions-runner-controller/pull/2391)
1. Fixed a bug preventing env variables from being specified [#2450](https://github.com/actions/actions-runner-controller/pull/2450)
1. Enhanced the quickstart troubleshooting guides [#2435](https://github.com/actions/actions-runner-controller/pull/2435)
1. Fixed: ignore the extra dind container when the container mode type is "dind" [#2418](https://github.com/actions/actions-runner-controller/pull/2418)
1. Added additional cleanup finalizers [#2433](https://github.com/actions/actions-runner-controller/pull/2433)
1. The gha-runner-scale-set listener pod now inherits the ImagePullPolicy from the manager pod [#2477](https://github.com/actions/actions-runner-controller/pull/2477)
1. Treat the `.ghe.com` domain as a hosted environment [#2480](https://github.com/actions/actions-runner-controller/pull/2480)

### v0.3.0

#### Major changes

@@ -3,6 +3,7 @@ package actions
import (
	"fmt"
	"net/url"
	"os"
	"strings"
)

@@ -34,9 +35,7 @@ func ParseGitHubConfigFromURL(in string) (*GitHubConfig, error) {
		return nil, err
	}

	isHosted := u.Host == "github.com" ||
		u.Host == "www.github.com" ||
		u.Host == "github.localhost"
	isHosted := isHostedGitHubURL(u)

	configURL := &GitHubConfig{
		ConfigURL: u,
@@ -76,23 +75,35 @@ func ParseGitHubConfigFromURL(in string) (*GitHubConfig, error) {
func (c *GitHubConfig) GitHubAPIURL(path string) *url.URL {
	result := &url.URL{
		Scheme: c.ConfigURL.Scheme,
		Host:   c.ConfigURL.Host, // default for Enterprise mode
		Path:   "/api/v3",        // default for Enterprise mode
	}

	switch c.ConfigURL.Host {
	// Hosted
	case "github.com", "github.localhost":
		result.Host = fmt.Sprintf("api.%s", c.ConfigURL.Host)
	// re-routing www.github.com to api.github.com
	case "www.github.com":
		result.Host = "api.github.com"
	isHosted := isHostedGitHubURL(c.ConfigURL)

	// Enterprise
	default:
		result.Host = c.ConfigURL.Host
		result.Path = "/api/v3"
	if isHosted {
		result.Host = fmt.Sprintf("api.%s", c.ConfigURL.Host)
		result.Path = ""

		if strings.EqualFold("www.github.com", c.ConfigURL.Host) {
			// re-routing www.github.com to api.github.com
			result.Host = "api.github.com"
		}
	}

	result.Path += path

	return result
}

func isHostedGitHubURL(u *url.URL) bool {
	_, forceGhes := os.LookupEnv("GITHUB_ACTIONS_FORCE_GHES")
	if forceGhes {
		return false
	}

	return strings.EqualFold(u.Host, "github.com") ||
		strings.EqualFold(u.Host, "www.github.com") ||
		strings.EqualFold(u.Host, "github.localhost") ||
		strings.HasSuffix(u.Host, ".ghe.com")
}

@@ -3,6 +3,7 @@ package actions_test
|
||||
import (
|
||||
"errors"
|
||||
"net/url"
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
@@ -117,6 +118,16 @@ func TestGitHubConfig(t *testing.T) {
|
||||
IsHosted: false,
|
||||
},
|
||||
},
|
||||
{
|
||||
configURL: "https://my-ghes.ghe.com/org/",
|
||||
expected: &actions.GitHubConfig{
|
||||
Scope: actions.GitHubScopeOrganization,
|
||||
Enterprise: "",
|
||||
Organization: "org",
|
||||
Repository: "",
|
||||
IsHosted: true,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
@@ -151,9 +162,35 @@ func TestGitHubConfig_GitHubAPIURL(t *testing.T) {
	t.Run("when hosted", func(t *testing.T) {
		config, err := actions.ParseGitHubConfigFromURL("https://github.com/org/repo")
		require.NoError(t, err)
		assert.True(t, config.IsHosted)

		result := config.GitHubAPIURL("/some/path")
		assert.Equal(t, "https://api.github.com/some/path", result.String())
	})
	t.Run("when not hosted", func(t *testing.T) {})
	t.Run("when hosted with ghe.com", func(t *testing.T) {
		config, err := actions.ParseGitHubConfigFromURL("https://github.ghe.com/org/repo")
		require.NoError(t, err)
		assert.True(t, config.IsHosted)

		result := config.GitHubAPIURL("/some/path")
		assert.Equal(t, "https://api.github.ghe.com/some/path", result.String())
	})
	t.Run("when not hosted", func(t *testing.T) {
		config, err := actions.ParseGitHubConfigFromURL("https://ghes.com/org/repo")
		require.NoError(t, err)
		assert.False(t, config.IsHosted)

		result := config.GitHubAPIURL("/some/path")
		assert.Equal(t, "https://ghes.com/api/v3/some/path", result.String())
	})
	t.Run("when not hosted with ghe.com", func(t *testing.T) {
		os.Setenv("GITHUB_ACTIONS_FORCE_GHES", "1")
		defer os.Unsetenv("GITHUB_ACTIONS_FORCE_GHES")
		config, err := actions.ParseGitHubConfigFromURL("https://test.ghe.com/org/repo")
		require.NoError(t, err)
		assert.False(t, config.IsHosted)

		result := config.GitHubAPIURL("/some/path")
		assert.Equal(t, "https://test.ghe.com/api/v3/some/path", result.String())
	})
}
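A side note on the last subtest: since Go 1.17 the testing package provides t.Setenv, which restores the previous value automatically, so the manual os.Setenv/os.Unsetenv pair could be dropped. A sketch, using the same imports as the test file above:

	t.Run("when not hosted with ghe.com", func(t *testing.T) {
		// t.Setenv registers cleanup automatically (Go 1.17+); note that
		// it panics if the test also calls t.Parallel.
		t.Setenv("GITHUB_ACTIONS_FORCE_GHES", "1")

		config, err := actions.ParseGitHubConfigFromURL("https://test.ghe.com/org/repo")
		require.NoError(t, err)
		assert.False(t, config.IsHosted)

		result := config.GitHubAPIURL("/some/path")
		assert.Equal(t, "https://test.ghe.com/api/v3/some/path", result.String())
	})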
61
main.go
@@ -45,6 +45,7 @@ import (
const (
	defaultRunnerImage = "summerwind/actions-runner:latest"
	defaultDockerImage = "docker:dind"
	defaultDockerGID   = "1001"
)

var scheme = runtime.NewScheme()
@@ -76,18 +77,15 @@ func main() {
		autoScalingRunnerSetOnly bool
		enableLeaderElection     bool
		disableAdmissionWebhook  bool
		runnerStatusUpdateHook   bool
		leaderElectionId         string
		port                     int
		syncPeriod               time.Duration

		defaultScaleDownDelay time.Duration

		runnerImage            string
		runnerImagePullSecrets stringSlice
		runnerPodDefaults      actionssummerwindnet.RunnerPodDefaults

		dockerImage          string
		dockerRegistryMirror string
		namespace            string
		logLevel             string
		logFormat            string
@@ -108,10 +106,11 @@ func main() {
	flag.BoolVar(&enableLeaderElection, "enable-leader-election", false,
		"Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager.")
	flag.StringVar(&leaderElectionId, "leader-election-id", "actions-runner-controller", "Controller id for leader election.")
	flag.StringVar(&runnerImage, "runner-image", defaultRunnerImage, "The image name of self-hosted runner container to use by default if one isn't defined in yaml.")
	flag.StringVar(&dockerImage, "docker-image", defaultDockerImage, "The image name of docker sidecar container to use by default if one isn't defined in yaml.")
	flag.StringVar(&runnerPodDefaults.RunnerImage, "runner-image", defaultRunnerImage, "The image name of self-hosted runner container to use by default if one isn't defined in yaml.")
	flag.StringVar(&runnerPodDefaults.DockerImage, "docker-image", defaultDockerImage, "The image name of docker sidecar container to use by default if one isn't defined in yaml.")
	flag.StringVar(&runnerPodDefaults.DockerGID, "docker-gid", defaultDockerGID, "The default GID of the docker group in the docker sidecar container. Use 1001 for dockerd sidecars of Ubuntu 20.04 runners, and 121 for Ubuntu 22.04.")
	flag.Var(&runnerImagePullSecrets, "runner-image-pull-secret", "The default image-pull secret name for self-hosted runner container.")
	flag.StringVar(&dockerRegistryMirror, "docker-registry-mirror", "", "The default Docker Registry Mirror used by runners.")
	flag.StringVar(&runnerPodDefaults.DockerRegistryMirror, "docker-registry-mirror", "", "The default Docker Registry Mirror used by runners.")
	flag.StringVar(&c.Token, "github-token", c.Token, "The personal access token of GitHub.")
	flag.StringVar(&c.EnterpriseURL, "github-enterprise-url", c.EnterpriseURL, "Enterprise URL to be used for your GitHub API calls")
	flag.Int64Var(&c.AppID, "github-app-id", c.AppID, "The application ID of GitHub App.")
@@ -122,7 +121,7 @@ func main() {
	flag.StringVar(&c.BasicauthUsername, "github-basicauth-username", c.BasicauthUsername, "Username for GitHub basic auth to use instead of PAT or GitHub APP in case it's running behind a proxy API")
	flag.StringVar(&c.BasicauthPassword, "github-basicauth-password", c.BasicauthPassword, "Password for GitHub basic auth to use instead of PAT or GitHub APP in case it's running behind a proxy API")
	flag.StringVar(&c.RunnerGitHubURL, "runner-github-url", c.RunnerGitHubURL, "GitHub URL to be used by runners during registration")
	flag.BoolVar(&runnerStatusUpdateHook, "runner-status-update-hook", false, "Use custom RBAC for runners (role, role binding and service account).")
	flag.BoolVar(&runnerPodDefaults.UseRunnerStatusUpdateHook, "runner-status-update-hook", false, "Use custom RBAC for runners (role, role binding and service account).")
	flag.DurationVar(&defaultScaleDownDelay, "default-scale-down-delay", actionssummerwindnet.DefaultScaleDownDelay, "The approximate delay for a scale down followed by a scale up, used to prevent flapping (down->up->down->... loop)")
	flag.IntVar(&port, "port", 9443, "The port to which the admission webhook endpoint should bind")
	flag.DurationVar(&syncPeriod, "sync-period", 1*time.Minute, "Determines the minimum frequency at which K8s resources managed by this controller are reconciled.")
@@ -135,6 +134,8 @@ func main() {
	flag.Var(&autoScalerImagePullSecrets, "auto-scaler-image-pull-secrets", "The default image-pull secret name for auto-scaler listener container.")
	flag.Parse()

	runnerPodDefaults.RunnerImagePullSecrets = runnerImagePullSecrets

	log, err := logging.NewLogger(logLevel, logFormat)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Error: creating logger: %v\n", err)
@@ -170,6 +171,13 @@ func main() {
		}
	}

	listenerPullPolicy := os.Getenv("CONTROLLER_MANAGER_LISTENER_IMAGE_PULL_POLICY")
	if ok := actionsgithubcom.SetListenerImagePullPolicy(listenerPullPolicy); ok {
		log.Info("AutoscalingListener image pull policy changed", "ImagePullPolicy", listenerPullPolicy)
	} else {
		log.Info("Using default AutoscalingListener image pull policy", "ImagePullPolicy", actionsgithubcom.DefaultScaleSetListenerImagePullPolicy)
	}

	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
		Scheme:   scheme,
		NewCache: newCache,
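The setter itself is not shown in this diff. A plausible sketch of what it validates, assuming it accepts only the three Kubernetes pull policies; only the function name and the env var come from the diff, the body below is an assumption:

package actionsgithubcom

import corev1 "k8s.io/api/core/v1"

// Hypothetical reconstruction; the real implementation may differ.
var listenerImagePullPolicy = corev1.PullIfNotPresent // stand-in default

func SetListenerImagePullPolicy(policy string) bool {
	switch p := corev1.PullPolicy(policy); p {
	case corev1.PullAlways, corev1.PullIfNotPresent, corev1.PullNever:
		listenerImagePullPolicy = p
		return true
	default:
		return false // empty or invalid value: keep the default
	}
}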
@@ -248,16 +256,11 @@ func main() {
	)

	runnerReconciler := &actionssummerwindnet.RunnerReconciler{
		Client:                    mgr.GetClient(),
		Log:                       log.WithName("runner"),
		Scheme:                    mgr.GetScheme(),
		GitHubClient:              multiClient,
		DockerImage:               dockerImage,
		DockerRegistryMirror:      dockerRegistryMirror,
		UseRunnerStatusUpdateHook: runnerStatusUpdateHook,
		// Defaults for self-hosted runner containers
		RunnerImage:            runnerImage,
		RunnerImagePullSecrets: runnerImagePullSecrets,
		Client:            mgr.GetClient(),
		Log:               log.WithName("runner"),
		Scheme:            mgr.GetScheme(),
		GitHubClient:      multiClient,
		RunnerPodDefaults: runnerPodDefaults,
	}

	if err = runnerReconciler.SetupWithManager(mgr); err != nil {
@@ -289,17 +292,12 @@ func main() {
	}

	runnerSetReconciler := &actionssummerwindnet.RunnerSetReconciler{
		Client:                    mgr.GetClient(),
		Log:                       log.WithName("runnerset"),
		Scheme:                    mgr.GetScheme(),
		CommonRunnerLabels:        commonRunnerLabels,
		DockerImage:               dockerImage,
		DockerRegistryMirror:      dockerRegistryMirror,
		GitHubClient:              multiClient,
		// Defaults for self-hosted runner containers
		RunnerImage:            runnerImage,
		RunnerImagePullSecrets: runnerImagePullSecrets,
		UseRunnerStatusUpdateHook: runnerStatusUpdateHook,
		Client:             mgr.GetClient(),
		Log:                log.WithName("runnerset"),
		Scheme:             mgr.GetScheme(),
		CommonRunnerLabels: commonRunnerLabels,
		GitHubClient:       multiClient,
		RunnerPodDefaults:  runnerPodDefaults,
	}

	if err = runnerSetReconciler.SetupWithManager(mgr); err != nil {
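Both reconcilers now share one defaults struct instead of six loose fields. Reconstructed from the fields this diff references; the actual definition in the actionssummerwindnet package may differ in types or ordering:

// Assumed shape: every field below is referenced somewhere in this diff.
type RunnerPodDefaults struct {
	RunnerImage            string
	RunnerImagePullSecrets []string
	DockerImage            string
	DockerRegistryMirror   string
	DockerGID              string

	UseRunnerStatusUpdateHook bool
}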
@@ -312,8 +310,9 @@ func main() {
		"version", build.Version,
		"default-scale-down-delay", defaultScaleDownDelay,
		"sync-period", syncPeriod,
		"default-runner-image", runnerImage,
		"default-docker-image", dockerImage,
		"default-runner-image", runnerPodDefaults.RunnerImage,
		"default-docker-image", runnerPodDefaults.DockerImage,
		"default-docker-gid", runnerPodDefaults.DockerGID,
		"common-runner-labels", commonRunnerLabels,
		"leader-election-enabled", enableLeaderElection,
		"leader-election-id", leaderElectionId,
@@ -136,12 +136,27 @@ func (reader *EventReader) ProcessWorkflowJobEvent(ctx context.Context, event in
	// job_conclusion -> (neutral, success, skipped, cancelled, timed_out, action_required, failure)
	githubWorkflowJobConclusionsTotal.With(extraLabel("job_conclusion", *e.WorkflowJob.Conclusion, labels)).Inc()

	parseResult, err := reader.fetchAndParseWorkflowJobLogs(ctx, e)
	if err != nil {
		log.Error(err, "reading workflow job log")
		return
	} else {
		log.Info("reading workflow_job logs", keysAndValues...)
	var (
		exitCode       = "na"
		runTimeSeconds *float64
	)

	// We need to do our best not to fail the whole event processing
	// when the user provided no GitHub API credentials.
	// See https://github.com/actions/actions-runner-controller/issues/2424
	if reader.GitHubClient != nil {
		parseResult, err := reader.fetchAndParseWorkflowJobLogs(ctx, e)
		if err != nil {
			log.Error(err, "reading workflow job log")
			return
		}

		exitCode = parseResult.ExitCode

		s := parseResult.RunTime.Seconds()
		runTimeSeconds = &s

		log.WithValues(keysAndValues...).Info("reading workflow_job logs", "exit_code", exitCode)
	}

	if *e.WorkflowJob.Conclusion == "failure" {
@@ -167,18 +182,20 @@ func (reader *EventReader) ProcessWorkflowJobEvent(ctx context.Context, event in
			}
			if *conclusion == "timed_out" {
				failedStep = fmt.Sprint(i)
				parseResult.ExitCode = "timed_out"
				exitCode = "timed_out"
				break
			}
		}
		githubWorkflowJobFailuresTotal.With(
			extraLabel("failed_step", failedStep,
				extraLabel("exit_code", parseResult.ExitCode, labels),
				extraLabel("exit_code", exitCode, labels),
			),
		).Inc()
	}

	githubWorkflowJobRunDurationSeconds.With(extraLabel("job_conclusion", *e.WorkflowJob.Conclusion, labels)).Observe(parseResult.RunTime.Seconds())
	if runTimeSeconds != nil {
		githubWorkflowJobRunDurationSeconds.With(extraLabel("job_conclusion", *e.WorkflowJob.Conclusion, labels)).Observe(*runTimeSeconds)
	}
}
}
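The net effect of the two hunks above: with no GitHub API credentials configured, the conclusion and failure counters still increment (with exit_code "na"), while the duration histogram is skipped entirely rather than observing a zero. A minimal standalone sketch of the gating pattern; the names here are illustrative, not the package's:

package main

import "fmt"

// A *float64 stands in for "measured or not measured", so the duration
// is only observed when the job logs were actually fetched and parsed.
func main() {
	for _, haveClient := range []bool{true, false} {
		var runTimeSeconds *float64
		if haveClient {
			s := 12.5 // stand-in for parseResult.RunTime.Seconds()
			runTimeSeconds = &s
		}
		if runTimeSeconds != nil {
			fmt.Println("observe duration:", *runTimeSeconds)
		} else {
			fmt.Println("skip duration metric; count-only metrics still recorded")
		}
	}
}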
@@ -5,17 +5,17 @@ import (
	"time"
)

func init() {
	rand.Seed(time.Now().UnixNano())
}

const letterBytes = "abcdefghijklmnopqrstuvwxyz"

var (
	random = rand.New(rand.NewSource(time.Now().UnixNano()))
)

// Copied from https://stackoverflow.com/a/31832326 with thanks
func RandStringBytesRmndr(n int) string {
	b := make([]byte, n)
	for i := range b {
		b[i] = letterBytes[rand.Int63()%int64(len(letterBytes))]
		b[i] = letterBytes[random.Int63()%int64(len(letterBytes))]
	}
	return string(b)
}
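Background on this change: rand.Seed was deprecated in Go 1.20, which is why the seeded init is replaced by a dedicated *rand.Rand. One caveat: unlike the package-level functions, a *rand.Rand created via rand.New is not safe for concurrent use, so this assumes RandStringBytesRmndr is never called from multiple goroutines at once. On Go 1.20+, where the global source is auto-seeded, simply dropping the init and keeping the global functions would stay goroutine-safe; a sketch under that assumption:

package randutil

import "math/rand"

const letterBytes = "abcdefghijklmnopqrstuvwxyz"

// On Go 1.20+ the global source is seeded automatically, so keeping the
// package-level rand functions is the simplest goroutine-safe option.
func RandStringBytesRmndr(n int) string {
	b := make([]byte, n)
	for i := range b {
		b[i] = letterBytes[rand.Int63()%int64(len(letterBytes))]
	}
	return string(b)
}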