mirror of https://github.com/actions/actions-runner-controller.git
synced 2025-12-10 11:41:27 +00:00
Compare commits
32 Commits
| Author | SHA1 | Date |
|---|---|---|
| | 8a5fb6ccb7 | |
| | e930ba6e98 | |
| | 5ba3805a3f | |
| | f798cddca1 | |
| | 367ee46122 | |
| | f4a318fca6 | |
| | 4ee21cb24b | |
| | 102c9e1afa | |
| | 73e676f951 | |
| | 41ebb43c65 | |
| | aa50b62c01 | |
| | 942f773fef | |
| | 21722a5de8 | |
| | a2d4b95b79 | |
| | 04fb9f4fa1 | |
| | 8304b80955 | |
| | 9bd4025e9c | |
| | 94c089c407 | |
| | 9859bbc7f2 | |
| | c1e2c4ef9d | |
| | 2ee15dbca3 | |
| | a4cf626410 | |
| | 58f4b6ff2d | |
| | 22fbd10bd3 | |
| | 52b97139b6 | |
| | 3e0bc3f7be | |
| | ba1ac0990b | |
| | 76fe43e8e0 | |
| | 8869ad28bb | |
| | b86af190f7 | |
| | 1a491cbfe5 | |
| | 087f20fd5d | |
54  .github/workflows/e2e-test-linux-vm.yaml (vendored)
@@ -16,11 +16,12 @@ env:
   TARGET_ORG: actions-runner-controller
   TARGET_REPO: arc_e2e_test_dummy
   IMAGE_NAME: "arc-test-image"
-  IMAGE_VERSION: "dev"
+  IMAGE_VERSION: "0.4.0"
 
 jobs:
   default-setup:
     runs-on: ubuntu-latest
     timeout-minutes: 20
+    if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.id == github.repository_id
     env:
       WORKFLOW_FILE: "arc-test-workflow.yaml"
@@ -55,11 +56,12 @@ jobs:
           echo "Pod found: $POD_NAME"
           break
         fi
-        if [ "$count" -ge 10 ]; then
+        if [ "$count" -ge 60 ]; then
           echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-runner-scale-set-controller"
           exit 1
         fi
         sleep 1
         count=$((count+1))
       done
       kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller
       kubectl get pod -n arc-systems
@@ -84,11 +86,12 @@ jobs:
           echo "Pod found: $POD_NAME"
           break
         fi
-        if [ "$count" -ge 10 ]; then
+        if [ "$count" -ge 60 ]; then
           echo "Timeout waiting for listener pod with label actions.github.com/scale-set-name=$ARC_NAME"
           exit 1
         fi
         sleep 1
         count=$((count+1))
       done
       kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME
       kubectl get pod -n arc-systems
@@ -107,6 +110,7 @@ jobs:
 
   single-namespace-setup:
     runs-on: ubuntu-latest
     timeout-minutes: 20
+    if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.id == github.repository_id
     env:
       WORKFLOW_FILE: "arc-test-workflow.yaml"
@@ -143,11 +147,12 @@ jobs:
           echo "Pod found: $POD_NAME"
           break
         fi
-        if [ "$count" -ge 10 ]; then
+        if [ "$count" -ge 60 ]; then
           echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-runner-scale-set-controller"
           exit 1
         fi
         sleep 1
         count=$((count+1))
       done
       kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller
       kubectl get pod -n arc-systems
@@ -172,11 +177,12 @@ jobs:
           echo "Pod found: $POD_NAME"
           break
         fi
-        if [ "$count" -ge 10 ]; then
+        if [ "$count" -ge 60 ]; then
           echo "Timeout waiting for listener pod with label actions.github.com/scale-set-name=$ARC_NAME"
           exit 1
         fi
         sleep 1
         count=$((count+1))
       done
       kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME
       kubectl get pod -n arc-systems
@@ -195,6 +201,7 @@ jobs:
 
   dind-mode-setup:
     runs-on: ubuntu-latest
     timeout-minutes: 20
+    if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.id == github.repository_id
     env:
       WORKFLOW_FILE: arc-test-dind-workflow.yaml
@@ -229,11 +236,12 @@ jobs:
           echo "Pod found: $POD_NAME"
           break
         fi
-        if [ "$count" -ge 10 ]; then
+        if [ "$count" -ge 60 ]; then
           echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-runner-scale-set-controller"
           exit 1
         fi
         sleep 1
         count=$((count+1))
       done
       kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller
       kubectl get pod -n arc-systems
@@ -259,11 +267,12 @@ jobs:
           echo "Pod found: $POD_NAME"
           break
         fi
-        if [ "$count" -ge 10 ]; then
+        if [ "$count" -ge 60 ]; then
           echo "Timeout waiting for listener pod with label actions.github.com/scale-set-name=$ARC_NAME"
           exit 1
         fi
         sleep 1
         count=$((count+1))
       done
       kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME
       kubectl get pod -n arc-systems
@@ -282,6 +291,7 @@ jobs:
 
   kubernetes-mode-setup:
     runs-on: ubuntu-latest
     timeout-minutes: 20
+    if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.id == github.repository_id
     env:
       WORKFLOW_FILE: "arc-test-kubernetes-workflow.yaml"
@@ -321,11 +331,12 @@ jobs:
           echo "Pod found: $POD_NAME"
           break
         fi
-        if [ "$count" -ge 10 ]; then
+        if [ "$count" -ge 60 ]; then
           echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-runner-scale-set-controller"
           exit 1
         fi
         sleep 1
         count=$((count+1))
       done
       kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller
       kubectl get pod -n arc-systems
@@ -355,11 +366,12 @@ jobs:
           echo "Pod found: $POD_NAME"
           break
         fi
-        if [ "$count" -ge 10 ]; then
+        if [ "$count" -ge 60 ]; then
           echo "Timeout waiting for listener pod with label actions.github.com/scale-set-name=$ARC_NAME"
           exit 1
         fi
         sleep 1
         count=$((count+1))
       done
       kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME
       kubectl get pod -n arc-systems
@@ -378,6 +390,7 @@ jobs:
 
   auth-proxy-setup:
     runs-on: ubuntu-latest
     timeout-minutes: 20
+    if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.id == github.repository_id
     env:
       WORKFLOW_FILE: "arc-test-workflow.yaml"
@@ -412,11 +425,12 @@ jobs:
           echo "Pod found: $POD_NAME"
           break
         fi
-        if [ "$count" -ge 10 ]; then
+        if [ "$count" -ge 60 ]; then
           echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-runner-scale-set-controller"
           exit 1
         fi
         sleep 1
         count=$((count+1))
       done
       kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller
       kubectl get pod -n arc-systems
@@ -453,11 +467,12 @@ jobs:
           echo "Pod found: $POD_NAME"
           break
         fi
-        if [ "$count" -ge 10 ]; then
+        if [ "$count" -ge 60 ]; then
           echo "Timeout waiting for listener pod with label actions.github.com/scale-set-name=$ARC_NAME"
           exit 1
         fi
         sleep 1
         count=$((count+1))
       done
       kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME
       kubectl get pod -n arc-systems
@@ -476,6 +491,7 @@ jobs:
 
   anonymous-proxy-setup:
     runs-on: ubuntu-latest
     timeout-minutes: 20
+    if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.id == github.repository_id
     env:
       WORKFLOW_FILE: "arc-test-workflow.yaml"
@@ -510,11 +526,12 @@ jobs:
           echo "Pod found: $POD_NAME"
           break
         fi
-        if [ "$count" -ge 10 ]; then
+        if [ "$count" -ge 60 ]; then
           echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-runner-scale-set-controller"
           exit 1
         fi
         sleep 1
         count=$((count+1))
       done
       kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller
       kubectl get pod -n arc-systems
@@ -545,11 +562,12 @@ jobs:
           echo "Pod found: $POD_NAME"
           break
         fi
-        if [ "$count" -ge 10 ]; then
+        if [ "$count" -ge 60 ]; then
           echo "Timeout waiting for listener pod with label actions.github.com/scale-set-name=$ARC_NAME"
           exit 1
         fi
         sleep 1
         count=$((count+1))
       done
       kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME
       kubectl get pod -n arc-systems
@@ -568,6 +586,7 @@ jobs:
 
   self-signed-ca-setup:
     runs-on: ubuntu-latest
     timeout-minutes: 20
+    if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.id == github.repository_id
     env:
       WORKFLOW_FILE: "arc-test-workflow.yaml"
@@ -602,11 +621,12 @@ jobs:
           echo "Pod found: $POD_NAME"
           break
         fi
-        if [ "$count" -ge 10 ]; then
+        if [ "$count" -ge 60 ]; then
           echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-runner-scale-set-controller"
           exit 1
         fi
         sleep 1
         count=$((count+1))
       done
       kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller
       kubectl get pod -n arc-systems
@@ -629,11 +649,12 @@ jobs:
           cat ${{ github.workspace }}/mitmproxy/mitmproxy-ca-cert.pem
           break
         fi
-        if [ "$count" -ge 10 ]; then
+        if [ "$count" -ge 60 ]; then
           echo "Timeout waiting for mitmproxy generate its CA cert"
           exit 1
         fi
         sleep 1
         count=$((count+1))
       done
       sudo cp ${{ github.workspace }}/mitmproxy/mitmproxy-ca-cert.pem ${{ github.workspace }}/mitmproxy/mitmproxy-ca-cert.crt
       sudo chown runner ${{ github.workspace }}/mitmproxy/mitmproxy-ca-cert.crt
@@ -661,11 +682,12 @@ jobs:
           echo "Pod found: $POD_NAME"
           break
         fi
-        if [ "$count" -ge 10 ]; then
+        if [ "$count" -ge 60 ]; then
           echo "Timeout waiting for listener pod with label actions.github.com/scale-set-name=$ARC_NAME"
           exit 1
         fi
         sleep 1
         count=$((count+1))
       done
       kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME
       kubectl get pod -n arc-systems
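Every hunk in this workflow applies the same fix: the polling loops previously gave a pod at most 10 seconds to appear before failing, which was flaky; the cap is raised to 60. A minimal standalone sketch of the polling pattern these hunks converge on (the function name and arguments are illustrative, not taken from the workflow):

```bash
#!/usr/bin/env bash
# Sketch of the wait-for-pod pattern used throughout the e2e workflow,
# with the raised 60-attempt cap.
wait_for_pod() {
  local namespace="$1" label="$2" max_attempts="${3:-60}"
  local count=0 pod_name=""
  while true; do
    pod_name=$(kubectl get pods -n "$namespace" -l "$label" \
      -o jsonpath='{.items[0].metadata.name}' 2>/dev/null || true)
    if [ -n "$pod_name" ]; then
      echo "Pod found: $pod_name"
      break
    fi
    if [ "$count" -ge "$max_attempts" ]; then
      echo "Timeout waiting for pod with label $label"
      return 1
    fi
    sleep 1
    count=$((count + 1))
  done
  # Existence and readiness are checked separately, as in the workflow.
  kubectl wait --timeout=30s --for=condition=ready pod -n "$namespace" -l "$label"
}

# Example: wait_for_pod arc-systems app.kubernetes.io/name=gha-runner-scale-set-controller
```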
44  .github/workflows/publish-chart.yaml (vendored)
@@ -14,6 +14,12 @@ on:
       - '!charts/gha-runner-scale-set/**'
       - '!**.md'
   workflow_dispatch:
+    inputs:
+      force:
+        description: 'Force publish even if the chart version is not bumped'
+        type: boolean
+        required: true
+        default: false
 
 env:
   KUBE_SCORE_VERSION: 1.10.0
@@ -45,20 +51,12 @@ jobs:
           chmod 755 kube-score
 
       - name: Kube-score generated manifests
-        run: helm template --values charts/.ci/values-kube-score.yaml charts/* | ./kube-score score -
-          --ignore-test pod-networkpolicy
-          --ignore-test deployment-has-poddisruptionbudget
-          --ignore-test deployment-has-host-podantiaffinity
-          --ignore-test container-security-context
-          --ignore-test pod-probes
-          --ignore-test container-image-tag
-          --enable-optional-test container-security-context-privileged
-          --enable-optional-test container-security-context-readonlyrootfilesystem
+        run: helm template --values charts/.ci/values-kube-score.yaml charts/* | ./kube-score score - --ignore-test pod-networkpolicy --ignore-test deployment-has-poddisruptionbudget --ignore-test deployment-has-host-podantiaffinity --ignore-test container-security-context --ignore-test pod-probes --ignore-test container-image-tag --enable-optional-test container-security-context-privileged --enable-optional-test container-security-context-readonlyrootfilesystem
 
       # python is a requirement for the chart-testing action below (supports yamllint among other tests)
       - uses: actions/setup-python@v4
         with:
-          python-version: '3.7'
+          python-version: '3.11'
 
       - name: Set up chart-testing
         uses: helm/chart-testing-action@v2.3.1
@@ -98,9 +96,12 @@ jobs:
           NEW_CHART_VERSION=$(echo "$CHART_TEXT" | grep version: | cut -d ' ' -f 2)
           RELEASE_LIST=$(curl -fs https://api.github.com/repos/${{ github.repository }}/releases | jq .[].tag_name | grep actions-runner-controller | cut -d '"' -f 2 | cut -d '-' -f 4)
           LATEST_RELEASED_CHART_VERSION=$(echo $RELEASE_LIST | cut -d ' ' -f 1)
 
           echo "CHART_VERSION_IN_MASTER=$NEW_CHART_VERSION" >> $GITHUB_ENV
           echo "LATEST_CHART_VERSION=$LATEST_RELEASED_CHART_VERSION" >> $GITHUB_ENV
-          if [[ $NEW_CHART_VERSION != $LATEST_RELEASED_CHART_VERSION ]]; then
+
+          # Always publish if force is true
+          if [[ $NEW_CHART_VERSION != $LATEST_RELEASED_CHART_VERSION || "${{ inputs.force }}" == "true" ]]; then
            echo "publish=true" >> $GITHUB_OUTPUT
          else
            echo "publish=false" >> $GITHUB_OUTPUT
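The new condition lets a manual `workflow_dispatch` run bypass the version comparison. Stripped of workflow templating, the publish decision reduces to the following (version values hard-coded here purely for illustration):

```bash
# Illustration of the publish decision after this change: publish when the
# chart version in master differs from the latest released one, or when the
# workflow was dispatched manually with force=true.
NEW_CHART_VERSION="0.23.3"               # parsed from Chart.yaml
LATEST_RELEASED_CHART_VERSION="0.23.1"   # parsed from the GitHub releases API
FORCE="false"                            # the new workflow_dispatch input

if [[ "$NEW_CHART_VERSION" != "$LATEST_RELEASED_CHART_VERSION" || "$FORCE" == "true" ]]; then
  echo "publish=true"
else
  echo "publish=false"
fi
```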
@@ -170,27 +171,10 @@ jobs:
             --owner "$(echo ${{ github.repository }} | cut -d '/' -f 1)" \
             --git-repo "$(echo ${{ github.repository }} | cut -d '/' -f 2)" \
             --index-path ${{ github.workspace }}/index.yaml \
             --push \
             --pages-branch 'gh-pages' \
             --pages-index-path 'index.yaml'
 
-      # This step is required to not throw away changes made to the index.yaml on every new chart release.
-      #
-      # We update the index.yaml in the actions-runner-controller.github.io repo
-      # by appending the new chart version to the index.yaml saved in actions-runner-controller repo
-      # and copying and commiting the updated index.yaml to the github.io one.
-      # See below for more context:
-      # - https://github.com/actions-runner-controller/actions-runner-controller.github.io/pull/2
-      # - https://github.com/actions/actions-runner-controller/pull/2452
-      - name: Commit and push to actions/actions-runner-controller
-        run: |
-          git checkout gh-pages
-          git config user.name "$GITHUB_ACTOR"
-          git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
-          git add .
-          git commit -m "Update index.yaml"
-          git push
-        working-directory: ${{ github.workspace }}
 
       # Chart Release was never intended to publish to a different repo
       # this workaround is intended to move the index.yaml to the target repo
       # where the github pages are hosted
@@ -220,4 +204,4 @@ jobs:
           echo "New helm chart has been published" >> $GITHUB_STEP_SUMMARY
           echo "" >> $GITHUB_STEP_SUMMARY
           echo "**Status:**" >> $GITHUB_STEP_SUMMARY
-          echo "- New [index.yaml](https://github.com/${{ env.CHART_TARGET_ORG }}/${{ env.CHART_TARGET_REPO }}/tree/main/actions-runner-controller) pushed" >> $GITHUB_STEP_SUMMARY
+          echo "- New [index.yaml](https://github.com/${{ env.CHART_TARGET_ORG }}/${{ env.CHART_TARGET_REPO }}/tree/master/actions-runner-controller) pushed" >> $GITHUB_STEP_SUMMARY
@@ -46,6 +46,13 @@ jobs:
           # If inputs.ref is empty, it'll resolve to the default branch
           ref: ${{ inputs.ref }}
 
+      - name: Check chart versions
+        # Binary version and chart versions need to match.
+        # In case of an upgrade, the controller will try to clean up
+        # resources with older versions that should have been cleaned up
+        # during the upgrade process
+        run: ./hack/check-gh-chart-versions.sh ${{ inputs.release_tag_name }}
+
       - name: Resolve parameters
         id: resolve_parameters
         run: |
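The new step guards against shipping a release whose controller binary version and chart version have drifted apart. The actual `hack/check-gh-chart-versions.sh` is not shown in this diff; a hypothetical sketch of such a check, under assumed field names and chart path:

```bash
# Hypothetical version-consistency check in the spirit of
# hack/check-gh-chart-versions.sh. The chart path and yq usage are
# assumptions, not taken from the repository.
RELEASE_TAG="$1"                        # e.g. gha-runner-scale-set-0.4.0
CHART=charts/gha-runner-scale-set/Chart.yaml

chart_version=$(yq '.version' "$CHART")
app_version=$(yq '.appVersion' "$CHART")

if [ "$chart_version" != "$app_version" ]; then
  echo "chart version ($chart_version) != app version ($app_version)" >&2
  exit 1
fi
case "$RELEASE_TAG" in
  *"$chart_version") echo "versions match: $chart_version" ;;
  *) echo "release tag $RELEASE_TAG does not end with $chart_version" >&2; exit 1 ;;
esac
```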
21  .github/workflows/release-runners.yaml (vendored)
@@ -1,4 +1,4 @@
-name: Runners
+name: Release Runner Images
 
 # Revert to https://github.com/actions-runner-controller/releases#releases
 # for details on why we use this approach
@@ -18,7 +18,6 @@ env:
   TARGET_ORG: actions-runner-controller
   TARGET_WORKFLOW: release-runners.yaml
   DOCKER_VERSION: 20.10.23
-  RUNNER_CONTAINER_HOOKS_VERSION: 0.2.0
 
 jobs:
   build-runners:
@@ -27,10 +26,12 @@ jobs:
     steps:
       - uses: actions/checkout@v3
       - name: Get runner version
-        id: runner_version
+        id: versions
         run: |
-          version=$(echo -n $(cat runner/VERSION))
-          echo runner_version=$version >> $GITHUB_OUTPUT
+          runner_current_version="$(echo -n $(cat runner/VERSION | grep 'RUNNER_VERSION=' | cut -d '=' -f2))"
+          container_hooks_current_version="$(echo -n $(cat runner/VERSION | grep 'RUNNER_CONTAINER_HOOKS_VERSION=' | cut -d '=' -f2))"
+          echo runner_version=$runner_current_version >> $GITHUB_OUTPUT
+          echo container_hooks_version=$container_hooks_current_version >> $GITHUB_OUTPUT
 
       - name: Get Token
         id: get_workflow_token
@@ -42,7 +43,8 @@ jobs:
 
       - name: Trigger Build And Push Runner Images To Registries
         env:
-          RUNNER_VERSION: ${{ steps.runner_version.outputs.runner_version }}
+          RUNNER_VERSION: ${{ steps.versions.outputs.runner_version }}
+          CONTAINER_HOOKS_VERSION: ${{ steps.versions.outputs.container_hooks_version }}
         run: |
           # Authenticate
           gh auth login --with-token <<< ${{ steps.get_workflow_token.outputs.token }}
@@ -51,20 +53,21 @@ jobs:
           gh workflow run ${{ env.TARGET_WORKFLOW }} -R ${{ env.TARGET_ORG }}/releases \
             -f runner_version=${{ env.RUNNER_VERSION }} \
             -f docker_version=${{ env.DOCKER_VERSION }} \
-            -f runner_container_hooks_version=${{ env.RUNNER_CONTAINER_HOOKS_VERSION }} \
+            -f runner_container_hooks_version=${{ env.CONTAINER_HOOKS_VERSION }} \
             -f sha='${{ github.sha }}' \
             -f push_to_registries=${{ env.PUSH_TO_REGISTRIES }}
 
       - name: Job summary
         env:
-          RUNNER_VERSION: ${{ steps.runner_version.outputs.runner_version }}
+          RUNNER_VERSION: ${{ steps.versions.outputs.runner_version }}
+          CONTAINER_HOOKS_VERSION: ${{ steps.versions.outputs.container_hooks_version }}
         run: |
           echo "The [release-runners.yaml](https://github.com/actions-runner-controller/releases/blob/main/.github/workflows/release-runners.yaml) workflow has been triggered!" >> $GITHUB_STEP_SUMMARY
           echo "" >> $GITHUB_STEP_SUMMARY
           echo "**Parameters:**" >> $GITHUB_STEP_SUMMARY
           echo "- runner_version: ${{ env.RUNNER_VERSION }}" >> $GITHUB_STEP_SUMMARY
           echo "- docker_version: ${{ env.DOCKER_VERSION }}" >> $GITHUB_STEP_SUMMARY
-          echo "- runner_container_hooks_version: ${{ env.RUNNER_CONTAINER_HOOKS_VERSION }}" >> $GITHUB_STEP_SUMMARY
+          echo "- runner_container_hooks_version: ${{ env.CONTAINER_HOOKS_VERSION }}" >> $GITHUB_STEP_SUMMARY
           echo "- sha: ${{ github.sha }}" >> $GITHUB_STEP_SUMMARY
           echo "- push_to_registries: ${{ env.PUSH_TO_REGISTRIES }}" >> $GITHUB_STEP_SUMMARY
           echo "" >> $GITHUB_STEP_SUMMARY
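After this change, `runner/VERSION` is no longer a bare version string: the workflow greps it for `KEY=value` pairs, so the container-hooks version travels in the same file instead of a hard-coded env var. A small sketch of the parsing under that assumed file format (the example versions come from elsewhere in this diff):

```bash
# Assumed runner/VERSION format after this change (two KEY=value lines):
#   RUNNER_VERSION=2.304.0
#   RUNNER_CONTAINER_HOOKS_VERSION=0.3.1
# The parsing below mirrors the grep/cut pipeline in the workflow.
runner_current_version="$(grep 'RUNNER_VERSION=' runner/VERSION | cut -d '=' -f2)"
container_hooks_current_version="$(grep 'RUNNER_CONTAINER_HOOKS_VERSION=' runner/VERSION | cut -d '=' -f2)"
echo "runner: $runner_current_version, hooks: $container_hooks_current_version"
```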
80  .github/workflows/update-runners.yaml (vendored)
@@ -16,21 +16,34 @@ jobs:
     env:
       GH_TOKEN: ${{ github.token }}
     outputs:
-      current_version: ${{ steps.versions.outputs.current_version }}
-      latest_version: ${{ steps.versions.outputs.latest_version }}
+      runner_current_version: ${{ steps.runner_versions.outputs.runner_current_version }}
+      runner_latest_version: ${{ steps.runner_versions.outputs.runner_latest_version }}
+      container_hooks_current_version: ${{ steps.container_hooks_versions.outputs.container_hooks_current_version }}
+      container_hooks_latest_version: ${{ steps.container_hooks_versions.outputs.container_hooks_latest_version }}
     steps:
       - uses: actions/checkout@v3
 
-      - name: Get current and latest versions
-        id: versions
+      - name: Get runner current and latest versions
+        id: runner_versions
         run: |
-          CURRENT_VERSION=$(echo -n $(cat runner/VERSION))
+          CURRENT_VERSION="$(echo -n $(cat runner/VERSION | grep 'RUNNER_VERSION=' | cut -d '=' -f2))"
           echo "Current version: $CURRENT_VERSION"
-          echo current_version=$CURRENT_VERSION >> $GITHUB_OUTPUT
+          echo runner_current_version=$CURRENT_VERSION >> $GITHUB_OUTPUT
 
           LATEST_VERSION=$(gh release list --exclude-drafts --exclude-pre-releases --limit 1 -R actions/runner | grep -oP '(?<=v)[0-9.]+' | head -1)
           echo "Latest version: $LATEST_VERSION"
-          echo latest_version=$LATEST_VERSION >> $GITHUB_OUTPUT
+          echo runner_latest_version=$LATEST_VERSION >> $GITHUB_OUTPUT
+
+      - name: Get container-hooks current and latest versions
+        id: container_hooks_versions
+        run: |
+          CURRENT_VERSION="$(echo -n $(cat runner/VERSION | grep 'RUNNER_CONTAINER_HOOKS_VERSION=' | cut -d '=' -f2))"
+          echo "Current version: $CURRENT_VERSION"
+          echo container_hooks_current_version=$CURRENT_VERSION >> $GITHUB_OUTPUT
+
+          LATEST_VERSION=$(gh release list --exclude-drafts --exclude-pre-releases --limit 1 -R actions/runner-container-hooks | grep -oP '(?<=v)[0-9.]+' | head -1)
+          echo "Latest version: $LATEST_VERSION"
+          echo container_hooks_latest_version=$LATEST_VERSION >> $GITHUB_OUTPUT
 
 # check_pr checks if a PR for the same update already exists. It only runs if
 # runner latest version != our current version. If no existing PR is found,
@@ -38,7 +51,7 @@ jobs:
   check_pr:
     runs-on: ubuntu-latest
     needs: check_versions
-    if: needs.check_versions.outputs.current_version != needs.check_versions.outputs.latest_version
+    if: needs.check_versions.outputs.runner_current_version != needs.check_versions.outputs.runner_latest_version || needs.check_versions.outputs.container_hooks_current_version != needs.check_versions.outputs.container_hooks_latest_version
     outputs:
       pr_name: ${{ steps.pr_name.outputs.pr_name }}
     env:
@@ -46,16 +59,35 @@ jobs:
     steps:
       - name: debug
         run:
-          echo ${{ needs.check_versions.outputs.current_version }}
-          echo ${{ needs.check_versions.outputs.latest_version }}
+          echo "RUNNER_CURRENT_VERSION=${{ needs.check_versions.outputs.runner_current_version }}"
+          echo "RUNNER_LATEST_VERSION=${{ needs.check_versions.outputs.runner_latest_version }}"
+          echo "CONTAINER_HOOKS_CURRENT_VERSION=${{ needs.check_versions.outputs.container_hooks_current_version }}"
+          echo "CONTAINER_HOOKS_LATEST_VERSION=${{ needs.check_versions.outputs.container_hooks_latest_version }}"
 
       - uses: actions/checkout@v3
 
       - name: PR Name
         id: pr_name
         env:
-          LATEST_VERSION: ${{ needs.check_versions.outputs.latest_version }}
+          RUNNER_CURRENT_VERSION: ${{ needs.check_versions.outputs.runner_current_version }}
+          RUNNER_LATEST_VERSION: ${{ needs.check_versions.outputs.runner_latest_version }}
+          CONTAINER_HOOKS_CURRENT_VERSION: ${{ needs.check_versions.outputs.container_hooks_current_version }}
+          CONTAINER_HOOKS_LATEST_VERSION: ${{ needs.check_versions.outputs.container_hooks_latest_version }}
+        # Generate a PR name with the following title:
+        # Updates: runner to v2.304.0 and container-hooks to v0.3.1
         run: |
-          PR_NAME="Update runner to version ${LATEST_VERSION}"
+          RUNNER_MESSAGE="runner to v${RUNNER_LATEST_VERSION}"
+          CONTAINER_HOOKS_MESSAGE="container-hooks to v${CONTAINER_HOOKS_LATEST_VERSION}"
+
+          PR_NAME="Updates:"
+          if [ "$RUNNER_CURRENT_VERSION" != "$RUNNER_LATEST_VERSION" ]
+          then
+            PR_NAME="$PR_NAME $RUNNER_MESSAGE"
+          fi
+          if [ "$CONTAINER_HOOKS_CURRENT_VERSION" != "$CONTAINER_HOOKS_LATEST_VERSION" ]
+          then
+            PR_NAME="$PR_NAME $CONTAINER_HOOKS_MESSAGE"
+          fi
 
           result=$(gh pr list --search "$PR_NAME" --json number --jq ".[].number" --limit 1)
           if [ -z "$result" ]
@@ -80,21 +112,29 @@ jobs:
       actions: write
     env:
       GH_TOKEN: ${{ github.token }}
-      CURRENT_VERSION: ${{ needs.check_versions.outputs.current_version }}
-      LATEST_VERSION: ${{ needs.check_versions.outputs.latest_version }}
+      RUNNER_CURRENT_VERSION: ${{ needs.check_versions.outputs.runner_current_version }}
+      RUNNER_LATEST_VERSION: ${{ needs.check_versions.outputs.runner_latest_version }}
+      CONTAINER_HOOKS_CURRENT_VERSION: ${{ needs.check_versions.outputs.container_hooks_current_version }}
+      CONTAINER_HOOKS_LATEST_VERSION: ${{ needs.check_versions.outputs.container_hooks_latest_version }}
       PR_NAME: ${{ needs.check_pr.outputs.pr_name }}
 
     steps:
      - uses: actions/checkout@v3
 
      - name: New branch
-       run: git checkout -b update-runner-$LATEST_VERSION
+       run: git checkout -b update-runner-"$(date +%Y-%m-%d)"
 
      - name: Update files
        run: |
-         sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" runner/VERSION
-         sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" runner/Makefile
-         sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" Makefile
-         sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" test/e2e/e2e_test.go
-         sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" .github/workflows/e2e-test-linux-vm.yaml
+         sed -i "s/$RUNNER_CURRENT_VERSION/$RUNNER_LATEST_VERSION/g" runner/VERSION
+         sed -i "s/$RUNNER_CURRENT_VERSION/$RUNNER_LATEST_VERSION/g" runner/Makefile
+         sed -i "s/$RUNNER_CURRENT_VERSION/$RUNNER_LATEST_VERSION/g" Makefile
+         sed -i "s/$RUNNER_CURRENT_VERSION/$RUNNER_LATEST_VERSION/g" test/e2e/e2e_test.go
+
+         sed -i "s/$CONTAINER_HOOKS_CURRENT_VERSION/$CONTAINER_HOOKS_LATEST_VERSION/g" runner/VERSION
+         sed -i "s/$CONTAINER_HOOKS_CURRENT_VERSION/$CONTAINER_HOOKS_LATEST_VERSION/g" runner/Makefile
+         sed -i "s/$CONTAINER_HOOKS_CURRENT_VERSION/$CONTAINER_HOOKS_LATEST_VERSION/g" Makefile
+         sed -i "s/$CONTAINER_HOOKS_CURRENT_VERSION/$CONTAINER_HOOKS_LATEST_VERSION/g" test/e2e/e2e_test.go
 
      - name: Commit changes
        run: |
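The PR title is now assembled from whichever components actually changed, and that same title doubles as the deduplication key for `gh pr list --search`. A standalone sketch with example versions filled in:

```bash
# Sketch of the PR-name assembly with example versions; only components
# whose current and latest versions differ end up in the title.
RUNNER_CURRENT_VERSION="2.303.0"; RUNNER_LATEST_VERSION="2.304.0"
CONTAINER_HOOKS_CURRENT_VERSION="0.3.1"; CONTAINER_HOOKS_LATEST_VERSION="0.3.1"

PR_NAME="Updates:"
if [ "$RUNNER_CURRENT_VERSION" != "$RUNNER_LATEST_VERSION" ]; then
  PR_NAME="$PR_NAME runner to v${RUNNER_LATEST_VERSION}"
fi
if [ "$CONTAINER_HOOKS_CURRENT_VERSION" != "$CONTAINER_HOOKS_LATEST_VERSION" ]; then
  PR_NAME="$PR_NAME container-hooks to v${CONTAINER_HOOKS_LATEST_VERSION}"
fi
echo "$PR_NAME"   # -> "Updates: runner to v2.304.0"

# The same string is then used to look for an existing open PR:
# gh pr list --search "$PR_NAME" --json number --jq ".[].number" --limit 1
```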
2  .gitignore (vendored)
@@ -35,3 +35,5 @@ bin
 .DS_STORE
 
 /test-assets
+
+/.tools
4  Makefile
@@ -5,7 +5,7 @@ else
 endif
 DOCKER_USER ?= $(shell echo ${DOCKER_IMAGE_NAME} | cut -d / -f1)
 VERSION ?= dev
-RUNNER_VERSION ?= 2.303.0
+RUNNER_VERSION ?= 2.304.0
 TARGETPLATFORM ?= $(shell arch)
 RUNNER_NAME ?= ${DOCKER_USER}/actions-runner
 RUNNER_TAG ?= ${VERSION}
@@ -202,7 +202,7 @@ generate: controller-gen
 
 # Run shellcheck on runner scripts
 shellcheck: shellcheck-install
-	$(TOOLS_PATH)/shellcheck --shell bash --source-path runner runner/*.sh
+	$(TOOLS_PATH)/shellcheck --shell bash --source-path runner runner/*.sh hack/*.sh
 
 docker-buildx:
	export DOCKER_CLI_EXPERIMENTAL=enabled ;\
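The shellcheck target now also lints the `hack/` scripts; the invocation itself is unchanged. A sketch of what runs (that `TOOLS_PATH` resolves to `.tools` is an assumption suggested by the new `/.tools` gitignore entry, not confirmed by this Makefile excerpt):

```bash
# Run the expanded lint target; it now covers hack/*.sh as well.
make shellcheck

# Roughly equivalent direct invocation, assuming TOOLS_PATH=.tools:
.tools/shellcheck --shell bash --source-path runner runner/*.sh hack/*.sh
```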
@@ -6,17 +6,14 @@
 
 ## People
 
-`actions-runner-controller` is an open-source project currently developed and maintained in collaboration with maintainers @mumoshu and @toast-gear, various [contributors](https://github.com/actions/actions-runner-controller/graphs/contributors), and the [awesome community](https://github.com/actions/actions-runner-controller/discussions), mostly in their spare time.
+`actions-runner-controller` is an open-source project currently developed and maintained in collaboration with the GitHub Actions team, external maintainers @mumoshu and @toast-gear, various [contributors](https://github.com/actions/actions-runner-controller/graphs/contributors), and the [awesome community](https://github.com/actions/actions-runner-controller/discussions).
 
-If you think the project is awesome and it's becoming a basis for your important business, consider [sponsoring us](https://github.com/sponsors/actions-runner-controller)!
+If you think the project is awesome and is adding value to your business, please consider directly sponsoring [community maintainers](https://github.com/sponsors/actions-runner-controller) and individual contributors via GitHub Sponsors.
 
 In case you are already the employer of one of contributors, sponsoring via GitHub Sponsors might not be an option. Just support them in other means!
 
-We don't currently have [any sponsors dedicated to this project yet](https://github.com/sponsors/actions-runner-controller).
-
-However, [HelloFresh](https://www.hellofreshgroup.com/en/) has recently started sponsoring @mumoshu for this project along with his other works. A part of their sponsorship will enable @mumoshu to add an E2E test to keep ARC even more reliable on AWS. Thank you for your sponsorship!
-
-[<img src="https://user-images.githubusercontent.com/22009/170898715-07f02941-35ec-418b-8cd4-251b422fa9ac.png" width="219" height="71" />](https://careers.hellofresh.com/)
+See [the sponsorship dashboard](https://github.com/sponsors/actions-runner-controller) for the former and the current sponsors.
 
 ## Status
@@ -102,6 +102,7 @@ if [ "${tool}" == "helm" ]; then
     --set githubWebhookServer.podAnnotations.test-id=${TEST_ID} \
+    --set actionsMetricsServer.podAnnotations.test-id=${TEST_ID} \
     ${flags[@]} --set image.imagePullPolicy=${IMAGE_PULL_POLICY} \
     --set image.dindSidecarRepositoryAndTag=${DIND_SIDECAR_REPOSITORY_AND_TAG} \
     -f ${VALUES_FILE}
   set +v
   # To prevent `CustomResourceDefinition.apiextensions.k8s.io "runners.actions.summerwind.dev" is invalid: metadata.annotations: Too long: must have at most 262144 bytes`
@@ -6,6 +6,10 @@ OP=${OP:-apply}
 
 RUNNER_LABEL=${RUNNER_LABEL:-self-hosted}
 
+# See https://github.com/actions/actions-runner-controller/issues/2123
+kubectl delete secret generic docker-config || :
+kubectl create secret generic docker-config --from-file .dockerconfigjson=<(jq -M 'del(.aliases)' $HOME/.docker/config.json) --type=kubernetes.io/dockerconfigjson || :
+
 cat acceptance/testdata/kubernetes_container_mode.envsubst.yaml | NAMESPACE=${RUNNER_NAMESPACE} envsubst | kubectl apply -f -
 
 if [ -n "${TEST_REPO}" ]; then
27  acceptance/testdata/runnerdeploy.envsubst.yaml (vendored)
@@ -95,6 +95,24 @@ spec:
           # that part is created by dockerd.
           mountPath: /home/runner/.local
           readOnly: false
+        # See https://github.com/actions/actions-runner-controller/issues/2123
+        # Be sure to omit the "aliases" field from the config.json.
+        # Otherwise you may encounter nasty errors like:
+        #   $ docker build
+        #   docker: 'buildx' is not a docker command.
+        #   See 'docker --help'
+        # due to the incompatibility between your host docker config.json and the runner environment.
+        # That is, your host dockcer config.json might contain this:
+        #   "aliases": {
+        #     "builder": "buildx"
+        #   }
+        # And this results in the above error when the runner does not have buildx installed yet.
+        - name: docker-config
+          mountPath: /home/runner/.docker/config.json
+          subPath: config.json
+          readOnly: true
+        - name: docker-config-root
+          mountPath: /home/runner/.docker
       volumes:
       - name: rootless-dind-work-dir
         ephemeral:
@@ -105,6 +123,15 @@ spec:
             resources:
               requests:
                 storage: 3Gi
+      - name: docker-config
+        # Refer to .dockerconfigjson/.docker/config.json
+        secret:
+          secretName: docker-config
+          items:
+          - key: .dockerconfigjson
+            path: config.json
+      - name: docker-config-root
+        emptyDir: {}
 
 #
 # Non-standard working directory
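The comments above explain why the host's Docker config must be sanitized before being mounted into runners: a host-side `aliases` entry mapping `builder` to `buildx` breaks `docker build` inside a runner that has no buildx installed. The acceptance script already strips that key with jq; as a standalone usage illustration:

```bash
# Strip the "aliases" key from a local Docker config before turning it into
# a Kubernetes secret (mirrors the jq invocation in the acceptance script).
jq -M 'del(.aliases)' "$HOME/.docker/config.json" > /tmp/config.sanitized.json

kubectl create secret generic docker-config \
  --from-file .dockerconfigjson=/tmp/config.sanitized.json \
  --type=kubernetes.io/dockerconfigjson
```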
@@ -15,10 +15,10 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.23.1
+version: 0.23.3
 
 # Used as the default manager tag value when no tag property is provided in the values.yaml
-appVersion: 0.27.2
+appVersion: 0.27.4
 
 home: https://github.com/actions/actions-runner-controller
@@ -46,7 +46,7 @@ All additional docs are kept in the `docs/` folder, this README is solely for do
 | `metrics.port` | Set port of metrics service | 8443 |
 | `metrics.proxy.enabled` | Deploy kube-rbac-proxy container in controller pod | true |
 | `metrics.proxy.image.repository` | The "repository/image" of the kube-proxy container | quay.io/brancz/kube-rbac-proxy |
-| `metrics.proxy.image.tag` | The tag of the kube-proxy image to use when pulling the container | v0.10.0 |
+| `metrics.proxy.image.tag` | The tag of the kube-proxy image to use when pulling the container | v0.13.1 |
 | `metrics.serviceMonitorLabels` | Set labels to apply to ServiceMonitor resources | |
 | `imagePullSecrets` | Specifies the secret to be used when pulling the controller pod containers | |
 | `fullnameOverride` | Override the full resource names | |
@@ -102,8 +102,11 @@ All additional docs are kept in the `docs/` folder, this README is solely for do
 | `githubWebhookServer.tolerations` | Set the githubWebhookServer pod tolerations | |
 | `githubWebhookServer.affinity` | Set the githubWebhookServer pod affinity rules | |
 | `githubWebhookServer.priorityClassName` | Set the githubWebhookServer pod priorityClassName | |
+| `githubWebhookServer.terminationGracePeriodSeconds` | Set the githubWebhookServer pod terminationGracePeriodSeconds. Useful when using preStop hooks to drain/sleep. | `10` |
+| `githubWebhookServer.lifecycle` | Set the githubWebhookServer pod lifecycle hooks | `{}` |
 | `githubWebhookServer.service.type` | Set githubWebhookServer service type | |
 | `githubWebhookServer.service.ports` | Set githubWebhookServer service ports | `[{"port":80, "targetPort:"http", "protocol":"TCP", "name":"http"}]` |
+| `githubWebhookServer.service.loadBalancerSourceRanges` | Set githubWebhookServer loadBalancerSourceRanges for restricting loadBalancer type services | `[]` |
 | `githubWebhookServer.ingress.enabled` | Deploy an ingress kind for the githubWebhookServer | false |
 | `githubWebhookServer.ingress.annotations` | Set annotations for the ingress kind | |
 | `githubWebhookServer.ingress.hosts` | Set hosts configuration for ingress | `[{"host": "chart-example.local", "paths": []}]` |
@@ -115,9 +118,9 @@ All additional docs are kept in the `docs/` folder, this README is solely for do
 | `actionsMetricsServer.logLevel` | Set the log level of the actionsMetricsServer container | |
 | `actionsMetricsServer.logFormat` | Set the log format of the actionsMetricsServer controller. Valid options are "text" and "json" | text |
 | `actionsMetricsServer.enabled` | Deploy the actions metrics server pod | false |
-| `actionsMetricsServer.secret.enabled` | Passes the webhook hook secret to the github-webhook-server | false |
+| `actionsMetricsServer.secret.enabled` | Passes the webhook hook secret to the actions-metrics-server | false |
 | `actionsMetricsServer.secret.create` | Deploy the webhook hook secret | false |
-| `actionsMetricsServer.secret.name` | Set the name of the webhook hook secret | github-webhook-server |
+| `actionsMetricsServer.secret.name` | Set the name of the webhook hook secret | actions-metrics-server |
 | `actionsMetricsServer.secret.github_webhook_secret_token` | Set the webhook secret token value | |
 | `actionsMetricsServer.imagePullSecrets` | Specifies the secret to be used when pulling the actionsMetricsServer pod containers | |
 | `actionsMetricsServer.nameOverride` | Override the resource name prefix | |
@@ -135,8 +138,11 @@ All additional docs are kept in the `docs/` folder, this README is solely for do
 | `actionsMetricsServer.tolerations` | Set the actionsMetricsServer pod tolerations | |
 | `actionsMetricsServer.affinity` | Set the actionsMetricsServer pod affinity rules | |
 | `actionsMetricsServer.priorityClassName` | Set the actionsMetricsServer pod priorityClassName | |
+| `actionsMetricsServer.terminationGracePeriodSeconds` | Set the actionsMetricsServer pod terminationGracePeriodSeconds. Useful when using preStop hooks to drain/sleep. | `10` |
+| `actionsMetricsServer.lifecycle` | Set the actionsMetricsServer pod lifecycle hooks | `{}` |
 | `actionsMetricsServer.service.type` | Set actionsMetricsServer service type | |
 | `actionsMetricsServer.service.ports` | Set actionsMetricsServer service ports | `[{"port":80, "targetPort:"http", "protocol":"TCP", "name":"http"}]` |
+| `actionsMetricsServer.service.loadBalancerSourceRanges` | Set actionsMetricsServer loadBalancerSourceRanges for restricting loadBalancer type services | `[]` |
 | `actionsMetricsServer.ingress.enabled` | Deploy an ingress kind for the actionsMetricsServer | false |
 | `actionsMetricsServer.ingress.annotations` | Set annotations for the ingress kind | |
 | `actionsMetricsServer.ingress.hosts` | Set hosts configuration for ingress | `[{"host": "chart-example.local", "paths": []}]` |
@@ -147,5 +153,5 @@ All additional docs are kept in the `docs/` folder, this README is solely for do
 | `actionsMetrics.port` | Set port of actions metrics service | 8443 |
 | `actionsMetrics.proxy.enabled` | Deploy kube-rbac-proxy container in controller pod | true |
 | `actionsMetrics.proxy.image.repository` | The "repository/image" of the kube-proxy container | quay.io/brancz/kube-rbac-proxy |
-| `actionsMetrics.proxy.image.tag` | The tag of the kube-proxy image to use when pulling the container | v0.10.0 |
+| `actionsMetrics.proxy.image.tag` | The tag of the kube-proxy image to use when pulling the container | v0.13.1 |
 | `actionsMetrics.serviceMonitorLabels` | Set labels to apply to ServiceMonitor resources | |
@@ -50,6 +50,12 @@ spec:
         {{- end }}
           command:
             - "/actions-metrics-server"
+          {{- if .Values.actionsMetricsServer.lifecycle }}
+          {{- with .Values.actionsMetricsServer.lifecycle }}
+          lifecycle:
+            {{- toYaml . | nindent 10 }}
+          {{- end }}
+          {{- end }}
           env:
             - name: GITHUB_WEBHOOK_SECRET_TOKEN
               valueFrom:
@@ -142,7 +148,7 @@ spec:
         securityContext:
           {{- toYaml .Values.securityContext | nindent 12 }}
         {{- end }}
-      terminationGracePeriodSeconds: 10
+      terminationGracePeriodSeconds: {{ .Values.actionsMetricsServer.terminationGracePeriodSeconds }}
       {{- with .Values.actionsMetricsServer.nodeSelector }}
       nodeSelector:
         {{- toYaml . | nindent 8 }}
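The template now exposes lifecycle hooks and the grace period as chart values, typically paired so a preStop sleep fits inside the grace window. A hypothetical values file exercising the new knobs (all values below are examples, not defaults):

```bash
# Render the chart with a preStop sleep plus a matching grace period.
# The chart path is an assumption about the local checkout layout.
cat > /tmp/values-lifecycle.yaml <<'EOF'
actionsMetricsServer:
  enabled: true
  terminationGracePeriodSeconds: 30
  lifecycle:
    preStop:
      exec:
        command: ["sleep", "20"]
EOF

helm template arc charts/actions-runner-controller -f /tmp/values-lifecycle.yaml
```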
@@ -0,0 +1,90 @@
+{{- if .Values.actionsMetricsServer.enabled }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  creationTimestamp: null
+  name: {{ include "actions-runner-controller-actions-metrics-server.roleName" . }}
+rules:
+- apiGroups:
+  - actions.summerwind.dev
+  resources:
+  - horizontalrunnerautoscalers
+  verbs:
+  - get
+  - list
+  - patch
+  - update
+  - watch
+- apiGroups:
+  - actions.summerwind.dev
+  resources:
+  - horizontalrunnerautoscalers/finalizers
+  verbs:
+  - create
+  - delete
+  - get
+  - list
+  - patch
+  - update
+  - watch
+- apiGroups:
+  - actions.summerwind.dev
+  resources:
+  - horizontalrunnerautoscalers/status
+  verbs:
+  - get
+  - patch
+  - update
+- apiGroups:
+  - actions.summerwind.dev
+  resources:
+  - runnersets
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - actions.summerwind.dev
+  resources:
+  - runnerdeployments
+  verbs:
+  - create
+  - delete
+  - get
+  - list
+  - patch
+  - update
+  - watch
+- apiGroups:
+  - actions.summerwind.dev
+  resources:
+  - runnerdeployments/finalizers
+  verbs:
+  - create
+  - delete
+  - get
+  - list
+  - patch
+  - update
+  - watch
+- apiGroups:
+  - actions.summerwind.dev
+  resources:
+  - runnerdeployments/status
+  verbs:
+  - get
+  - patch
+  - update
+- apiGroups:
+  - authentication.k8s.io
+  resources:
+  - tokenreviews
+  verbs:
+  - create
+- apiGroups:
+  - authorization.k8s.io
+  resources:
+  - subjectaccessreviews
+  verbs:
+  - create
+{{- end }}
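This new ClusterRole gives the actions-metrics-server read/write access to the `actions.summerwind.dev` autoscaling resources, plus token and subject-access reviews (the latter are what kube-rbac-proxy needs). A quick way to sanity-check the binding once deployed (namespace and service-account names here are placeholders, not chart output):

```bash
# Verify the deployed role grants what the template declares.
# "arc-system" and "arc-actions-metrics-server" are placeholder names.
kubectl auth can-i list horizontalrunnerautoscalers.actions.summerwind.dev \
  --as system:serviceaccount:arc-system:arc-actions-metrics-server

kubectl auth can-i create tokenreviews.authentication.k8s.io \
  --as system:serviceaccount:arc-system:arc-actions-metrics-server
```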
@@ -0,0 +1,14 @@
+{{- if .Values.actionsMetricsServer.enabled }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: {{ include "actions-runner-controller-actions-metrics-server.roleName" . }}
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: {{ include "actions-runner-controller-actions-metrics-server.roleName" . }}
+subjects:
+- kind: ServiceAccount
+  name: {{ include "actions-runner-controller-actions-metrics-server.serviceAccountName" . }}
+  namespace: {{ .Release.Namespace }}
+{{- end }}
@@ -5,7 +5,7 @@ metadata:
   name: {{ include "actions-runner-controller-actions-metrics-server.fullname" . }}
   namespace: {{ .Release.Namespace }}
   labels:
-    {{- include "actions-runner-controller.labels" . | nindent 4 }}
+    {{- include "actions-runner-controller-actions-metrics-server.selectorLabels" . | nindent 4 }}
   {{- if .Values.actionsMetricsServer.service.annotations }}
   annotations:
     {{ toYaml .Values.actionsMetricsServer.service.annotations | nindent 4 }}
@@ -23,4 +23,10 @@ spec:
   {{- end }}
   selector:
     {{- include "actions-runner-controller-actions-metrics-server.selectorLabels" . | nindent 4 }}
+  {{- if .Values.actionsMetricsServer.service.loadBalancerSourceRanges }}
+  loadBalancerSourceRanges:
+  {{- range $ip := .Values.actionsMetricsServer.service.loadBalancerSourceRanges }}
+    - {{ $ip -}}
+  {{- end }}
+  {{- end }}
 {{- end }}
@@ -47,6 +47,7 @@ authSecret:
   #github_basicauth_username: ""
   #github_basicauth_password: ""
 
+# http(s) should be specified for dockerRegistryMirror, e.g.: dockerRegistryMirror="https://<your-docker-registry-mirror>"
 dockerRegistryMirror: ""
 image:
   repository: "summerwind/actions-runner-controller"
@@ -359,6 +360,7 @@ actionsMetricsServer:
         protocol: TCP
         name: http
         #nodePort: someFixedPortForUseWithTerraformCdkCfnEtc
+  loadBalancerSourceRanges: []
   ingress:
     enabled: false
     ingressClassName: ""
@@ -388,4 +390,5 @@ actionsMetricsServer:
   #  - secretName: chart-example-tls
   #    hosts:
   #      - chart-example.local
 
+  terminationGracePeriodSeconds: 10
+  lifecycle: {}
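The new comment documents that `dockerRegistryMirror` must carry an explicit scheme. A usage sketch at install time (the hostname and Helm repo alias are placeholders):

```bash
# Example: point runners at a pull-through registry mirror when installing
# the chart; "registry-mirror.example.com" is a placeholder hostname.
helm upgrade --install actions-runner-controller \
  actions-runner-controller/actions-runner-controller \
  --set dockerRegistryMirror="https://registry-mirror.example.com"
```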
@@ -78,6 +78,13 @@ rules:
   - get
   - patch
   - update
+- apiGroups:
+  - actions.github.com
+  resources:
+  - ephemeralrunnersets/finalizers
+  verbs:
+  - patch
+  - update
 - apiGroups:
   - actions.github.com
   resources:
@@ -52,6 +52,13 @@ rules:
   - get
   - patch
   - update
+- apiGroups:
+  - actions.github.com
+  resources:
+  - ephemeralrunnersets/finalizers
+  verbs:
+  - patch
+  - update
 - apiGroups:
   - actions.github.com
   resources:
@@ -169,7 +169,7 @@ func TestTemplate_CreateManagerClusterRole(t *testing.T) {
 
 	assert.Empty(t, managerClusterRole.Namespace, "ClusterRole should not have a namespace")
 	assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-cluster-role", managerClusterRole.Name)
-	assert.Equal(t, 15, len(managerClusterRole.Rules))
+	assert.Equal(t, 16, len(managerClusterRole.Rules))
 
 	_, err = helm.RenderTemplateE(t, options, helmChartPath, releaseName, []string{"templates/manager_single_namespace_controller_role.yaml"})
 	assert.ErrorContains(t, err, "could not find template templates/manager_single_namespace_controller_role.yaml in chart", "We should get an error because the template should be skipped")
@@ -843,7 +843,7 @@ func TestTemplate_CreateManagerSingleNamespaceRole(t *testing.T) {
 
 	assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-single-namespace-role", managerSingleNamespaceWatchRole.Name)
 	assert.Equal(t, "demo", managerSingleNamespaceWatchRole.Namespace)
-	assert.Equal(t, 13, len(managerSingleNamespaceWatchRole.Rules))
+	assert.Equal(t, 14, len(managerSingleNamespaceWatchRole.Rules))
 }
 
 func TestTemplate_ManagerSingleNamespaceRoleBinding(t *testing.T) {
@@ -5,7 +5,7 @@ kind: Kustomization
 images:
 - name: controller
   newName: summerwind/actions-runner-controller
-  newTag: dev
+  newTag: latest
 
 replacements:
 - path: env-replacement.yaml
@@ -102,6 +102,13 @@ rules:
   - patch
   - update
   - watch
+- apiGroups:
+  - actions.github.com
+  resources:
+  - ephemeralrunnersets/finalizers
+  verbs:
+  - patch
+  - update
 - apiGroups:
   - actions.github.com
   resources:
@@ -31,6 +31,6 @@ All additional docs are kept in the `docs/` folder, this README is solely for do
 | `autoscaler.enabled` | Enable the HorizontalRunnerAutoscaler, if its enabled then replica count will not be used | true |
 | `autoscaler.minReplicas` | Minimum no of replicas | 1 |
 | `autoscaler.maxReplicas` | Maximum no of replicas | 5 |
-| `autoscaler.scaleDownDelaySecondsAfterScaleOut` | [Anti-Flapping Configuration](https://github.com/actions/actions-runner-controller#anti-flapping-configuration) | 120 |
-| `autoscaler.metrics` | [Pull driven scaling](https://github.com/actions/actions-runner-controller#pull-driven-scaling) | default |
-| `autoscaler.scaleUpTriggers` | [Webhook driven scaling](https://github.com/actions/actions-runner-controller#webhook-driven-scaling) | |
+| `autoscaler.scaleDownDelaySecondsAfterScaleOut` | [Anti-Flapping Configuration](https://github.com/actions/actions-runner-controller/blob/master/docs/automatically-scaling-runners.md#anti-flapping-configuration) | 120 |
+| `autoscaler.metrics` | [Pull driven scaling](https://github.com/actions/actions-runner-controller/blob/master/docs/automatically-scaling-runners.md#pull-driven-scaling) | default |
+| `autoscaler.scaleUpTriggers` | [Webhook driven scaling](https://github.com/actions/actions-runner-controller/blob/master/docs/automatically-scaling-runners.md#webhook-driven-scaling) | |
@@ -17,7 +17,7 @@ runnerLabels:
 replicaCount: 1
 
 # The Runner Group that the runner(s) should be associated with.
-# See https://docs.github.com/en/github-ae@latest/actions/hosting-your-own-runners/managing-access-to-self-hosted-runners-using-groups.
+# See https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners/managing-access-to-self-hosted-runners-using-groups.
 group: Default
 
 autoscaler:
@@ -41,7 +41,6 @@ import (
 
 const (
 	autoscalingListenerContainerName = "autoscaler"
-	autoscalingListenerOwnerKey      = ".metadata.controller"
 	autoscalingListenerFinalizerName = "autoscalinglistener.actions.github.com/finalizer"
 )
 
@@ -246,65 +245,6 @@ func (r *AutoscalingListenerReconciler) Reconcile(ctx context.Context, req ctrl.
 	return ctrl.Result{}, nil
 }
 
-// SetupWithManager sets up the controller with the Manager.
-func (r *AutoscalingListenerReconciler) SetupWithManager(mgr ctrl.Manager) error {
-	groupVersionIndexer := func(rawObj client.Object) []string {
-		groupVersion := v1alpha1.GroupVersion.String()
-		owner := metav1.GetControllerOf(rawObj)
-		if owner == nil {
-			return nil
-		}
-
-		// ...make sure it is owned by this controller
-		if owner.APIVersion != groupVersion || owner.Kind != "AutoscalingListener" {
-			return nil
-		}
-
-		// ...and if so, return it
-		return []string{owner.Name}
-	}
-
-	if err := mgr.GetFieldIndexer().IndexField(context.Background(), &corev1.Pod{}, autoscalingListenerOwnerKey, groupVersionIndexer); err != nil {
-		return err
-	}
-
-	if err := mgr.GetFieldIndexer().IndexField(context.Background(), &corev1.ServiceAccount{}, autoscalingListenerOwnerKey, groupVersionIndexer); err != nil {
-		return err
-	}
-
-	labelBasedWatchFunc := func(obj client.Object) []reconcile.Request {
-		var requests []reconcile.Request
-		labels := obj.GetLabels()
-		namespace, ok := labels["auto-scaling-listener-namespace"]
-		if !ok {
-			return nil
-		}
-
-		name, ok := labels["auto-scaling-listener-name"]
-		if !ok {
-			return nil
-		}
-		requests = append(requests,
-			reconcile.Request{
-				NamespacedName: types.NamespacedName{
-					Name:      name,
-					Namespace: namespace,
-				},
-			},
-		)
-		return requests
-	}
-
-	return ctrl.NewControllerManagedBy(mgr).
-		For(&v1alpha1.AutoscalingListener{}).
-		Owns(&corev1.Pod{}).
-		Owns(&corev1.ServiceAccount{}).
-		Watches(&source.Kind{Type: &rbacv1.Role{}}, handler.EnqueueRequestsFromMapFunc(labelBasedWatchFunc)).
-		Watches(&source.Kind{Type: &rbacv1.RoleBinding{}}, handler.EnqueueRequestsFromMapFunc(labelBasedWatchFunc)).
-		WithEventFilter(predicate.ResourceVersionChangedPredicate{}).
-		Complete(r)
-}
-
 func (r *AutoscalingListenerReconciler) cleanupResources(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, logger logr.Logger) (done bool, err error) {
 	logger.Info("Cleaning up the listener pod")
 	listenerPod := new(corev1.Pod)
@@ -615,3 +555,62 @@ func (r *AutoscalingListenerReconciler) createRoleBindingForListener(ctx context
 		"serviceAccount", serviceAccount.Name)
 	return ctrl.Result{Requeue: true}, nil
 }
+
+// SetupWithManager sets up the controller with the Manager.
+func (r *AutoscalingListenerReconciler) SetupWithManager(mgr ctrl.Manager) error {
+	groupVersionIndexer := func(rawObj client.Object) []string {
+		groupVersion := v1alpha1.GroupVersion.String()
+		owner := metav1.GetControllerOf(rawObj)
+		if owner == nil {
+			return nil
+		}
+
+		// ...make sure it is owned by this controller
+		if owner.APIVersion != groupVersion || owner.Kind != "AutoscalingListener" {
+			return nil
+		}
+
+		// ...and if so, return it
+		return []string{owner.Name}
+	}
+
+	if err := mgr.GetFieldIndexer().IndexField(context.Background(), &corev1.Pod{}, resourceOwnerKey, groupVersionIndexer); err != nil {
+		return err
+	}
+
+	if err := mgr.GetFieldIndexer().IndexField(context.Background(), &corev1.ServiceAccount{}, resourceOwnerKey, groupVersionIndexer); err != nil {
+		return err
+	}
+
+	labelBasedWatchFunc := func(obj client.Object) []reconcile.Request {
+		var requests []reconcile.Request
+		labels := obj.GetLabels()
+		namespace, ok := labels["auto-scaling-listener-namespace"]
+		if !ok {
+			return nil
+		}
+
+		name, ok := labels["auto-scaling-listener-name"]
+		if !ok {
+			return nil
+		}
+		requests = append(requests,
+			reconcile.Request{
+				NamespacedName: types.NamespacedName{
+					Name:      name,
+					Namespace: namespace,
+				},
+			},
+		)
+		return requests
+	}
+
+	return ctrl.NewControllerManagedBy(mgr).
+		For(&v1alpha1.AutoscalingListener{}).
+		Owns(&corev1.Pod{}).
+		Owns(&corev1.ServiceAccount{}).
+		Watches(&source.Kind{Type: &rbacv1.Role{}}, handler.EnqueueRequestsFromMapFunc(labelBasedWatchFunc)).
+		Watches(&source.Kind{Type: &rbacv1.RoleBinding{}}, handler.EnqueueRequestsFromMapFunc(labelBasedWatchFunc)).
+		WithEventFilter(predicate.ResourceVersionChangedPredicate{}).
+		Complete(r)
+}
@@ -213,7 +213,7 @@ var _ = Describe("Test AutoScalingListener controller", func() {
 		Eventually(
 			func() error {
 				podList := new(corev1.PodList)
-				err := k8sClient.List(ctx, podList, client.InNamespace(autoscalingListener.Namespace), client.MatchingFields{autoscalingRunnerSetOwnerKey: autoscalingListener.Name})
+				err := k8sClient.List(ctx, podList, client.InNamespace(autoscalingListener.Namespace), client.MatchingFields{resourceOwnerKey: autoscalingListener.Name})
 				if err != nil {
 					return err
 				}
@@ -231,7 +231,7 @@ var _ = Describe("Test AutoScalingListener controller", func() {
 		Eventually(
 			func() error {
 				serviceAccountList := new(corev1.ServiceAccountList)
-				err := k8sClient.List(ctx, serviceAccountList, client.InNamespace(autoscalingListener.Namespace), client.MatchingFields{autoscalingRunnerSetOwnerKey: autoscalingListener.Name})
+				err := k8sClient.List(ctx, serviceAccountList, client.InNamespace(autoscalingListener.Namespace), client.MatchingFields{resourceOwnerKey: autoscalingListener.Name})
 				if err != nil {
 					return err
 				}
@@ -24,6 +24,7 @@ import (
"strings"

"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
"github.com/actions/actions-runner-controller/build"
"github.com/actions/actions-runner-controller/github/actions"
"github.com/go-logr/logr"
corev1 "k8s.io/api/core/v1"
@@ -42,13 +43,10 @@ import (
)

const (
// TODO: Replace with shared image.
autoscalingRunnerSetOwnerKey = ".metadata.controller"
LabelKeyRunnerSpecHash = "runner-spec-hash"
labelKeyRunnerSpecHash = "runner-spec-hash"
autoscalingRunnerSetFinalizerName = "autoscalingrunnerset.actions.github.com/finalizer"
runnerScaleSetIdAnnotationKey = "runner-scale-set-id"
runnerScaleSetNameAnnotationKey = "runner-scale-set-name"
autoscalingRunnerSetCleanupFinalizerName = "actions.github.com/cleanup-protection"
)

// AutoscalingRunnerSetReconciler reconciles a AutoscalingRunnerSet object
@@ -139,6 +137,22 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl
return ctrl.Result{}, nil
}

if autoscalingRunnerSet.Labels[LabelKeyKubernetesVersion] != build.Version {
if err := r.Delete(ctx, autoscalingRunnerSet); err != nil {
log.Error(err, "Failed to delete autoscaling runner set on version mismatch",
"targetVersion", build.Version,
"actualVersion", autoscalingRunnerSet.Labels[LabelKeyKubernetesVersion],
)
return ctrl.Result{}, nil
}

log.Info("Autoscaling runner set version doesn't match the build version. Deleting the resource.",
"targetVersion", build.Version,
"actualVersion", autoscalingRunnerSet.Labels[LabelKeyKubernetesVersion],
)
return ctrl.Result{}, nil
}

if !controllerutil.ContainsFinalizer(autoscalingRunnerSet, autoscalingRunnerSetFinalizerName) {
log.Info("Adding finalizer")
if err := patch(ctx, r.Client, autoscalingRunnerSet, func(obj *v1alpha1.AutoscalingRunnerSet) {
@@ -201,10 +215,10 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl

desiredSpecHash := autoscalingRunnerSet.RunnerSetSpecHash()
for _, runnerSet := range existingRunnerSets.all() {
log.Info("Find existing ephemeral runner set", "name", runnerSet.Name, "specHash", runnerSet.Labels[LabelKeyRunnerSpecHash])
log.Info("Find existing ephemeral runner set", "name", runnerSet.Name, "specHash", runnerSet.Labels[labelKeyRunnerSpecHash])
}

if desiredSpecHash != latestRunnerSet.Labels[LabelKeyRunnerSpecHash] {
if desiredSpecHash != latestRunnerSet.Labels[labelKeyRunnerSpecHash] {
log.Info("Latest runner set spec hash does not match the current autoscaling runner set. Creating a new runner set")
return r.createEphemeralRunnerSet(ctx, autoscalingRunnerSet, log)
}
@@ -232,7 +246,7 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl
}

// Our listener pod is out of date, so we need to delete it to get a new recreate.
if listener.Labels[LabelKeyRunnerSpecHash] != autoscalingRunnerSet.ListenerSpecHash() {
if listener.Labels[labelKeyRunnerSpecHash] != autoscalingRunnerSet.ListenerSpecHash() {
log.Info("RunnerScaleSetListener is out of date. Deleting it so that it is recreated", "name", listener.Name)
if err := r.Delete(ctx, listener); err != nil {
if kerrors.IsNotFound(err) {
@@ -601,7 +615,7 @@ func (r *AutoscalingRunnerSetReconciler) createAutoScalingListenerForRunnerSet(c

func (r *AutoscalingRunnerSetReconciler) listEphemeralRunnerSets(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet) (*EphemeralRunnerSets, error) {
list := new(v1alpha1.EphemeralRunnerSetList)
if err := r.List(ctx, list, client.InNamespace(autoscalingRunnerSet.Namespace), client.MatchingFields{autoscalingRunnerSetOwnerKey: autoscalingRunnerSet.Name}); err != nil {
if err := r.List(ctx, list, client.InNamespace(autoscalingRunnerSet.Namespace), client.MatchingFields{resourceOwnerKey: autoscalingRunnerSet.Name}); err != nil {
return nil, fmt.Errorf("failed to list ephemeral runner sets: %v", err)
}

@@ -694,7 +708,7 @@ func (r *AutoscalingRunnerSetReconciler) SetupWithManager(mgr ctrl.Manager) erro
return []string{owner.Name}
}

if err := mgr.GetFieldIndexer().IndexField(context.Background(), &v1alpha1.EphemeralRunnerSet{}, autoscalingRunnerSetOwnerKey, groupVersionIndexer); err != nil {
if err := mgr.GetFieldIndexer().IndexField(context.Background(), &v1alpha1.EphemeralRunnerSet{}, resourceOwnerKey, groupVersionIndexer); err != nil {
return err
}

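Editor's note: the rename above only touches the index key; the indexer function keeps the shape the context lines imply. A sketch assembled from those context lines (an approximation, not the exact committed code) shows why List calls with client.MatchingFields{resourceOwnerKey: name} can resolve against an in-memory index instead of scanning every object:

func groupVersionIndexer(rawObj client.Object) []string {
	groupVersion := v1alpha1.GroupVersion.String()
	// Extract the controlling owner and index the child under its name.
	owner := metav1.GetControllerOf(rawObj)
	if owner == nil {
		return nil
	}
	// Only index children owned by an AutoscalingRunnerSet of our API group.
	if owner.APIVersion != groupVersion || owner.Kind != "AutoscalingRunnerSet" {
		return nil
	}
	return []string{owner.Name}
}
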
@@ -754,12 +768,12 @@ func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeKubernetesModeRol
err := c.client.Get(ctx, types.NamespacedName{Name: roleBindingName, Namespace: c.autoscalingRunnerSet.Namespace}, roleBinding)
switch {
case err == nil:
if !controllerutil.ContainsFinalizer(roleBinding, autoscalingRunnerSetCleanupFinalizerName) {
if !controllerutil.ContainsFinalizer(roleBinding, AutoscalingRunnerSetCleanupFinalizerName) {
c.logger.Info("Kubernetes mode role binding finalizer has already been removed", "name", roleBindingName)
return
}
err = patch(ctx, c.client, roleBinding, func(obj *rbacv1.RoleBinding) {
controllerutil.RemoveFinalizer(obj, autoscalingRunnerSetCleanupFinalizerName)
controllerutil.RemoveFinalizer(obj, AutoscalingRunnerSetCleanupFinalizerName)
})
if err != nil {
c.err = fmt.Errorf("failed to patch kubernetes mode role binding without finalizer: %w", err)
@@ -797,12 +811,12 @@ func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeKubernetesModeRol
err := c.client.Get(ctx, types.NamespacedName{Name: roleName, Namespace: c.autoscalingRunnerSet.Namespace}, role)
switch {
case err == nil:
if !controllerutil.ContainsFinalizer(role, autoscalingRunnerSetCleanupFinalizerName) {
if !controllerutil.ContainsFinalizer(role, AutoscalingRunnerSetCleanupFinalizerName) {
c.logger.Info("Kubernetes mode role finalizer has already been removed", "name", roleName)
return
}
err = patch(ctx, c.client, role, func(obj *rbacv1.Role) {
controllerutil.RemoveFinalizer(obj, autoscalingRunnerSetCleanupFinalizerName)
controllerutil.RemoveFinalizer(obj, AutoscalingRunnerSetCleanupFinalizerName)
})
if err != nil {
c.err = fmt.Errorf("failed to patch kubernetes mode role without finalizer: %w", err)
@@ -841,12 +855,12 @@ func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeKubernetesModeSer
err := c.client.Get(ctx, types.NamespacedName{Name: serviceAccountName, Namespace: c.autoscalingRunnerSet.Namespace}, serviceAccount)
switch {
case err == nil:
if !controllerutil.ContainsFinalizer(serviceAccount, autoscalingRunnerSetCleanupFinalizerName) {
if !controllerutil.ContainsFinalizer(serviceAccount, AutoscalingRunnerSetCleanupFinalizerName) {
c.logger.Info("Kubernetes mode service account finalizer has already been removed", "name", serviceAccountName)
return
}
err = patch(ctx, c.client, serviceAccount, func(obj *corev1.ServiceAccount) {
controllerutil.RemoveFinalizer(obj, autoscalingRunnerSetCleanupFinalizerName)
controllerutil.RemoveFinalizer(obj, AutoscalingRunnerSetCleanupFinalizerName)
})
if err != nil {
c.err = fmt.Errorf("failed to patch kubernetes mode service account without finalizer: %w", err)
@@ -885,12 +899,12 @@ func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeNoPermissionServi
err := c.client.Get(ctx, types.NamespacedName{Name: serviceAccountName, Namespace: c.autoscalingRunnerSet.Namespace}, serviceAccount)
switch {
case err == nil:
if !controllerutil.ContainsFinalizer(serviceAccount, autoscalingRunnerSetCleanupFinalizerName) {
if !controllerutil.ContainsFinalizer(serviceAccount, AutoscalingRunnerSetCleanupFinalizerName) {
c.logger.Info("No permission service account finalizer has already been removed", "name", serviceAccountName)
return
}
err = patch(ctx, c.client, serviceAccount, func(obj *corev1.ServiceAccount) {
controllerutil.RemoveFinalizer(obj, autoscalingRunnerSetCleanupFinalizerName)
controllerutil.RemoveFinalizer(obj, AutoscalingRunnerSetCleanupFinalizerName)
})
if err != nil {
c.err = fmt.Errorf("failed to patch service account without finalizer: %w", err)
@@ -929,12 +943,12 @@ func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeGitHubSecretFinal
err := c.client.Get(ctx, types.NamespacedName{Name: githubSecretName, Namespace: c.autoscalingRunnerSet.Namespace}, githubSecret)
switch {
case err == nil:
if !controllerutil.ContainsFinalizer(githubSecret, autoscalingRunnerSetCleanupFinalizerName) {
if !controllerutil.ContainsFinalizer(githubSecret, AutoscalingRunnerSetCleanupFinalizerName) {
c.logger.Info("GitHub secret finalizer has already been removed", "name", githubSecretName)
return
}
err = patch(ctx, c.client, githubSecret, func(obj *corev1.Secret) {
controllerutil.RemoveFinalizer(obj, autoscalingRunnerSetCleanupFinalizerName)
controllerutil.RemoveFinalizer(obj, AutoscalingRunnerSetCleanupFinalizerName)
})
if err != nil {
c.err = fmt.Errorf("failed to patch GitHub secret without finalizer: %w", err)
@@ -973,12 +987,12 @@ func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeManagerRoleBindin
err := c.client.Get(ctx, types.NamespacedName{Name: managerRoleBindingName, Namespace: c.autoscalingRunnerSet.Namespace}, roleBinding)
switch {
case err == nil:
if !controllerutil.ContainsFinalizer(roleBinding, autoscalingRunnerSetCleanupFinalizerName) {
if !controllerutil.ContainsFinalizer(roleBinding, AutoscalingRunnerSetCleanupFinalizerName) {
c.logger.Info("Manager role binding finalizer has already been removed", "name", managerRoleBindingName)
return
}
err = patch(ctx, c.client, roleBinding, func(obj *rbacv1.RoleBinding) {
controllerutil.RemoveFinalizer(obj, autoscalingRunnerSetCleanupFinalizerName)
controllerutil.RemoveFinalizer(obj, AutoscalingRunnerSetCleanupFinalizerName)
})
if err != nil {
c.err = fmt.Errorf("failed to patch manager role binding without finalizer: %w", err)
@@ -1017,12 +1031,12 @@ func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeManagerRoleFinali
err := c.client.Get(ctx, types.NamespacedName{Name: managerRoleName, Namespace: c.autoscalingRunnerSet.Namespace}, role)
switch {
case err == nil:
if !controllerutil.ContainsFinalizer(role, autoscalingRunnerSetCleanupFinalizerName) {
if !controllerutil.ContainsFinalizer(role, AutoscalingRunnerSetCleanupFinalizerName) {
c.logger.Info("Manager role finalizer has already been removed", "name", managerRoleName)
return
}
err = patch(ctx, c.client, role, func(obj *rbacv1.Role) {
controllerutil.RemoveFinalizer(obj, autoscalingRunnerSetCleanupFinalizerName)
controllerutil.RemoveFinalizer(obj, AutoscalingRunnerSetCleanupFinalizerName)
})
if err != nil {
c.err = fmt.Errorf("failed to patch manager role without finalizer: %w", err)

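Editor's note: the six removeXxxFinalizer helpers in the hunks above repeat one get/check/patch shape, differing only in resource type and log message. A condensed sketch of that shared pattern (the function name and merge-patch style are illustrative, not from the commit; it assumes the file's client, controllerutil, kerrors, and types imports):

func removeCleanupFinalizer(ctx context.Context, c client.Client, key types.NamespacedName, obj client.Object) error {
	if err := c.Get(ctx, key, obj); err != nil {
		if kerrors.IsNotFound(err) {
			return nil // resource already gone; nothing left to release
		}
		return err
	}
	if !controllerutil.ContainsFinalizer(obj, AutoscalingRunnerSetCleanupFinalizerName) {
		return nil // finalizer already removed on a previous reconcile
	}
	original := obj.DeepCopyObject().(client.Object)
	controllerutil.RemoveFinalizer(obj, AutoscalingRunnerSetCleanupFinalizerName)
	// Patch only the finalizer change so concurrent writers are not clobbered.
	return c.Patch(ctx, obj, client.MergeFrom(original))
}
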
@@ -27,6 +27,7 @@ import (
"k8s.io/apimachinery/pkg/types"

"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
"github.com/actions/actions-runner-controller/build"
"github.com/actions/actions-runner-controller/github/actions"
"github.com/actions/actions-runner-controller/github/actions/fake"
"github.com/actions/actions-runner-controller/github/actions/testserver"
@@ -38,13 +39,25 @@ const (
autoscalingRunnerSetTestGitHubToken = "gh_token"
)

var _ = Describe("Test AutoScalingRunnerSet controller", func() {
var _ = Describe("Test AutoScalingRunnerSet controller", Ordered, func() {
var ctx context.Context
var mgr ctrl.Manager
var autoscalingNS *corev1.Namespace
var autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet
var configSecret *corev1.Secret

var originalBuildVersion string
buildVersion := "0.1.0"

BeforeAll(func() {
originalBuildVersion = build.Version
build.Version = buildVersion
})

AfterAll(func() {
build.Version = originalBuildVersion
})

BeforeEach(func() {
ctx = context.Background()
autoscalingNS, mgr = createNamespace(GinkgoT(), k8sClient)
@@ -67,6 +80,9 @@ var _ = Describe("Test AutoScalingRunnerSet controller", func() {
ObjectMeta: metav1.ObjectMeta{
Name: "test-asrs",
Namespace: autoscalingNS.Name,
Labels: map[string]string{
LabelKeyKubernetesVersion: buildVersion,
},
},
Spec: v1alpha1.AutoscalingRunnerSetSpec{
GitHubConfigUrl: "https://github.com/owner/repo",
@@ -280,10 +296,10 @@ var _ = Describe("Test AutoScalingRunnerSet controller", func() {
return "", fmt.Errorf("We should have only 1 EphemeralRunnerSet, but got %v", len(runnerSetList.Items))
}

return runnerSetList.Items[0].Labels[LabelKeyRunnerSpecHash], nil
return runnerSetList.Items[0].Labels[labelKeyRunnerSpecHash], nil
},
autoscalingRunnerSetTestTimeout,
autoscalingRunnerSetTestInterval).ShouldNot(BeEquivalentTo(runnerSet.Labels[LabelKeyRunnerSpecHash]), "New EphemeralRunnerSet should be created")
autoscalingRunnerSetTestInterval).ShouldNot(BeEquivalentTo(runnerSet.Labels[labelKeyRunnerSpecHash]), "New EphemeralRunnerSet should be created")

// We should create a new listener
Eventually(
@@ -474,7 +490,19 @@ var _ = Describe("Test AutoScalingRunnerSet controller", func() {
})
})

var _ = Describe("Test AutoScalingController updates", func() {
var _ = Describe("Test AutoScalingController updates", Ordered, func() {
var originalBuildVersion string
buildVersion := "0.1.0"

BeforeAll(func() {
originalBuildVersion = build.Version
build.Version = buildVersion
})

AfterAll(func() {
build.Version = originalBuildVersion
})

Context("Creating autoscaling runner set with RunnerScaleSetName set", func() {
var ctx context.Context
var mgr ctrl.Manager
@@ -483,6 +511,7 @@ var _ = Describe("Test AutoScalingController updates", func() {
var configSecret *corev1.Secret

BeforeEach(func() {
originalBuildVersion = build.Version
ctx = context.Background()
autoscalingNS, mgr = createNamespace(GinkgoT(), k8sClient)
configSecret = createDefaultSecret(GinkgoT(), k8sClient, autoscalingNS.Name)
@@ -528,6 +557,9 @@ var _ = Describe("Test AutoScalingController updates", func() {
ObjectMeta: metav1.ObjectMeta{
Name: "test-asrs",
Namespace: autoscalingNS.Name,
Labels: map[string]string{
LabelKeyKubernetesVersion: buildVersion,
},
},
Spec: v1alpha1.AutoscalingRunnerSetSpec{
GitHubConfigUrl: "https://github.com/owner/repo",
@@ -598,7 +630,18 @@ var _ = Describe("Test AutoScalingController updates", func() {
})
})

var _ = Describe("Test AutoscalingController creation failures", func() {
var _ = Describe("Test AutoscalingController creation failures", Ordered, func() {
var originalBuildVersion string
buildVersion := "0.1.0"

BeforeAll(func() {
originalBuildVersion = build.Version
build.Version = buildVersion
})

AfterAll(func() {
build.Version = originalBuildVersion
})
Context("When autoscaling runner set creation fails on the client", func() {
var ctx context.Context
var mgr ctrl.Manager
@@ -629,6 +672,9 @@ var _ = Describe("Test AutoscalingController creation failures", func() {
ObjectMeta: metav1.ObjectMeta{
Name: "test-asrs",
Namespace: autoscalingNS.Name,
Labels: map[string]string{
LabelKeyKubernetesVersion: buildVersion,
},
},
Spec: v1alpha1.AutoscalingRunnerSetSpec{
GitHubConfigUrl: "https://github.com/owner/repo",
@@ -707,7 +753,18 @@ var _ = Describe("Test AutoscalingController creation failures", func() {
})
})

var _ = Describe("Test Client optional configuration", func() {
var _ = Describe("Test client optional configuration", Ordered, func() {
var originalBuildVersion string
buildVersion := "0.1.0"

BeforeAll(func() {
originalBuildVersion = build.Version
build.Version = buildVersion
})

AfterAll(func() {
build.Version = originalBuildVersion
})
Context("When specifying a proxy", func() {
var ctx context.Context
var mgr ctrl.Manager
@@ -747,6 +804,9 @@ var _ = Describe("Test Client optional configuration", func() {
ObjectMeta: metav1.ObjectMeta{
Name: "test-asrs",
Namespace: autoscalingNS.Name,
Labels: map[string]string{
LabelKeyKubernetesVersion: buildVersion,
},
},
Spec: v1alpha1.AutoscalingRunnerSetSpec{
GitHubConfigUrl: "http://example.com/org/repo",
@@ -823,6 +883,9 @@ var _ = Describe("Test Client optional configuration", func() {
ObjectMeta: metav1.ObjectMeta{
Name: "test-asrs",
Namespace: autoscalingNS.Name,
Labels: map[string]string{
LabelKeyKubernetesVersion: buildVersion,
},
},
Spec: v1alpha1.AutoscalingRunnerSetSpec{
GitHubConfigUrl: "http://example.com/org/repo",
@@ -939,6 +1002,9 @@ var _ = Describe("Test Client optional configuration", func() {
ObjectMeta: metav1.ObjectMeta{
Name: "test-asrs",
Namespace: autoscalingNS.Name,
Labels: map[string]string{
LabelKeyKubernetesVersion: buildVersion,
},
},
Spec: v1alpha1.AutoscalingRunnerSetSpec{
GitHubConfigUrl: server.ConfigURLForOrg("my-org"),
@@ -989,6 +1055,9 @@ var _ = Describe("Test Client optional configuration", func() {
ObjectMeta: metav1.ObjectMeta{
Name: "test-asrs",
Namespace: autoscalingNS.Name,
Labels: map[string]string{
LabelKeyKubernetesVersion: buildVersion,
},
},
Spec: v1alpha1.AutoscalingRunnerSetSpec{
GitHubConfigUrl: "https://github.com/owner/repo",
@@ -1050,6 +1119,9 @@ var _ = Describe("Test Client optional configuration", func() {
ObjectMeta: metav1.ObjectMeta{
Name: "test-asrs",
Namespace: autoscalingNS.Name,
Labels: map[string]string{
LabelKeyKubernetesVersion: buildVersion,
},
},
Spec: v1alpha1.AutoscalingRunnerSetSpec{
GitHubConfigUrl: "https://github.com/owner/repo",
@@ -1102,7 +1174,19 @@ var _ = Describe("Test Client optional configuration", func() {
})
})

var _ = Describe("Test external permissions cleanup", func() {
|
||||
var _ = Describe("Test external permissions cleanup", Ordered, func() {
|
||||
var originalBuildVersion string
|
||||
buildVersion := "0.1.0"
|
||||
|
||||
BeforeAll(func() {
|
||||
originalBuildVersion = build.Version
|
||||
build.Version = buildVersion
|
||||
})
|
||||
|
||||
AfterAll(func() {
|
||||
build.Version = originalBuildVersion
|
||||
})
|
||||
|
||||
It("Should clean up kubernetes mode permissions", func() {
|
||||
ctx := context.Background()
|
||||
autoscalingNS, mgr := createNamespace(GinkgoT(), k8sClient)
|
||||
@@ -1130,6 +1214,7 @@ var _ = Describe("Test external permissions cleanup", func() {
|
||||
Namespace: autoscalingNS.Name,
|
||||
Labels: map[string]string{
|
||||
"app.kubernetes.io/name": "gha-runner-scale-set",
|
||||
LabelKeyKubernetesVersion: buildVersion,
|
||||
},
|
||||
Annotations: map[string]string{
|
||||
AnnotationKeyKubernetesModeRoleBindingName: "kube-mode-role-binding",
|
||||
@@ -1160,7 +1245,7 @@ var _ = Describe("Test external permissions cleanup", func() {
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: autoscalingRunnerSet.Annotations[AnnotationKeyKubernetesModeRoleName],
|
||||
Namespace: autoscalingRunnerSet.Namespace,
|
||||
Finalizers: []string{autoscalingRunnerSetCleanupFinalizerName},
|
||||
Finalizers: []string{AutoscalingRunnerSetCleanupFinalizerName},
|
||||
},
|
||||
}
|
||||
|
||||
@@ -1171,7 +1256,7 @@ var _ = Describe("Test external permissions cleanup", func() {
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: autoscalingRunnerSet.Annotations[AnnotationKeyKubernetesModeServiceAccountName],
|
||||
Namespace: autoscalingRunnerSet.Namespace,
|
||||
Finalizers: []string{autoscalingRunnerSetCleanupFinalizerName},
|
||||
Finalizers: []string{AutoscalingRunnerSetCleanupFinalizerName},
|
||||
},
|
||||
}
|
||||
|
||||
@@ -1182,7 +1267,7 @@ var _ = Describe("Test external permissions cleanup", func() {
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: autoscalingRunnerSet.Annotations[AnnotationKeyKubernetesModeRoleBindingName],
|
||||
Namespace: autoscalingRunnerSet.Namespace,
|
||||
Finalizers: []string{autoscalingRunnerSetCleanupFinalizerName},
|
||||
Finalizers: []string{AutoscalingRunnerSetCleanupFinalizerName},
|
||||
},
|
||||
Subjects: []rbacv1.Subject{
|
||||
{
|
||||
@@ -1287,6 +1372,7 @@ var _ = Describe("Test external permissions cleanup", func() {
|
||||
Namespace: autoscalingNS.Name,
|
||||
Labels: map[string]string{
|
||||
"app.kubernetes.io/name": "gha-runner-scale-set",
|
||||
LabelKeyKubernetesVersion: buildVersion,
|
||||
},
|
||||
Annotations: map[string]string{
|
||||
AnnotationKeyManagerRoleName: "manager-role",
|
||||
@@ -1317,7 +1403,7 @@ var _ = Describe("Test external permissions cleanup", func() {
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: autoscalingRunnerSet.Annotations[AnnotationKeyGitHubSecretName],
|
||||
Namespace: autoscalingRunnerSet.Namespace,
|
||||
Finalizers: []string{autoscalingRunnerSetCleanupFinalizerName},
|
||||
Finalizers: []string{AutoscalingRunnerSetCleanupFinalizerName},
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
"github_token": []byte(defaultGitHubToken),
|
||||
@@ -1333,7 +1419,7 @@ var _ = Describe("Test external permissions cleanup", func() {
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: autoscalingRunnerSet.Annotations[AnnotationKeyManagerRoleName],
|
||||
Namespace: autoscalingRunnerSet.Namespace,
|
||||
Finalizers: []string{autoscalingRunnerSetCleanupFinalizerName},
|
||||
Finalizers: []string{AutoscalingRunnerSetCleanupFinalizerName},
|
||||
},
|
||||
}
|
||||
|
||||
@@ -1344,7 +1430,7 @@ var _ = Describe("Test external permissions cleanup", func() {
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: autoscalingRunnerSet.Annotations[AnnotationKeyManagerRoleBindingName],
|
||||
Namespace: autoscalingRunnerSet.Namespace,
|
||||
Finalizers: []string{autoscalingRunnerSetCleanupFinalizerName},
|
||||
Finalizers: []string{AutoscalingRunnerSetCleanupFinalizerName},
|
||||
},
|
||||
RoleRef: rbacv1.RoleRef{
|
||||
APIGroup: rbacv1.GroupName,
|
||||
@@ -1360,7 +1446,7 @@ var _ = Describe("Test external permissions cleanup", func() {
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: autoscalingRunnerSet.Annotations[AnnotationKeyNoPermissionServiceAccountName],
|
||||
Namespace: autoscalingRunnerSet.Namespace,
|
||||
Finalizers: []string{autoscalingRunnerSetCleanupFinalizerName},
|
||||
Finalizers: []string{AutoscalingRunnerSetCleanupFinalizerName},
|
||||
},
|
||||
}
|
||||
|
||||
@@ -1465,3 +1551,76 @@ var _ = Describe("Test external permissions cleanup", func() {
|
||||
).Should(BeTrue(), "Expected role to be cleaned up")
|
||||
})
|
||||
})
|
||||
|
||||
var _ = Describe("Test resource version and build version mismatch", func() {
|
||||
It("Should delete and recreate the autoscaling runner set to match the build version", func() {
|
||||
ctx := context.Background()
|
||||
autoscalingNS, mgr := createNamespace(GinkgoT(), k8sClient)
|
||||
|
||||
configSecret := createDefaultSecret(GinkgoT(), k8sClient, autoscalingNS.Name)
|
||||
|
||||
controller := &AutoscalingRunnerSetReconciler{
|
||||
Client: mgr.GetClient(),
|
||||
Scheme: mgr.GetScheme(),
|
||||
Log: logf.Log,
|
||||
ControllerNamespace: autoscalingNS.Name,
|
||||
DefaultRunnerScaleSetListenerImage: "ghcr.io/actions/arc",
|
||||
ActionsClient: fake.NewMultiClient(),
|
||||
}
|
||||
err := controller.SetupWithManager(mgr)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to setup controller")
|
||||
|
||||
originalVersion := build.Version
|
||||
defer func() {
|
||||
build.Version = originalVersion
|
||||
}()
|
||||
build.Version = "0.2.0"
|
||||
|
||||
min := 1
|
||||
max := 10
|
||||
autoscalingRunnerSet := &v1alpha1.AutoscalingRunnerSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-asrs",
|
||||
Namespace: autoscalingNS.Name,
|
||||
Labels: map[string]string{
|
||||
"app.kubernetes.io/name": "gha-runner-scale-set",
|
||||
"app.kubernetes.io/version": "0.1.0",
|
||||
},
|
||||
Annotations: map[string]string{
|
||||
AnnotationKeyKubernetesModeRoleBindingName: "kube-mode-role-binding",
|
||||
AnnotationKeyKubernetesModeRoleName: "kube-mode-role",
|
||||
AnnotationKeyKubernetesModeServiceAccountName: "kube-mode-service-account",
|
||||
},
|
||||
},
|
||||
Spec: v1alpha1.AutoscalingRunnerSetSpec{
|
||||
GitHubConfigUrl: "https://github.com/owner/repo",
|
||||
GitHubConfigSecret: configSecret.Name,
|
||||
MaxRunners: &max,
|
||||
MinRunners: &min,
|
||||
RunnerGroup: "testgroup",
|
||||
Template: corev1.PodTemplateSpec{
|
||||
Spec: corev1.PodSpec{
|
||||
Containers: []corev1.Container{
|
||||
{
|
||||
Name: "runner",
|
||||
Image: "ghcr.io/actions/runner",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// create autoscaling runner set before starting a manager
|
||||
err = k8sClient.Create(ctx, autoscalingRunnerSet)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
startManagers(GinkgoT(), mgr)
|
||||
|
||||
Eventually(func() bool {
|
||||
ars := new(v1alpha1.AutoscalingRunnerSet)
|
||||
err := k8sClient.Get(ctx, types.NamespacedName{Namespace: autoscalingRunnerSet.Namespace, Name: autoscalingRunnerSet.Name}, ars)
|
||||
return errors.IsNotFound(err)
|
||||
}).Should(BeTrue())
|
||||
})
|
||||
})
|
||||
|
||||
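Editor's note: all of the suites above gain the same Ordered/BeforeAll/AfterAll scaffolding because Ginkgo v2 only permits BeforeAll and AfterAll inside Ordered containers; the pattern pins the package-level build.Version for a whole suite and restores it afterwards. Distilled to its essentials (an illustrative suite, not one from the commit):

var _ = Describe("Example suite pinning build.Version", Ordered, func() {
	var originalBuildVersion string
	buildVersion := "0.1.0"

	BeforeAll(func() {
		originalBuildVersion = build.Version
		build.Version = buildVersion // every spec in this container sees the pinned version
	})

	AfterAll(func() {
		build.Version = originalBuildVersion // restore for unrelated suites
	})

	It("sees the pinned version", func() {
		Expect(build.Version).To(Equal(buildVersion))
	})
})
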
@@ -1,5 +1,7 @@
package actionsgithubcom

import corev1 "k8s.io/api/core/v1"

const (
LabelKeyRunnerTemplateHash = "runner-template-hash"
LabelKeyPodTemplateHash = "pod-template-hash"
@@ -16,3 +18,47 @@ const (
EnvVarHTTPSProxy = "https_proxy"
EnvVarNoProxy = "no_proxy"
)

// Labels applied to resources
const (
// Kubernetes labels
LabelKeyKubernetesPartOf = "app.kubernetes.io/part-of"
LabelKeyKubernetesComponent = "app.kubernetes.io/component"
LabelKeyKubernetesVersion = "app.kubernetes.io/version"

// Github labels
LabelKeyGitHubScaleSetName = "actions.github.com/scale-set-name"
LabelKeyGitHubScaleSetNamespace = "actions.github.com/scale-set-namespace"
LabelKeyGitHubEnterprise = "actions.github.com/enterprise"
LabelKeyGitHubOrganization = "actions.github.com/organization"
LabelKeyGitHubRepository = "actions.github.com/repository"
)

// Finalizer used to protect resources from deletion while AutoscalingRunnerSet is running
const AutoscalingRunnerSetCleanupFinalizerName = "actions.github.com/cleanup-protection"

const AnnotationKeyGitHubRunnerGroupName = "actions.github.com/runner-group-name"

// Labels applied to listener roles
const (
labelKeyListenerName = "auto-scaling-listener-name"
labelKeyListenerNamespace = "auto-scaling-listener-namespace"
)

// Annotations applied for later cleanup of resources
const (
AnnotationKeyManagerRoleBindingName = "actions.github.com/cleanup-manager-role-binding"
AnnotationKeyManagerRoleName = "actions.github.com/cleanup-manager-role-name"
AnnotationKeyKubernetesModeRoleName = "actions.github.com/cleanup-kubernetes-mode-role-name"
AnnotationKeyKubernetesModeRoleBindingName = "actions.github.com/cleanup-kubernetes-mode-role-binding-name"
AnnotationKeyKubernetesModeServiceAccountName = "actions.github.com/cleanup-kubernetes-mode-service-account-name"
AnnotationKeyGitHubSecretName = "actions.github.com/cleanup-github-secret-name"
AnnotationKeyNoPermissionServiceAccountName = "actions.github.com/cleanup-no-permission-service-account-name"
)

// DefaultScaleSetListenerImagePullPolicy is the default pull policy applied
// to the listener when ImagePullPolicy is not specified
const DefaultScaleSetListenerImagePullPolicy = corev1.PullIfNotPresent

// ownerKey is field selector matching the owner name of a particular resource
const resourceOwnerKey = ".metadata.controller"

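Editor's note: DefaultScaleSetListenerImagePullPolicy pairs with the SetListenerImagePullPolicy(pullPolicy string) bool function whose signature appears later in this diff. Its body is not shown here; a plausible sketch (an assumption, not the committed code) that validates the string before applying the override:

func SetListenerImagePullPolicy(pullPolicy string) bool {
	switch p := corev1.PullPolicy(pullPolicy); p {
	case corev1.PullAlways, corev1.PullIfNotPresent, corev1.PullNever:
		scaleSetListenerImagePullPolicy = p
		return true
	default:
		return false // reject unknown values; the default policy stays in effect
	}
}
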
@@ -40,7 +40,6 @@ import (
)

const (
ephemeralRunnerSetReconcilerOwnerKey = ".metadata.controller"
ephemeralRunnerSetFinalizerName = "ephemeralrunner.actions.github.com/finalizer"
)

@@ -56,6 +55,7 @@ type EphemeralRunnerSetReconciler struct {

//+kubebuilder:rbac:groups=actions.github.com,resources=ephemeralrunnersets,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=actions.github.com,resources=ephemeralrunnersets/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=actions.github.com,resources=ephemeralrunnersets/finalizers,verbs=update;patch
//+kubebuilder:rbac:groups=actions.github.com,resources=ephemeralrunners,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=actions.github.com,resources=ephemeralrunners/status,verbs=get

@@ -146,7 +146,7 @@ func (r *EphemeralRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl.R
ctx,
ephemeralRunnerList,
client.InNamespace(req.Namespace),
client.MatchingFields{ephemeralRunnerSetReconcilerOwnerKey: req.Name},
client.MatchingFields{resourceOwnerKey: req.Name},
)
if err != nil {
log.Error(err, "Unable to list child ephemeral runners")
@@ -242,7 +242,7 @@ func (r *EphemeralRunnerSetReconciler) cleanUpProxySecret(ctx context.Context, e

func (r *EphemeralRunnerSetReconciler) cleanUpEphemeralRunners(ctx context.Context, ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet, log logr.Logger) (bool, error) {
ephemeralRunnerList := new(v1alpha1.EphemeralRunnerList)
err := r.List(ctx, ephemeralRunnerList, client.InNamespace(ephemeralRunnerSet.Namespace), client.MatchingFields{ephemeralRunnerSetReconcilerOwnerKey: ephemeralRunnerSet.Name})
err := r.List(ctx, ephemeralRunnerList, client.InNamespace(ephemeralRunnerSet.Namespace), client.MatchingFields{resourceOwnerKey: ephemeralRunnerSet.Name})
if err != nil {
return false, fmt.Errorf("failed to list child ephemeral runners: %v", err)
}
@@ -521,7 +521,7 @@ func (r *EphemeralRunnerSetReconciler) actionsClientOptionsFor(ctx context.Conte
// SetupWithManager sets up the controller with the Manager.
func (r *EphemeralRunnerSetReconciler) SetupWithManager(mgr ctrl.Manager) error {
// Index EphemeralRunner owned by EphemeralRunnerSet so we can perform faster look ups.
if err := mgr.GetFieldIndexer().IndexField(context.Background(), &v1alpha1.EphemeralRunner{}, ephemeralRunnerSetReconcilerOwnerKey, func(rawObj client.Object) []string {
if err := mgr.GetFieldIndexer().IndexField(context.Background(), &v1alpha1.EphemeralRunner{}, resourceOwnerKey, func(rawObj client.Object) []string {
groupVersion := v1alpha1.GroupVersion.String()

// grab the job object, extract the owner...

@@ -20,40 +20,6 @@ const (
jitTokenKey = "jitToken"
)

// Labels applied to resources
const (
// Kubernetes labels
LabelKeyKubernetesPartOf = "app.kubernetes.io/part-of"
LabelKeyKubernetesComponent = "app.kubernetes.io/component"
LabelKeyKubernetesVersion = "app.kubernetes.io/version"

// Github labels
LabelKeyGitHubScaleSetName = "actions.github.com/scale-set-name"
LabelKeyGitHubScaleSetNamespace = "actions.github.com/scale-set-namespace"
LabelKeyGitHubEnterprise = "actions.github.com/enterprise"
LabelKeyGitHubOrganization = "actions.github.com/organization"
LabelKeyGitHubRepository = "actions.github.com/repository"
)

const AnnotationKeyGitHubRunnerGroupName = "actions.github.com/runner-group-name"

// Labels applied to listener roles
const (
labelKeyListenerName = "auto-scaling-listener-name"
labelKeyListenerNamespace = "auto-scaling-listener-namespace"
)

// Annotations applied for later cleanup of resources
const (
AnnotationKeyManagerRoleBindingName = "actions.github.com/cleanup-manager-role-binding"
AnnotationKeyManagerRoleName = "actions.github.com/cleanup-manager-role-name"
AnnotationKeyKubernetesModeRoleName = "actions.github.com/cleanup-kubernetes-mode-role-name"
AnnotationKeyKubernetesModeRoleBindingName = "actions.github.com/cleanup-kubernetes-mode-role-binding-name"
AnnotationKeyKubernetesModeServiceAccountName = "actions.github.com/cleanup-kubernetes-mode-service-account-name"
AnnotationKeyGitHubSecretName = "actions.github.com/cleanup-github-secret-name"
AnnotationKeyNoPermissionServiceAccountName = "actions.github.com/cleanup-no-permission-service-account-name"
)

var commonLabelKeys = [...]string{
LabelKeyKubernetesPartOf,
LabelKeyKubernetesComponent,
@@ -67,8 +33,6 @@ var commonLabelKeys = [...]string{

const labelValueKubernetesPartOf = "gha-runner-scale-set"

const DefaultScaleSetListenerImagePullPolicy = corev1.PullIfNotPresent

// scaleSetListenerImagePullPolicy is applied to all listeners
var scaleSetListenerImagePullPolicy = DefaultScaleSetListenerImagePullPolicy

@@ -84,6 +48,62 @@ func SetListenerImagePullPolicy(pullPolicy string) bool {

type resourceBuilder struct{}

func (b *resourceBuilder) newAutoScalingListener(autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet, namespace, image string, imagePullSecrets []corev1.LocalObjectReference) (*v1alpha1.AutoscalingListener, error) {
runnerScaleSetId, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdAnnotationKey])
if err != nil {
return nil, err
}

effectiveMinRunners := 0
effectiveMaxRunners := math.MaxInt32
if autoscalingRunnerSet.Spec.MaxRunners != nil {
effectiveMaxRunners = *autoscalingRunnerSet.Spec.MaxRunners
}
if autoscalingRunnerSet.Spec.MinRunners != nil {
effectiveMinRunners = *autoscalingRunnerSet.Spec.MinRunners
}

githubConfig, err := actions.ParseGitHubConfigFromURL(autoscalingRunnerSet.Spec.GitHubConfigUrl)
if err != nil {
return nil, fmt.Errorf("failed to parse github config from url: %v", err)
}

autoscalingListener := &v1alpha1.AutoscalingListener{
ObjectMeta: metav1.ObjectMeta{
Name: scaleSetListenerName(autoscalingRunnerSet),
Namespace: namespace,
Labels: map[string]string{
LabelKeyGitHubScaleSetNamespace: autoscalingRunnerSet.Namespace,
LabelKeyGitHubScaleSetName: autoscalingRunnerSet.Name,
LabelKeyKubernetesPartOf: labelValueKubernetesPartOf,
LabelKeyKubernetesComponent: "runner-scale-set-listener",
LabelKeyKubernetesVersion: autoscalingRunnerSet.Labels[LabelKeyKubernetesVersion],
LabelKeyGitHubEnterprise: githubConfig.Enterprise,
LabelKeyGitHubOrganization: githubConfig.Organization,
LabelKeyGitHubRepository: githubConfig.Repository,
labelKeyRunnerSpecHash: autoscalingRunnerSet.ListenerSpecHash(),
},
},
Spec: v1alpha1.AutoscalingListenerSpec{
GitHubConfigUrl: autoscalingRunnerSet.Spec.GitHubConfigUrl,
GitHubConfigSecret: autoscalingRunnerSet.Spec.GitHubConfigSecret,
RunnerScaleSetId: runnerScaleSetId,
AutoscalingRunnerSetNamespace: autoscalingRunnerSet.Namespace,
AutoscalingRunnerSetName: autoscalingRunnerSet.Name,
EphemeralRunnerSetName: ephemeralRunnerSet.Name,
MinRunners: effectiveMinRunners,
MaxRunners: effectiveMaxRunners,
Image: image,
ImagePullPolicy: scaleSetListenerImagePullPolicy,
ImagePullSecrets: imagePullSecrets,
Proxy: autoscalingRunnerSet.Spec.Proxy,
GitHubServerTLS: autoscalingRunnerSet.Spec.GitHubServerTLS,
},
}

return autoscalingListener, nil
}

func (b *resourceBuilder) newScaleSetListenerPod(autoscalingListener *v1alpha1.AutoscalingListener, serviceAccount *corev1.ServiceAccount, secret *corev1.Secret, envs ...corev1.EnvVar) *corev1.Pod {
listenerEnv := []corev1.EnvVar{
{
@@ -207,54 +227,6 @@ func (b *resourceBuilder) newScaleSetListenerPod(autoscalingListener *v1alpha1.A
return newRunnerScaleSetListenerPod
}

func (b *resourceBuilder) newEphemeralRunnerSet(autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet) (*v1alpha1.EphemeralRunnerSet, error) {
runnerScaleSetId, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdAnnotationKey])
if err != nil {
return nil, err
}
runnerSpecHash := autoscalingRunnerSet.RunnerSetSpecHash()

newLabels := map[string]string{
LabelKeyRunnerSpecHash: runnerSpecHash,
LabelKeyKubernetesPartOf: labelValueKubernetesPartOf,
LabelKeyKubernetesComponent: "runner-set",
LabelKeyKubernetesVersion: autoscalingRunnerSet.Labels[LabelKeyKubernetesVersion],
LabelKeyGitHubScaleSetName: autoscalingRunnerSet.Name,
LabelKeyGitHubScaleSetNamespace: autoscalingRunnerSet.Namespace,
}

if err := applyGitHubURLLabels(autoscalingRunnerSet.Spec.GitHubConfigUrl, newLabels); err != nil {
return nil, fmt.Errorf("failed to apply GitHub URL labels: %v", err)
}

newAnnotations := map[string]string{
AnnotationKeyGitHubRunnerGroupName: autoscalingRunnerSet.Annotations[AnnotationKeyGitHubRunnerGroupName],
}

newEphemeralRunnerSet := &v1alpha1.EphemeralRunnerSet{
TypeMeta: metav1.TypeMeta{},
ObjectMeta: metav1.ObjectMeta{
GenerateName: autoscalingRunnerSet.ObjectMeta.Name + "-",
Namespace: autoscalingRunnerSet.ObjectMeta.Namespace,
Labels: newLabels,
Annotations: newAnnotations,
},
Spec: v1alpha1.EphemeralRunnerSetSpec{
Replicas: 0,
EphemeralRunnerSpec: v1alpha1.EphemeralRunnerSpec{
RunnerScaleSetId: runnerScaleSetId,
GitHubConfigUrl: autoscalingRunnerSet.Spec.GitHubConfigUrl,
GitHubConfigSecret: autoscalingRunnerSet.Spec.GitHubConfigSecret,
Proxy: autoscalingRunnerSet.Spec.Proxy,
GitHubServerTLS: autoscalingRunnerSet.Spec.GitHubServerTLS,
PodTemplateSpec: autoscalingRunnerSet.Spec.Template,
},
},
}

return newEphemeralRunnerSet, nil
}

func (b *resourceBuilder) newScaleSetListenerServiceAccount(autoscalingListener *v1alpha1.AutoscalingListener) *corev1.ServiceAccount {
return &corev1.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{
@@ -344,60 +316,52 @@ func (b *resourceBuilder) newScaleSetListenerSecretMirror(autoscalingListener *v
return newListenerSecret
}

func (b *resourceBuilder) newAutoScalingListener(autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet, namespace, image string, imagePullSecrets []corev1.LocalObjectReference) (*v1alpha1.AutoscalingListener, error) {
func (b *resourceBuilder) newEphemeralRunnerSet(autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet) (*v1alpha1.EphemeralRunnerSet, error) {
runnerScaleSetId, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdAnnotationKey])
if err != nil {
return nil, err
}
runnerSpecHash := autoscalingRunnerSet.RunnerSetSpecHash()

effectiveMinRunners := 0
effectiveMaxRunners := math.MaxInt32
if autoscalingRunnerSet.Spec.MaxRunners != nil {
effectiveMaxRunners = *autoscalingRunnerSet.Spec.MaxRunners
}
if autoscalingRunnerSet.Spec.MinRunners != nil {
effectiveMinRunners = *autoscalingRunnerSet.Spec.MinRunners
}

githubConfig, err := actions.ParseGitHubConfigFromURL(autoscalingRunnerSet.Spec.GitHubConfigUrl)
if err != nil {
return nil, fmt.Errorf("failed to parse github config from url: %v", err)
}

autoscalingListener := &v1alpha1.AutoscalingListener{
ObjectMeta: metav1.ObjectMeta{
Name: scaleSetListenerName(autoscalingRunnerSet),
Namespace: namespace,
Labels: map[string]string{
LabelKeyGitHubScaleSetNamespace: autoscalingRunnerSet.Namespace,
LabelKeyGitHubScaleSetName: autoscalingRunnerSet.Name,
newLabels := map[string]string{
labelKeyRunnerSpecHash: runnerSpecHash,
LabelKeyKubernetesPartOf: labelValueKubernetesPartOf,
LabelKeyKubernetesComponent: "runner-scale-set-listener",
LabelKeyKubernetesComponent: "runner-set",
LabelKeyKubernetesVersion: autoscalingRunnerSet.Labels[LabelKeyKubernetesVersion],
LabelKeyGitHubEnterprise: githubConfig.Enterprise,
LabelKeyGitHubOrganization: githubConfig.Organization,
LabelKeyGitHubRepository: githubConfig.Repository,
LabelKeyRunnerSpecHash: autoscalingRunnerSet.ListenerSpecHash(),
LabelKeyGitHubScaleSetName: autoscalingRunnerSet.Name,
LabelKeyGitHubScaleSetNamespace: autoscalingRunnerSet.Namespace,
}

if err := applyGitHubURLLabels(autoscalingRunnerSet.Spec.GitHubConfigUrl, newLabels); err != nil {
return nil, fmt.Errorf("failed to apply GitHub URL labels: %v", err)
}

newAnnotations := map[string]string{
AnnotationKeyGitHubRunnerGroupName: autoscalingRunnerSet.Annotations[AnnotationKeyGitHubRunnerGroupName],
}

newEphemeralRunnerSet := &v1alpha1.EphemeralRunnerSet{
TypeMeta: metav1.TypeMeta{},
ObjectMeta: metav1.ObjectMeta{
GenerateName: autoscalingRunnerSet.ObjectMeta.Name + "-",
Namespace: autoscalingRunnerSet.ObjectMeta.Namespace,
Labels: newLabels,
Annotations: newAnnotations,
},
},
Spec: v1alpha1.AutoscalingListenerSpec{
Spec: v1alpha1.EphemeralRunnerSetSpec{
Replicas: 0,
EphemeralRunnerSpec: v1alpha1.EphemeralRunnerSpec{
RunnerScaleSetId: runnerScaleSetId,
GitHubConfigUrl: autoscalingRunnerSet.Spec.GitHubConfigUrl,
GitHubConfigSecret: autoscalingRunnerSet.Spec.GitHubConfigSecret,
RunnerScaleSetId: runnerScaleSetId,
AutoscalingRunnerSetNamespace: autoscalingRunnerSet.Namespace,
AutoscalingRunnerSetName: autoscalingRunnerSet.Name,
EphemeralRunnerSetName: ephemeralRunnerSet.Name,
MinRunners: effectiveMinRunners,
MaxRunners: effectiveMaxRunners,
Image: image,
ImagePullPolicy: scaleSetListenerImagePullPolicy,
ImagePullSecrets: imagePullSecrets,
Proxy: autoscalingRunnerSet.Spec.Proxy,
GitHubServerTLS: autoscalingRunnerSet.Spec.GitHubServerTLS,
PodTemplateSpec: autoscalingRunnerSet.Spec.Template,
},
},
}

return autoscalingListener, nil
return newEphemeralRunnerSet, nil
}

func (b *resourceBuilder) newEphemeralRunner(ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet) *v1alpha1.EphemeralRunner {

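Editor's note: both versions of newEphemeralRunnerSet call applyGitHubURLLabels, whose body is not part of this diff. A hypothetical sketch of what it does, built from identifiers this diff does show (actions.ParseGitHubConfigFromURL and the enterprise/organization/repository label keys); treat it as illustrative rather than the committed implementation:

func applyGitHubURLLabels(url string, labels map[string]string) error {
	githubConfig, err := actions.ParseGitHubConfigFromURL(url)
	if err != nil {
		return fmt.Errorf("failed to parse github config from url: %v", err)
	}
	// Propagate the parsed GitHub scope onto the resource's labels.
	labels[LabelKeyGitHubEnterprise] = githubConfig.Enterprise
	labels[LabelKeyGitHubOrganization] = githubConfig.Organization
	labels[LabelKeyGitHubRepository] = githubConfig.Repository
	return nil
}
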
@@ -36,7 +36,7 @@ func TestLabelPropagation(t *testing.T) {
assert.Equal(t, labelValueKubernetesPartOf, ephemeralRunnerSet.Labels[LabelKeyKubernetesPartOf])
assert.Equal(t, "runner-set", ephemeralRunnerSet.Labels[LabelKeyKubernetesComponent])
assert.Equal(t, autoscalingRunnerSet.Labels[LabelKeyKubernetesVersion], ephemeralRunnerSet.Labels[LabelKeyKubernetesVersion])
assert.NotEmpty(t, ephemeralRunnerSet.Labels[LabelKeyRunnerSpecHash])
assert.NotEmpty(t, ephemeralRunnerSet.Labels[labelKeyRunnerSpecHash])
assert.Equal(t, autoscalingRunnerSet.Name, ephemeralRunnerSet.Labels[LabelKeyGitHubScaleSetName])
assert.Equal(t, autoscalingRunnerSet.Namespace, ephemeralRunnerSet.Labels[LabelKeyGitHubScaleSetNamespace])
assert.Equal(t, "", ephemeralRunnerSet.Labels[LabelKeyGitHubEnterprise])
@@ -49,7 +49,7 @@ func TestLabelPropagation(t *testing.T) {
assert.Equal(t, labelValueKubernetesPartOf, listener.Labels[LabelKeyKubernetesPartOf])
assert.Equal(t, "runner-scale-set-listener", listener.Labels[LabelKeyKubernetesComponent])
assert.Equal(t, autoscalingRunnerSet.Labels[LabelKeyKubernetesVersion], listener.Labels[LabelKeyKubernetesVersion])
assert.NotEmpty(t, ephemeralRunnerSet.Labels[LabelKeyRunnerSpecHash])
assert.NotEmpty(t, ephemeralRunnerSet.Labels[labelKeyRunnerSpecHash])
assert.Equal(t, autoscalingRunnerSet.Name, listener.Labels[LabelKeyGitHubScaleSetName])
assert.Equal(t, autoscalingRunnerSet.Namespace, listener.Labels[LabelKeyGitHubScaleSetNamespace])
assert.Equal(t, "", listener.Labels[LabelKeyGitHubEnterprise])

@@ -115,7 +115,7 @@ func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) Handle(w http.Respons
}()

// respond ok to GET / e.g. for health check
if r.Method == http.MethodGet {
if strings.ToUpper(r.Method) == http.MethodGet {
ok = true
fmt.Fprintln(w, "webhook server is running")
return
@@ -210,7 +210,16 @@ func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) Handle(w http.Respons
if e.GetAction() == "queued" {
target.Amount = 1
break
} else if e.GetAction() == "completed" && e.GetWorkflowJob().GetConclusion() != "skipped" && e.GetWorkflowJob().GetRunnerID() > 0 {
} else if e.GetAction() == "completed" && e.GetWorkflowJob().GetConclusion() != "skipped" {
// We want to filter out "completed" events sent by check runs.
// See https://github.com/actions/actions-runner-controller/issues/2118
// and https://github.com/actions/actions-runner-controller/pull/2119
// But canceled events have runner_id == 0 and GetRunnerID() returns 0 when RunnerID == nil,
// so we need to be more specific in filtering out the check runs.
// See example check run completion at https://gist.github.com/nathanklick/268fea6496a4d7b14cecb2999747ef84
if e.GetWorkflowJob().GetConclusion() == "success" && e.GetWorkflowJob().RunnerID == nil {
log.V(1).Info("Ignoring workflow_job event because it does not relate to a self-hosted runner")
} else {
// A negative amount is processed in the tryScale func as a scale-down request,
// that erases the oldest CapacityReservation with the same amount.
// If the first CapacityReservation was with Replicas=1, this negative scale target erases that,
@@ -218,6 +227,7 @@ func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) Handle(w http.Respons
target.Amount = -1
break
}
}
// If the conclusion is "skipped", we will ignore it and fallthrough to the default case.
fallthrough
default:

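Editor's note: the completed-event branch above is subtle enough to restate. A condensed sketch of the decision it implements (an illustrative helper, not the committed code, which mutates target.Amount inside a switch; it relies on the go-github event types this file already imports):

func scaleAmountFor(e *github.WorkflowJobEvent) (amount int, scale bool) {
	switch e.GetAction() {
	case "queued":
		return 1, true // a queued job needs one more runner
	case "completed":
		if e.GetWorkflowJob().GetConclusion() == "skipped" {
			return 0, false // skipped jobs never held a runner
		}
		// Check-run completions report "success" with a nil runner_id, while
		// canceled jobs carry runner_id == 0; GetRunnerID() returns 0 in both
		// cases, so the explicit nil check is what tells them apart.
		if e.GetWorkflowJob().GetConclusion() == "success" && e.GetWorkflowJob().RunnerID == nil {
			return 0, false // not tied to a self-hosted runner
		}
		return -1, true // release the capacity the finished job occupied
	}
	return 0, false
}
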
@@ -105,12 +105,14 @@ func SetupIntegrationTest(ctx2 context.Context) *testEnvironment {
Log: logf.Log,
Recorder: mgr.GetEventRecorderFor("runnerreplicaset-controller"),
GitHubClient: multiClient,
RunnerImage: "example/runner:test",
DockerImage: "example/docker:test",
Name: controllerName("runner"),
RegistrationRecheckInterval: time.Millisecond * 100,
RegistrationRecheckJitter: time.Millisecond * 10,
UnregistrationRetryDelay: 1 * time.Second,
RunnerPodDefaults: RunnerPodDefaults{
RunnerImage: "example/runner:test",
DockerImage: "example/docker:test",
},
}
err = runnerController.SetupWithManager(mgr)
Expect(err).NotTo(HaveOccurred(), "failed to setup runner controller")

@@ -15,6 +15,21 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
)

func newRunnerPod(template corev1.Pod, runnerSpec arcv1alpha1.RunnerConfig, githubBaseURL string, d RunnerPodDefaults) (corev1.Pod, error) {
return newRunnerPodWithContainerMode("", template, runnerSpec, githubBaseURL, d)
}

func setEnv(c *corev1.Container, name, value string) {
for j := range c.Env {
e := &c.Env[j]

if e.Name == name {
e.Value = value
return
}
}
}

func newWorkGenericEphemeralVolume(t *testing.T, storageReq string) corev1.Volume {
GBs, err := resource.ParseQuantity(storageReq)
if err != nil {
@@ -76,7 +91,7 @@ func TestNewRunnerPod(t *testing.T) {
},
},
{
Name: "docker-sock",
Name: "var-run",
VolumeSource: corev1.VolumeSource{
EmptyDir: &corev1.EmptyDirVolumeSource{
Medium: corev1.StorageMediumMemory,
@@ -140,7 +155,7 @@ func TestNewRunnerPod(t *testing.T) {
},
{
Name: "DOCKER_HOST",
Value: "unix:///run/docker/docker.sock",
Value: "unix:///run/docker.sock",
},
},
VolumeMounts: []corev1.VolumeMount{
@@ -153,8 +168,8 @@ func TestNewRunnerPod(t *testing.T) {
MountPath: "/runner/_work",
},
{
Name: "docker-sock",
MountPath: "/run/docker",
Name: "var-run",
MountPath: "/run",
},
},
ImagePullPolicy: corev1.PullAlways,
@@ -165,13 +180,13 @@ func TestNewRunnerPod(t *testing.T) {
Image: "default-docker-image",
Args: []string{
"dockerd",
"--host=unix:///run/docker/docker.sock",
"--host=unix:///run/docker.sock",
"--group=$(DOCKER_GROUP_GID)",
},
Env: []corev1.EnvVar{
{
Name: "DOCKER_GROUP_GID",
Value: "121",
Value: "1234",
},
},
VolumeMounts: []corev1.VolumeMount{
@@ -180,8 +195,8 @@ func TestNewRunnerPod(t *testing.T) {
MountPath: "/runner",
},
{
Name: "docker-sock",
MountPath: "/run/docker",
Name: "var-run",
MountPath: "/run",
},
{
Name: "work",
@@ -397,6 +412,50 @@ func TestNewRunnerPod(t *testing.T) {
config: arcv1alpha1.RunnerConfig{},
want: newTestPod(base, nil),
},
{
description: "it should respect DOCKER_GROUP_GID of the dockerd sidecar container",
template: corev1.Pod{
Spec: corev1.PodSpec{
Containers: []corev1.Container{
{
Name: "docker",
Env: []corev1.EnvVar{
{
Name: "DOCKER_GROUP_GID",
Value: "2345",
},
},
},
},
},
},
config: arcv1alpha1.RunnerConfig{},
want: newTestPod(base, func(p *corev1.Pod) {
setEnv(&p.Spec.Containers[1], "DOCKER_GROUP_GID", "2345")
}),
},
{
description: "it should add DOCKER_GROUP_GID=1001 to the dockerd sidecar container for Ubuntu 20.04 runners",
template: corev1.Pod{},
config: arcv1alpha1.RunnerConfig{
Image: "ghcr.io/summerwind/actions-runner:ubuntu-20.04-20210726-1",
},
want: newTestPod(base, func(p *corev1.Pod) {
setEnv(&p.Spec.Containers[1], "DOCKER_GROUP_GID", "1001")
p.Spec.Containers[0].Image = "ghcr.io/summerwind/actions-runner:ubuntu-20.04-20210726-1"
}),
},
{
description: "it should add DOCKER_GROUP_GID=121 to the dockerd sidecar container for Ubuntu 22.04 runners",
template: corev1.Pod{},
config: arcv1alpha1.RunnerConfig{
Image: "ghcr.io/summerwind/actions-runner:ubuntu-22.04-20210726-1",
},
want: newTestPod(base, func(p *corev1.Pod) {
setEnv(&p.Spec.Containers[1], "DOCKER_GROUP_GID", "121")
p.Spec.Containers[0].Image = "ghcr.io/summerwind/actions-runner:ubuntu-22.04-20210726-1"
}),
},
{
description: "dockerdWithinRunnerContainer=true should set privileged=true and omit the dind sidecar container",
template: corev1.Pod{},
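Editor's note: the two Ubuntu test cases above pin down the image-derived defaults (GID 1001 for 20.04 runner images, 121 for 22.04). A hypothetical helper expressing that mapping; the committed code may implement the lookup differently:

func defaultDockerGroupGID(runnerImage string) string {
	switch {
	case strings.Contains(runnerImage, "ubuntu-20.04"):
		return "1001" // docker group GID shipped in 20.04 runner images
	case strings.Contains(runnerImage, "ubuntu-22.04"):
		return "121" // docker group GID shipped in 22.04 runner images
	default:
		return "121" // fall back to the modern default when the tag gives no hint
	}
}
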
@@ -484,7 +543,7 @@ func TestNewRunnerPod(t *testing.T) {
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "docker-sock",
|
||||
Name: "var-run",
|
||||
VolumeSource: corev1.VolumeSource{
|
||||
EmptyDir: &corev1.EmptyDirVolumeSource{
|
||||
Medium: corev1.StorageMediumMemory,
|
||||
@@ -503,8 +562,8 @@ func TestNewRunnerPod(t *testing.T) {
|
||||
MountPath: "/runner",
|
||||
},
|
||||
{
|
||||
Name: "docker-sock",
|
||||
MountPath: "/run/docker",
|
||||
Name: "var-run",
|
||||
MountPath: "/run",
|
||||
},
|
||||
}
|
||||
}),
|
||||
@@ -528,7 +587,7 @@ func TestNewRunnerPod(t *testing.T) {
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "docker-sock",
|
||||
Name: "var-run",
|
||||
VolumeSource: corev1.VolumeSource{
|
||||
EmptyDir: &corev1.EmptyDirVolumeSource{
|
||||
Medium: corev1.StorageMediumMemory,
|
||||
@@ -552,7 +611,14 @@ func TestNewRunnerPod(t *testing.T) {
|
||||
for i := range testcases {
|
||||
tc := testcases[i]
|
||||
t.Run(tc.description, func(t *testing.T) {
|
||||
got, err := newRunnerPod(tc.template, tc.config, defaultRunnerImage, defaultRunnerImagePullSecrets, defaultDockerImage, defaultDockerRegistryMirror, githubBaseURL, false)
|
||||
got, err := newRunnerPod(tc.template, tc.config, githubBaseURL, RunnerPodDefaults{
|
||||
RunnerImage: defaultRunnerImage,
|
||||
RunnerImagePullSecrets: defaultRunnerImagePullSecrets,
|
||||
DockerImage: defaultDockerImage,
|
||||
				DockerRegistryMirror:      defaultDockerRegistryMirror,
				DockerGID:                 "1234",
				UseRunnerStatusUpdateHook: false,
			})
			require.NoError(t, err)
			require.Equal(t, tc.want, got)
		})
@@ -610,7 +676,7 @@ func TestNewRunnerPodFromRunnerController(t *testing.T) {
				},
			},
			{
				Name: "docker-sock",
				Name: "var-run",
				VolumeSource: corev1.VolumeSource{
					EmptyDir: &corev1.EmptyDirVolumeSource{
						Medium: corev1.StorageMediumMemory,
@@ -674,7 +740,7 @@ func TestNewRunnerPodFromRunnerController(t *testing.T) {
						},
						{
							Name:  "DOCKER_HOST",
							Value: "unix:///run/docker/docker.sock",
							Value: "unix:///run/docker.sock",
						},
						{
							Name: "RUNNER_NAME",
@@ -695,8 +761,8 @@ func TestNewRunnerPodFromRunnerController(t *testing.T) {
							MountPath: "/runner/_work",
						},
						{
							Name:      "docker-sock",
							MountPath: "/run/docker",
							Name:      "var-run",
							MountPath: "/run",
						},
					},
					ImagePullPolicy: corev1.PullAlways,
@@ -707,13 +773,13 @@ func TestNewRunnerPodFromRunnerController(t *testing.T) {
					Image: "default-docker-image",
					Args: []string{
						"dockerd",
						"--host=unix:///run/docker/docker.sock",
						"--host=unix:///run/docker.sock",
						"--group=$(DOCKER_GROUP_GID)",
					},
					Env: []corev1.EnvVar{
						{
							Name:  "DOCKER_GROUP_GID",
							Value: "121",
							Value: "1234",
						},
					},
					VolumeMounts: []corev1.VolumeMount{
@@ -722,8 +788,8 @@ func TestNewRunnerPodFromRunnerController(t *testing.T) {
							MountPath: "/runner",
						},
						{
							Name:      "docker-sock",
							MountPath: "/run/docker",
							Name:      "var-run",
							MountPath: "/run",
						},
						{
							Name: "work",
@@ -1083,8 +1149,8 @@ func TestNewRunnerPodFromRunnerController(t *testing.T) {
							MountPath: "/runner/_work",
						},
						{
							Name:      "docker-sock",
							MountPath: "/run/docker",
							Name:      "var-run",
							MountPath: "/run",
						},
					},
				},
@@ -1104,7 +1170,7 @@ func TestNewRunnerPodFromRunnerController(t *testing.T) {
				},
			},
			{
				Name: "docker-sock",
				Name: "var-run",
				VolumeSource: corev1.VolumeSource{
					EmptyDir: &corev1.EmptyDirVolumeSource{
						Medium: corev1.StorageMediumMemory,
@@ -1120,8 +1186,8 @@ func TestNewRunnerPodFromRunnerController(t *testing.T) {
							MountPath: "/runner/_work",
						},
						{
							Name:      "docker-sock",
							MountPath: "/run/docker",
							Name:      "var-run",
							MountPath: "/run",
						},
						{
							Name: "runner",
@@ -1153,7 +1219,7 @@ func TestNewRunnerPodFromRunnerController(t *testing.T) {
				},
			},
			{
				Name: "docker-sock",
				Name: "var-run",
				VolumeSource: corev1.VolumeSource{
					EmptyDir: &corev1.EmptyDirVolumeSource{
						Medium: corev1.StorageMediumMemory,
@@ -1171,6 +1237,7 @@ func TestNewRunnerPodFromRunnerController(t *testing.T) {
		defaultRunnerImage            = "default-runner-image"
		defaultRunnerImagePullSecrets = []string{}
		defaultDockerImage            = "default-docker-image"
		defaultDockerGID              = "1234"
		defaultDockerRegistryMirror   = ""
		githubBaseURL                 = "api.github.com"
	)
@@ -1190,12 +1257,15 @@ func TestNewRunnerPodFromRunnerController(t *testing.T) {

		t.Run(tc.description, func(t *testing.T) {
			r := &RunnerReconciler{
				GitHubClient: multiClient,
				Scheme:       scheme,
				RunnerPodDefaults: RunnerPodDefaults{
					RunnerImage:            defaultRunnerImage,
					RunnerImagePullSecrets: defaultRunnerImagePullSecrets,
					DockerImage:            defaultDockerImage,
					DockerRegistryMirror:   defaultDockerRegistryMirror,
				GitHubClient: multiClient,
				Scheme:       scheme,
					DockerGID:              defaultDockerGID,
				},
			}
			got, err := r.newPod(tc.runner)
			require.NoError(t, err)
@@ -68,15 +68,24 @@ type RunnerReconciler struct {
	Recorder     record.EventRecorder
	Scheme       *runtime.Scheme
	GitHubClient *MultiGitHubClient
	Name string
	RegistrationRecheckInterval time.Duration
	RegistrationRecheckJitter   time.Duration
	UnregistrationRetryDelay time.Duration

	RunnerPodDefaults RunnerPodDefaults
}

type RunnerPodDefaults struct {
	RunnerImage            string
	RunnerImagePullSecrets []string
	DockerImage            string
	DockerRegistryMirror   string
	Name string
	RegistrationRecheckInterval time.Duration
	RegistrationRecheckJitter   time.Duration
	// The default Docker group ID to use for the dockerd sidecar container.
	// Ubuntu 20.04 runner images assume 1001 and the 22.04 variant assumes 121 by default.
	DockerGID string

	UseRunnerStatusUpdateHook bool
	UnregistrationRetryDelay time.Duration
}

// +kubebuilder:rbac:groups=actions.summerwind.dev,resources=runners,verbs=get;list;watch;create;update;patch;delete
@@ -145,7 +154,7 @@ func (r *RunnerReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr

	ready := runnerPodReady(&pod)

	if (runner.Status.Phase != phase || runner.Status.Ready != ready) && !r.UseRunnerStatusUpdateHook || runner.Status.Phase == "" && r.UseRunnerStatusUpdateHook {
	if (runner.Status.Phase != phase || runner.Status.Ready != ready) && !r.RunnerPodDefaults.UseRunnerStatusUpdateHook || runner.Status.Phase == "" && r.RunnerPodDefaults.UseRunnerStatusUpdateHook {
		if pod.Status.Phase == corev1.PodRunning {
			// Seeing this message, you can expect the runner to become `Running` soon.
			log.V(1).Info(
@@ -292,7 +301,7 @@ func (r *RunnerReconciler) processRunnerCreation(ctx context.Context, runner v1a
		return ctrl.Result{}, err
	}

	needsServiceAccount := runner.Spec.ServiceAccountName == "" && (r.UseRunnerStatusUpdateHook || runner.Spec.ContainerMode == "kubernetes")
	needsServiceAccount := runner.Spec.ServiceAccountName == "" && (r.RunnerPodDefaults.UseRunnerStatusUpdateHook || runner.Spec.ContainerMode == "kubernetes")
	if needsServiceAccount {
		serviceAccount := &corev1.ServiceAccount{
			ObjectMeta: metav1.ObjectMeta{
@@ -306,7 +315,7 @@ func (r *RunnerReconciler) processRunnerCreation(ctx context.Context, runner v1a

		rules := []rbacv1.PolicyRule{}

		if r.UseRunnerStatusUpdateHook {
		if r.RunnerPodDefaults.UseRunnerStatusUpdateHook {
			rules = append(rules, []rbacv1.PolicyRule{
				{
					APIGroups: []string{"actions.summerwind.dev"},
@@ -583,7 +592,7 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) {
		}
	}

	pod, err := newRunnerPodWithContainerMode(runner.Spec.ContainerMode, template, runner.Spec.RunnerConfig, r.RunnerImage, r.RunnerImagePullSecrets, r.DockerImage, r.DockerRegistryMirror, ghc.GithubBaseURL, r.UseRunnerStatusUpdateHook)
	pod, err := newRunnerPodWithContainerMode(runner.Spec.ContainerMode, template, runner.Spec.RunnerConfig, ghc.GithubBaseURL, r.RunnerPodDefaults)
	if err != nil {
		return pod, err
	}
@@ -634,7 +643,7 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) {

	if runnerSpec.ServiceAccountName != "" {
		pod.Spec.ServiceAccountName = runnerSpec.ServiceAccountName
	} else if r.UseRunnerStatusUpdateHook || runner.Spec.ContainerMode == "kubernetes" {
	} else if r.RunnerPodDefaults.UseRunnerStatusUpdateHook || runner.Spec.ContainerMode == "kubernetes" {
		pod.Spec.ServiceAccountName = runner.ObjectMeta.Name
	}
@@ -754,13 +763,24 @@ func runnerHookEnvs(pod *corev1.Pod) ([]corev1.EnvVar, error) {
	}, nil
}

func newRunnerPodWithContainerMode(containerMode string, template corev1.Pod, runnerSpec v1alpha1.RunnerConfig, defaultRunnerImage string, defaultRunnerImagePullSecrets []string, defaultDockerImage, defaultDockerRegistryMirror string, githubBaseURL string, useRunnerStatusUpdateHook bool) (corev1.Pod, error) {
func newRunnerPodWithContainerMode(containerMode string, template corev1.Pod, runnerSpec v1alpha1.RunnerConfig, githubBaseURL string, d RunnerPodDefaults) (corev1.Pod, error) {
	var (
		privileged                bool = true
		dockerdInRunner           bool = runnerSpec.DockerdWithinRunnerContainer != nil && *runnerSpec.DockerdWithinRunnerContainer
		dockerEnabled             bool = runnerSpec.DockerEnabled == nil || *runnerSpec.DockerEnabled
		ephemeral                 bool = runnerSpec.Ephemeral == nil || *runnerSpec.Ephemeral
		dockerdInRunnerPrivileged bool = dockerdInRunner

		defaultRunnerImage            = d.RunnerImage
		defaultRunnerImagePullSecrets = d.RunnerImagePullSecrets
		defaultDockerImage            = d.DockerImage
		defaultDockerRegistryMirror   = d.DockerRegistryMirror
		useRunnerStatusUpdateHook     = d.UseRunnerStatusUpdateHook
	)

	const (
		varRunVolumeName      = "var-run"
		varRunVolumeMountPath = "/run"
	)

	if containerMode == "kubernetes" {
@@ -1005,7 +1025,7 @@ func newRunnerPodWithContainerMode(containerMode string, template corev1.Pod, ru
		// explicitly invoke `dockerd` to avoid automatic TLS / TCP binding
		dockerdContainer.Args = append([]string{
			"dockerd",
			"--host=unix:///run/docker/docker.sock",
			"--host=unix:///run/docker.sock",
		}, dockerdContainer.Args...)

		// this must match a GID for the user in the runner image
@@ -1013,10 +1033,22 @@ func newRunnerPodWithContainerMode(containerMode string, template corev1.Pod, ru
		// for actions-runner-controller) so typically should not need to be
		// overridden
		if ok, _ := envVarPresent("DOCKER_GROUP_GID", dockerdContainer.Env); !ok {
			gid := d.DockerGID
			// We default to gid 121 for Ubuntu 22.04 images
			// See below for more details
			// - https://github.com/actions/actions-runner-controller/issues/2490#issuecomment-1501561923
			// - https://github.com/actions/actions-runner-controller/blob/8869ad28bb5a1daaedefe0e988571fe1fb36addd/runner/actions-runner.ubuntu-20.04.dockerfile#L14
			// - https://github.com/actions/actions-runner-controller/blob/8869ad28bb5a1daaedefe0e988571fe1fb36addd/runner/actions-runner.ubuntu-22.04.dockerfile#L12
			if strings.Contains(runnerContainer.Image, "22.04") {
				gid = "121"
			} else if strings.Contains(runnerContainer.Image, "20.04") {
				gid = "1001"
			}

			dockerdContainer.Env = append(dockerdContainer.Env,
				corev1.EnvVar{
					Name:  "DOCKER_GROUP_GID",
					Value: "121",
					Value: gid,
				})
		}
		dockerdContainer.Args = append(dockerdContainer.Args, "--group=$(DOCKER_GROUP_GID)")
@@ -1027,7 +1059,7 @@ func newRunnerPodWithContainerMode(containerMode string, template corev1.Pod, ru
		runnerContainer.Env = append(runnerContainer.Env,
			corev1.EnvVar{
				Name:  "DOCKER_HOST",
				Value: "unix:///run/docker/docker.sock",
				Value: "unix:///run/docker.sock",
			},
		)

@@ -1044,7 +1076,7 @@ func newRunnerPodWithContainerMode(containerMode string, template corev1.Pod, ru

		pod.Spec.Volumes = append(pod.Spec.Volumes,
			corev1.Volume{
				Name: "docker-sock",
				Name: varRunVolumeName,
				VolumeSource: corev1.VolumeSource{
					EmptyDir: &corev1.EmptyDirVolumeSource{
						Medium: corev1.StorageMediumMemory,
@@ -1063,11 +1095,11 @@ func newRunnerPodWithContainerMode(containerMode string, template corev1.Pod, ru
		)
	}

	if ok, _ := volumeMountPresent("docker-sock", runnerContainer.VolumeMounts); !ok {
	if ok, _ := volumeMountPresent(varRunVolumeName, runnerContainer.VolumeMounts); !ok {
		runnerContainer.VolumeMounts = append(runnerContainer.VolumeMounts,
			corev1.VolumeMount{
				Name:      "docker-sock",
				MountPath: "/run/docker",
				Name:      varRunVolumeName,
				MountPath: varRunVolumeMountPath,
			},
		)
	}
@@ -1081,10 +1113,10 @@ func newRunnerPodWithContainerMode(containerMode string, template corev1.Pod, ru
		},
	}

	if p, _ := volumeMountPresent("docker-sock", dockerdContainer.VolumeMounts); !p {
	if p, _ := volumeMountPresent(varRunVolumeName, dockerdContainer.VolumeMounts); !p {
		dockerVolumeMounts = append(dockerVolumeMounts, corev1.VolumeMount{
			Name:      "docker-sock",
			MountPath: "/run/docker",
			Name:      varRunVolumeName,
			MountPath: varRunVolumeMountPath,
		})
	}

@@ -1240,10 +1272,6 @@ func newRunnerPodWithContainerMode(containerMode string, template corev1.Pod, ru
	return *pod, nil
}

func newRunnerPod(template corev1.Pod, runnerSpec v1alpha1.RunnerConfig, defaultRunnerImage string, defaultRunnerImagePullSecrets []string, defaultDockerImage, defaultDockerRegistryMirror string, githubBaseURL string, useRunnerStatusUpdateHookEphemeralRole bool) (corev1.Pod, error) {
	return newRunnerPodWithContainerMode("", template, runnerSpec, defaultRunnerImage, defaultRunnerImagePullSecrets, defaultDockerImage, defaultDockerRegistryMirror, githubBaseURL, useRunnerStatusUpdateHookEphemeralRole)
}

func (r *RunnerReconciler) SetupWithManager(mgr ctrl.Manager) error {
	name := "runner-controller"
	if r.Name != "" {

@@ -47,11 +47,8 @@ type RunnerSetReconciler struct {

	CommonRunnerLabels []string
	GitHubClient       *MultiGitHubClient
	RunnerImage            string
	RunnerImagePullSecrets []string
	DockerImage            string
	DockerRegistryMirror   string
	UseRunnerStatusUpdateHook bool

	RunnerPodDefaults RunnerPodDefaults
}

// +kubebuilder:rbac:groups=actions.summerwind.dev,resources=runnersets,verbs=get;list;watch;create;update;patch;delete
@@ -231,7 +228,7 @@ func (r *RunnerSetReconciler) newStatefulSet(ctx context.Context, runnerSet *v1a

	githubBaseURL := ghc.GithubBaseURL

	pod, err := newRunnerPodWithContainerMode(runnerSet.Spec.RunnerConfig.ContainerMode, template, runnerSet.Spec.RunnerConfig, r.RunnerImage, r.RunnerImagePullSecrets, r.DockerImage, r.DockerRegistryMirror, githubBaseURL, r.UseRunnerStatusUpdateHook)
	pod, err := newRunnerPodWithContainerMode(runnerSet.Spec.RunnerConfig.ContainerMode, template, runnerSet.Spec.RunnerConfig, githubBaseURL, r.RunnerPodDefaults)
	if err != nil {
		return nil, err
	}

@@ -14,7 +14,7 @@ You can create workflows that build and test every pull request to your reposito
Runners execute the jobs assigned to them by a GitHub Actions workflow. There are two types of runners:

- [Github-hosted runners](https://docs.github.com/en/actions/using-github-hosted-runners/about-github-hosted-runners) - GitHub provides Linux, Windows, and macOS virtual machines to run your workflows. These virtual machines are hosted in the cloud by GitHub.
- [Self-hosted runners](https://docs.github.com/en/actions/hosting-your-own-runners/about-self-hosted-runners) - you can host your own self-hosted runners in your own data center or cloud infrastructure. ARC deploys self-hosted runners.
- [Self-hosted runners](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners/about-self-hosted-runners) - you can host your own self-hosted runners in your own data center or cloud infrastructure. ARC deploys self-hosted runners.

## Self hosted runners
Self-hosted runners offer more control of hardware, operating system, and software tools than GitHub-hosted runners. With self-hosted runners, you can create custom hardware configurations that meet your needs with processing power or memory to run larger jobs, install software available on your local network, and choose an operating system not offered by GitHub-hosted runners.
@@ -83,7 +83,7 @@ The GitHub hosted runners include a large amount of pre-installed software packa
ARC maintains a few runner images with `latest` aligning with GitHub's Ubuntu version. These images do not contain all of the software installed on the GitHub runners. They contain a subset of packages from the GitHub runners: basic CLI packages, git, docker, and build-essentials. To install additional software, it is recommended to use the corresponding setup actions, for instance `actions/setup-java` for Java or `actions/setup-node` for Node.

## Executing workflows
Now all the setup and configuration is done. A workflow can be created in the same repository that targets the self-hosted runner created by ARC. The workflow needs to have `runs-on: self-hosted` so it can target the self-hosted pool. For more information on targeting workflows to run on self-hosted runners, see "[Using Self-hosted runners](https://docs.github.com/en/actions/hosting-your-own-runners/using-self-hosted-runners-in-a-workflow)."
Now all the setup and configuration is done. A workflow can be created in the same repository that targets the self-hosted runner created by ARC. The workflow needs to have `runs-on: self-hosted` so it can target the self-hosted pool. For more information on targeting workflows to run on self-hosted runners, see "[Using Self-hosted runners](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners/using-self-hosted-runners-in-a-workflow)."
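
For illustration, a minimal workflow targeting the self-hosted pool might look like the following sketch (job and step names are arbitrary):

```yaml
name: demo
on: push

jobs:
  build:
    # `self-hosted` routes the job to any runner registered by ARC
    runs-on: self-hosted
    steps:
      - uses: actions/checkout@v3
      - run: echo "Hello from an ARC runner"
```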

## Scaling runners - statically with replicas count
With a small tweak to the replicas count (for example, `replicas: 2`) in the `runnerdeployment.yaml` file, more runners can be created. A matching number of runner pods is created for the configured replica count. As before, each pod contains the two containers.
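
As a sketch (resource and repository names are illustrative), bumping the replica count looks like this:

```yaml
# runnerdeployment.yaml (illustrative)
apiVersion: actions.summerwind.dev/v1alpha1
kind: RunnerDeployment
metadata:
  name: example-runnerdeploy
spec:
  replicas: 2   # two runner pods instead of one
  template:
    spec:
      repository: your-org/your-repo
```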

@@ -86,4 +86,4 @@ Or for example if they're having problems specifically with runners:
This way users don't have to understand ARC's moving parts, but we still have a
way to target them specifically if we need to.

[^1]: Superseded by [ADR 2023-04-14](2023-04-14-adding-labels-k8s-resources.md)
[^1]: Superseded by [ADR 2023-03-14](2023-03-14-adding-labels-k8s-resources.md)

@@ -2,7 +2,7 @@

**Date**: 2023-02-10

**Status**: Done
**Status**: Superseded [^1]

## Context

@@ -136,3 +136,5 @@ The downside of this mode:

- When you have multiple controllers deployed, they will still use the same version of the CRD, so you need to make sure every controller you deploy is the same version as the others.
- You can't mix an `actions-runner-controller` installed in this mode (watchSingleNamespace) with the regular installation mode (watchAllClusterNamespaces) in your cluster.

[^1]: Superseded by [ADR 2023-04-11](2023-04-11-limit-manager-role-permission.md)
167 docs/adrs/2023-04-11-limit-manager-role-permission.md Normal file
@@ -0,0 +1,167 @@
# ADR 2023-04-11: Limit Permissions for Service Accounts in Actions-Runner-Controller

**Date**: 2023-04-11

**Status**: Done [^1]

## Context

- `actions-runner-controller` is a Kubernetes CRD (with controller) built using https://github.com/kubernetes-sigs/controller-runtime

- [controller-runtime](https://github.com/kubernetes-sigs/controller-runtime) provides a default cache-based Kubernetes API `client.Reader` to make queries against the Kubernetes API server more efficient.

- The cache-based API client requires cluster-scope `list` and `watch` permissions for any resource the controller may query.

- This document is scoped to the AutoscalingRunnerSet CRD and its controller.

## Service accounts and their role binding in actions-runner-controller

There are 3 service accounts involved in a working `AutoscalingRunnerSet`-based `actions-runner-controller`:

1. Service account for each ephemeral runner pod

   This should have the lowest privilege (no `RoleBinding` nor `ClusterRoleBinding`) by default. In the case of `containerMode=kubernetes`, it gets certain write permissions via a `RoleBinding` that limits them to a single namespace.

   > References:
   >
   > - ./charts/gha-runner-scale-set/templates/no_permission_serviceaccount.yaml
   > - ./charts/gha-runner-scale-set/templates/kube_mode_role.yaml
   > - ./charts/gha-runner-scale-set/templates/kube_mode_role_binding.yaml
   > - ./charts/gha-runner-scale-set/templates/kube_mode_serviceaccount.yaml

2. Service account for the AutoScalingListener pod

   This has a `RoleBinding` to a single namespace with a `Role` that has permission to `PATCH` `EphemeralRunnerSet` and `EphemeralRunner`.

3. Service account for the controller manager

   Since the CRD controller is a singleton installed in the cluster that manages the CRD across multiple namespaces by default, the service account of the controller manager pod has a `ClusterRoleBinding` to a `ClusterRole` with broader permissions.

   The current `ClusterRole` has the following permissions:

   - Get/List/Create/Delete/Update/Patch/Watch on `AutoScalingRunnerSets` (with `Status` and `Finalizer` sub-resources)
   - Get/List/Create/Delete/Update/Patch/Watch on `AutoScalingListeners` (with `Status` and `Finalizer` sub-resources)
   - Get/List/Create/Delete/Update/Patch/Watch on `EphemeralRunnerSets` (with `Status` and `Finalizer` sub-resources)
   - Get/List/Create/Delete/Update/Patch/Watch on `EphemeralRunners` (with `Status` and `Finalizer` sub-resources)

   - Get/List/Create/Delete/Update/Patch/Watch on `Pods` (with `Status` sub-resource)
   - **Get/List/Create/Delete/Update/Patch/Watch on `Secrets`**
   - Get/List/Create/Delete/Update/Patch/Watch on `Roles`
   - Get/List/Create/Delete/Update/Patch/Watch on `RoleBindings`
   - Get/List/Create/Delete/Update/Patch/Watch on `ServiceAccounts`

   > The full list can be found at: https://github.com/actions/actions-runner-controller/blob/facae69e0b189d3b5dd659f36df8a829516d2896/charts/actions-runner-controller-2/templates/manager_role.yaml

## Limit cluster role permission on Secrets

The cluster-scope `List Secrets` permission might be a blocker for adopting `actions-runner-controller` for certain customers, as they may have restrictions in their cluster that simply don't allow any service account to have cluster-scope `List Secrets` permission.

To help these customers and improve security for `actions-runner-controller` in general, we will try to limit the `ClusterRole` permissions of the controller manager's service account down to the following:

- Get/List/Create/Delete/Update/Patch/Watch on `AutoScalingRunnerSets` (with `Status` and `Finalizer` sub-resources)
- Get/List/Create/Delete/Update/Patch/Watch on `AutoScalingListeners` (with `Status` and `Finalizer` sub-resources)
- Get/List/Create/Delete/Update/Patch/Watch on `EphemeralRunnerSets` (with `Status` and `Finalizer` sub-resources)
- Get/List/Create/Delete/Update/Patch/Watch on `EphemeralRunners` (with `Status` and `Finalizer` sub-resources)

- List/Watch on `Pods`
- List/Watch/Patch on `Roles`
- List/Watch on `RoleBindings`
- List/Watch on `ServiceAccounts`

> We will change the default cache-based client to bypass the cache when reading `Secrets` and `ConfigMaps` (a ConfigMap is used when you configure `githubServerTLS`), so we can eliminate the need for cluster-scope `List` and `Watch` `Secrets` permissions.

Introduce a new `Role` for the controller and bind it to the controller's `ServiceAccount` with a `RoleBinding` in the namespace the controller is deployed in. This role grants the controller's service account the permissions required to work with `AutoScalingListeners` in the controller namespace (a sketch follows the list):

- Get/Create/Delete on `Pods`
- Get on `Pods/status`
- Get/Create/Delete/Update/Patch on `Secrets`
- Get/Create/Delete/Update/Patch on `ServiceAccounts`
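
For illustration only, a minimal sketch of such a `Role` (the name and namespace are hypothetical; the authoritative definition lives in the helm chart templates):

```yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: arc-controller-listener-role   # hypothetical name
  namespace: arc-system                # the controller's namespace
rules:
  - apiGroups: [""]
    resources: ["pods"]
    verbs: ["get", "create", "delete"]
  - apiGroups: [""]
    resources: ["pods/status"]
    verbs: ["get"]
  - apiGroups: [""]
    resources: ["secrets", "serviceaccounts"]
    verbs: ["get", "create", "delete", "update", "patch"]
```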

The `Role` and `RoleBinding` creation will happen during `helm install demo oci://ghcr.io/actions/actions-runner-controller-charts/gha-runner-scale-set-controller`.

During `helm install demo oci://ghcr.io/actions/actions-runner-controller-charts/gha-runner-scale-set-controller`, we will store the controller's service account info as labels on the controller `Deployment`.
Ex:

```yaml
actions.github.com/controller-service-account-namespace: {{ .Release.Namespace }}
actions.github.com/controller-service-account-name: {{ include "gha-runner-scale-set-controller.serviceAccountName" . }}
```

Introduce a new `Role` per `AutoScalingRunnerSet` installation and bind it with a `RoleBinding` to the controller's `ServiceAccount` in the namespace each `AutoScalingRunnerSet` is deployed in, with the following permissions:

- Get/Create/Delete/Update/Patch/List on `Secrets`
- Create/Delete on `Pods`
- Get on `Pods/status`
- Get/Create/Delete/Update/Patch on `Roles`
- Get/Create/Delete/Update/Patch on `RoleBindings`
- Get on `ConfigMaps`

The `Role` and `RoleBinding` creation will happen during `helm install demo oci://ghcr.io/actions/actions-runner-controller-charts/gha-runner-scale-set` to grant the controller's service account the permissions required to operate in the namespace the `AutoScalingRunnerSet` is deployed in.

The `gha-runner-scale-set` helm chart will try to find the `Deployment` of the controller using `helm lookup`, and get the service account info from the labels of the controller `Deployment` (`actions.github.com/controller-service-account-namespace` and `actions.github.com/controller-service-account-name`).

The `gha-runner-scale-set` helm chart will use this service account to properly render the `RoleBinding` template.

The `gha-runner-scale-set` helm chart will also allow customers to explicitly provide the controller service account info, in case `helm lookup` can't locate the right controller `Deployment`.

New sections in `values.yaml` of `gha-runner-scale-set`:

```yaml
## Optional controller service account that needs to have the required Role and RoleBinding
## to operate this gha-runner-scale-set installation.
## The helm chart will try to find the controller deployment and its service account at installation time.
## In case the helm chart can't find the right service account, you can explicitly pass in the following values
## to help it render the RoleBinding with the right service account.
## Note: if your controller is installed to only watch a single namespace, you have to pass these values explicitly.
controllerServiceAccount:
  namespace: arc-system
  name: test-arc-gha-runner-scale-set-controller
```

## Install ARC to only watch/react to resources in a single namespace

In case the user doesn't want to have any `ClusterRole`, they can choose to install `actions-runner-controller` in a mode that only requires a `Role` with a `RoleBinding` in a particular namespace.

In this mode, `actions-runner-controller` will only be able to watch the `AutoScalingRunnerSet` resource in a single namespace.

If you want to deploy multiple `AutoScalingRunnerSets` into different namespaces, you will need to install `actions-runner-controller` in this mode multiple times as well, and have each installation watch the namespace into which you want to deploy an `AutoScalingRunnerSet`.

You will install `actions-runner-controller` with something like `helm install arc --namespace arc-system --set watchSingleNamespace=test-namespace oci://ghcr.io/actions/actions-runner-controller-charts/gha-runner-scale-set-controller` (the `test-namespace` namespace needs to be created first).

You will deploy the `AutoScalingRunnerSet` with something like `helm install demo --namespace test-namespace oci://ghcr.io/actions/actions-runner-controller-charts/gha-runner-scale-set`

In this mode, you will end up with a manager `Role` that has all Get/List/Create/Delete/Update/Patch/Watch permissions on the resources we need, and a `RoleBinding` binding the `Role` to the controller `ServiceAccount` in both the watched single namespace and the controller namespace, e.g. `test-namespace` and `arc-system` in the above example.

The downside of this mode:

- When you have multiple controllers deployed, they will still use the same version of the CRD, so you need to make sure every controller you deploy is the same version as the others.
- You can't mix an `actions-runner-controller` installed in this mode (watchSingleNamespace) with the regular installation mode (watchAllClusterNamespaces) in your cluster.

## Cleanup process

We will apply the following annotations during installation; they are used in the cleanup process (`helm uninstall`). If an annotation is not present, cleanup of that resource is skipped.

The cleanup only patches each resource to remove the `actions.github.com/cleanup-protection` finalizer. The client that created a resource is responsible for deleting it. Keep in mind that `helm uninstall` automatically deletes the resources it created, which completes the cleanup procedure.

The annotations applied to the `AutoscalingRunnerSet` and used in the cleanup procedure are (a sketch follows the list):

- `actions.github.com/cleanup-github-secret-name`
- `actions.github.com/cleanup-manager-role-binding`
- `actions.github.com/cleanup-manager-role-name`
- `actions.github.com/cleanup-kubernetes-mode-role-binding-name`
- `actions.github.com/cleanup-kubernetes-mode-role-name`
- `actions.github.com/cleanup-kubernetes-mode-service-account-name`
- `actions.github.com/cleanup-no-permission-service-account-name`
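
For illustration, these annotations might appear on an `AutoscalingRunnerSet` like so (all values are hypothetical):

```yaml
apiVersion: actions.github.com/v1alpha1
kind: AutoscalingRunnerSet
metadata:
  name: demo
  annotations:
    actions.github.com/cleanup-github-secret-name: demo-github-secret           # hypothetical
    actions.github.com/cleanup-manager-role-name: demo-manager-role             # hypothetical
    actions.github.com/cleanup-manager-role-binding: demo-manager-role-binding  # hypothetical
```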

The order in which resources are patched to remove finalizers:

1. Kubernetes mode `RoleBinding`
1. Kubernetes mode `Role`
1. Kubernetes mode `ServiceAccount`
1. No permission `ServiceAccount`
1. GitHub `Secret`
1. Manager `RoleBinding`
1. Manager `Role`

[^1]: Supersedes [ADR 2023-02-10](2023-02-10-limit-manager-role-permission.md)

@@ -17,7 +17,7 @@ This anti-flap configuration also has the final say on if a runner can be scaled
This delay is configurable via 2 methods (a sketch of the second follows the list):

1. By setting a new default via the controller's `--default-scale-down-delay` flag
2. By setting by setting the attribute `scaleDownDelaySecondsAfterScaleOut:` in a `HorizontalRunnerAutoscaler` kind's `spec:`.
2. By setting the attribute `scaleDownDelaySecondsAfterScaleOut:` in a `HorizontalRunnerAutoscaler` kind's `spec:`.
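
As a quick sketch of the second method (the target name is illustrative):

```yaml
apiVersion: actions.summerwind.dev/v1alpha1
kind: HorizontalRunnerAutoscaler
metadata:
  name: example-hra
spec:
  scaleTargetRef:
    name: example-runnerdeploy              # the RunnerDeployment to scale
  scaleDownDelaySecondsAfterScaleOut: 300   # wait 5 minutes before scaling down
```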

Below is a complete basic example of one of the pull-driven scaling metrics.

@@ -2,7 +2,7 @@

## Usage

[GitHub self-hosted runners can be deployed at various levels in a management hierarchy](https://docs.github.com/en/actions/hosting-your-own-runners/about-self-hosted-runners#about-self-hosted-runners):
[GitHub self-hosted runners can be deployed at various levels in a management hierarchy](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners/about-self-hosted-runners#about-self-hosted-runners):
- The repository level
- The organization level
- The enterprise level

@@ -2,7 +2,7 @@

## Runner Groups

Runner groups can be used to limit which repositories are able to use the GitHub Runner at an organization level. Runner groups have to be [created in GitHub first](https://docs.github.com/en/actions/hosting-your-own-runners/managing-access-to-self-hosted-runners-using-groups) before they can be referenced.
Runner groups can be used to limit which repositories are able to use the GitHub Runner at an organization level. Runner groups have to be [created in GitHub first](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners/managing-access-to-self-hosted-runners-using-groups) before they can be referenced.

To add the runner to the group `NewGroup`, specify the group in your `Runner` or `RunnerDeployment` spec.
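
A minimal sketch (the resource and organization names are illustrative):

```yaml
apiVersion: actions.summerwind.dev/v1alpha1
kind: RunnerDeployment
metadata:
  name: grouped-runnerdeploy
spec:
  template:
    spec:
      organization: your-org
      group: NewGroup   # must already exist in GitHub
```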

@@ -16,7 +16,7 @@ In addition to the increased reliability of the automatic scaling, we have worke

### Demo

https://user-images.githubusercontent.com/568794/212668313-8946ddc5-60c1-461f-a73e-27f5e8c75720.mp4
[](https://youtu.be/wQ0k5k6KW5Y)

## Setup

@@ -68,7 +68,7 @@ https://user-images.githubusercontent.com/568794/212668313-8946ddc5-60c1-461f-a7
    GITHUB_APP_ID="<GITHUB_APP_ID>"
    GITHUB_APP_INSTALLATION_ID="<GITHUB_APP_INSTALLATION_ID>"
    GITHUB_APP_PRIVATE_KEY="<GITHUB_APP_PRIVATE_KEY>"
    helm install arc-runner-set \
    helm install "${INSTALLATION_NAME}" \
        --namespace "${NAMESPACE}" \
        --create-namespace \
        --set githubConfigUrl="${GITHUB_CONFIG_URL}" \
@@ -102,7 +102,6 @@ https://user-images.githubusercontent.com/568794/212668313-8946ddc5-60c1-461f-a7
    name: Test workflow
    on:
      workflow_dispatch:

    jobs:
      test:
        runs-on: arc-runner-set

@@ -132,9 +132,9 @@ NAME READY STATUS RESTARTS AGE
example-runnerdeploy2475ht2qbr 2/2 Running 0 1m
````

Also, this runner has been registered directly to the specified repository; you can see it in the repository settings. For more information, see "[Checking the status of a self-hosted runner - GitHub Docs](https://docs.github.com/en/actions/hosting-your-own-runners/monitoring-and-troubleshooting-self-hosted-runners#checking-the-status-of-a-self-hosted-runner)."
Also, this runner has been registered directly to the specified repository; you can see it in the repository settings. For more information, see "[Checking the status of a self-hosted runner - GitHub Docs](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners/monitoring-and-troubleshooting-self-hosted-runners#checking-the-status-of-a-self-hosted-runner)."

:two: You are ready to execute workflows against this self-hosted runner. For more information, see "[Using self-hosted runners in a workflow - GitHub Docs](https://docs.github.com/en/actions/hosting-your-own-runners/using-self-hosted-runners-in-a-workflow#using-self-hosted-runners-in-a-workflow)."
:two: You are ready to execute workflows against this self-hosted runner. For more information, see "[Using self-hosted runners in a workflow - GitHub Docs](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners/using-self-hosted-runners-in-a-workflow#using-self-hosted-runners-in-a-workflow)."

There is also a quick start guide to get started on Actions. For more information, please refer to "[Quick start Guide to GitHub Actions](https://docs.github.com/en/actions/quickstart)."

@@ -37,4 +37,4 @@ jobs:
When using labels there are a few things to be aware of:

1. `self-hosted` is implicit with every runner, as this is an automatic label GitHub applies to any self-hosted runner. As a result, ARC can treat all runners as having this label without it being explicitly defined in a runner's manifest. You do not need to explicitly define this label in your runner manifests (you can if you want, though).
2. In addition to the `self-hosted` label, GitHub also applies a few other [default](https://docs.github.com/en/actions/hosting-your-own-runners/using-self-hosted-runners-in-a-workflow#using-default-labels-to-route-jobs) labels to any self-hosted runner. The other default labels relate to the architecture of the runner and so can't be implicitly applied by ARC, as ARC doesn't know whether the runner is `linux` or `windows`, `x64` or `ARM64`, etc. If you wish to use these labels in your workflows and have ARC scale runners accurately, you must also add them to your runner manifests.
2. In addition to the `self-hosted` label, GitHub also applies a few other [default](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners/using-self-hosted-runners-in-a-workflow#using-default-labels-to-route-jobs) labels to any self-hosted runner. The other default labels relate to the architecture of the runner and so can't be implicitly applied by ARC, as ARC doesn't know whether the runner is `linux` or `windows`, `x64` or `ARM64`, etc. If you wish to use these labels in your workflows and have ARC scale runners accurately, you must also add them to your runner manifests, as in the sketch below.
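
For illustration, default architecture labels can be mirrored in a runner manifest like this (a sketch; names are illustrative):

```yaml
apiVersion: actions.summerwind.dev/v1alpha1
kind: RunnerDeployment
metadata:
  name: labeled-runnerdeploy
spec:
  template:
    spec:
      repository: your-org/your-repo
      labels:
        - linux   # mirror GitHub's default labels so ARC can scale on them
        - x64
```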

@@ -160,7 +160,7 @@ spec:

### PV-backed runner work directory

ARC works by automatically creating runner pods for running [`actions/runner`](https://github.com/actions/runner) and [running `config.sh`](https://docs.github.com/en/actions/hosting-your-own-runners/adding-self-hosted-runners#adding-a-self-hosted-runner-to-a-repository), which you had to run manually without ARC.
ARC works by automatically creating runner pods for running [`actions/runner`](https://github.com/actions/runner) and [running `config.sh`](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners/adding-self-hosted-runners#adding-a-self-hosted-runner-to-a-repository), which you had to run manually without ARC.

`config.sh` is the script provided by `actions/runner` to pre-configure the runner process before it is started. One of the options provided by `config.sh` is `--work`,
which specifies the working directory in which the runner runs your workflow jobs.
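
As a sketch (field value illustrative, and assuming the `workDir` field of the runner spec is what feeds `--work`):

```yaml
apiVersion: actions.summerwind.dev/v1alpha1
kind: RunnerDeployment
metadata:
  name: workdir-runnerdeploy
spec:
  template:
    spec:
      repository: your-org/your-repo
      workDir: /runner/_work   # passed to config.sh via --work
```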

@@ -84,6 +84,8 @@ func (c *Config) NewClient() (*Client, error) {
			return nil, fmt.Errorf("enterprise url incorrect: %v", err)
		}
		tr.BaseURL = githubAPIURL
	} else if c.URL != "" && tr.BaseURL != c.URL {
		tr.BaseURL = c.URL
	}
	transport = tr
}

42 hack/check-gh-chart-versions.sh Executable file
@@ -0,0 +1,42 @@
#!/bin/bash
# Checks the chart versions against an input version. Fails on mismatch.
#
# Usage:
#   check-gh-chart-versions.sh <VERSION>

set -eo pipefail

TEXT_RED='\033[0;31m'
TEXT_RESET='\033[0m'
TEXT_GREEN='\033[0;32m'

target_version=$1
if [[ $# -eq 0 ]]; then
  echo "Release version argument is required"
  echo
  echo "Usage: ${0} <VERSION>"
  exit 1
fi

chart_dir="$(pwd)/charts"

controller_version=$(yq .version < "${chart_dir}/gha-runner-scale-set-controller/Chart.yaml")
controller_app_version=$(yq .appVersion < "${chart_dir}/gha-runner-scale-set-controller/Chart.yaml")

scaleset_version=$(yq .version < "${chart_dir}/gha-runner-scale-set/Chart.yaml")
scaleset_app_version=$(yq .appVersion < "${chart_dir}/gha-runner-scale-set/Chart.yaml")

if [[ "${controller_version}" != "${target_version}" ]] ||
   [[ "${controller_app_version}" != "${target_version}" ]] ||
   [[ "${scaleset_version}" != "${target_version}" ]] ||
   [[ "${scaleset_app_version}" != "${target_version}" ]]; then
  echo -e "${TEXT_RED}Chart versions do not match${TEXT_RESET}"
  echo "Target version: ${target_version}"
  echo "Controller version: ${controller_version}"
  echo "Controller app version: ${controller_app_version}"
  echo "Scale set version: ${scaleset_version}"
  echo "Scale set app version: ${scaleset_app_version}"
  exit 1
fi

echo -e "${TEXT_GREEN}Chart versions: ${controller_version}"
@@ -2,7 +2,7 @@

COMMIT=$(git rev-parse HEAD)
TAG=$(git describe --exact-match --abbrev=0 --tags "${COMMIT}" 2> /dev/null || true)
BRANCH=$(git branch | grep \* | cut -d ' ' -f2 | sed -e 's/[^a-zA-Z0-9+=._:/-]*//g' || true)
BRANCH=$(git branch | grep "\*" | cut -d ' ' -f2 | sed -e 's/[^a-zA-Z0-9+=._:/-]*//g' || true)
VERSION=""

if [ -z "$TAG" ]; then

36 main.go
@@ -45,6 +45,7 @@ import (
const (
	defaultRunnerImage = "summerwind/actions-runner:latest"
	defaultDockerImage = "docker:dind"
	defaultDockerGID   = "1001"
)

var scheme = runtime.NewScheme()
@@ -76,18 +77,15 @@ func main() {
		autoScalingRunnerSetOnly bool
		enableLeaderElection     bool
		disableAdmissionWebhook  bool
		runnerStatusUpdateHook bool
		leaderElectionId         string
		port                     int
		syncPeriod               time.Duration

		defaultScaleDownDelay time.Duration

		runnerImage            string
		runnerImagePullSecrets stringSlice
		runnerPodDefaults actionssummerwindnet.RunnerPodDefaults

		dockerImage          string
		dockerRegistryMirror string
		namespace            string
		logLevel             string
		logFormat            string
@@ -108,10 +106,11 @@ func main() {
	flag.BoolVar(&enableLeaderElection, "enable-leader-election", false,
		"Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager.")
	flag.StringVar(&leaderElectionId, "leader-election-id", "actions-runner-controller", "Controller id for leader election.")
	flag.StringVar(&runnerImage, "runner-image", defaultRunnerImage, "The image name of self-hosted runner container to use by default if one isn't defined in yaml.")
	flag.StringVar(&dockerImage, "docker-image", defaultDockerImage, "The image name of docker sidecar container to use by default if one isn't defined in yaml.")
	flag.StringVar(&runnerPodDefaults.RunnerImage, "runner-image", defaultRunnerImage, "The image name of self-hosted runner container to use by default if one isn't defined in yaml.")
	flag.StringVar(&runnerPodDefaults.DockerImage, "docker-image", defaultDockerImage, "The image name of docker sidecar container to use by default if one isn't defined in yaml.")
	flag.StringVar(&runnerPodDefaults.DockerGID, "docker-gid", defaultDockerGID, "The default GID of the docker group in the docker sidecar container. Use 1001 for dockerd sidecars of Ubuntu 20.04 runners and 121 for Ubuntu 22.04.")
	flag.Var(&runnerImagePullSecrets, "runner-image-pull-secret", "The default image-pull secret name for self-hosted runner container.")
	flag.StringVar(&dockerRegistryMirror, "docker-registry-mirror", "", "The default Docker Registry Mirror used by runners.")
	flag.StringVar(&runnerPodDefaults.DockerRegistryMirror, "docker-registry-mirror", "", "The default Docker Registry Mirror used by runners.")
	flag.StringVar(&c.Token, "github-token", c.Token, "The personal access token of GitHub.")
	flag.StringVar(&c.EnterpriseURL, "github-enterprise-url", c.EnterpriseURL, "Enterprise URL to be used for your GitHub API calls")
	flag.Int64Var(&c.AppID, "github-app-id", c.AppID, "The application ID of GitHub App.")
@@ -122,7 +121,7 @@ func main() {
	flag.StringVar(&c.BasicauthUsername, "github-basicauth-username", c.BasicauthUsername, "Username for GitHub basic auth to use instead of PAT or GitHub APP in case it's running behind a proxy API")
	flag.StringVar(&c.BasicauthPassword, "github-basicauth-password", c.BasicauthPassword, "Password for GitHub basic auth to use instead of PAT or GitHub APP in case it's running behind a proxy API")
	flag.StringVar(&c.RunnerGitHubURL, "runner-github-url", c.RunnerGitHubURL, "GitHub URL to be used by runners during registration")
	flag.BoolVar(&runnerStatusUpdateHook, "runner-status-update-hook", false, "Use custom RBAC for runners (role, role binding and service account).")
	flag.BoolVar(&runnerPodDefaults.UseRunnerStatusUpdateHook, "runner-status-update-hook", false, "Use custom RBAC for runners (role, role binding and service account).")
	flag.DurationVar(&defaultScaleDownDelay, "default-scale-down-delay", actionssummerwindnet.DefaultScaleDownDelay, "The approximate delay for a scale down followed by a scale up, used to prevent flapping (down->up->down->... loop)")
	flag.IntVar(&port, "port", 9443, "The port to which the admission webhook endpoint should bind")
	flag.DurationVar(&syncPeriod, "sync-period", 1*time.Minute, "Determines the minimum frequency at which K8s resources managed by this controller are reconciled.")
@@ -135,6 +134,8 @@ func main() {
	flag.Var(&autoScalerImagePullSecrets, "auto-scaler-image-pull-secrets", "The default image-pull secret name for auto-scaler listener container.")
	flag.Parse()

	runnerPodDefaults.RunnerImagePullSecrets = runnerImagePullSecrets

	log, err := logging.NewLogger(logLevel, logFormat)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Error: creating logger: %v\n", err)
@@ -259,12 +260,7 @@ func main() {
		Log:          log.WithName("runner"),
		Scheme:       mgr.GetScheme(),
		GitHubClient: multiClient,
		DockerImage: dockerImage,
		DockerRegistryMirror: dockerRegistryMirror,
		UseRunnerStatusUpdateHook: runnerStatusUpdateHook,
		// Defaults for self-hosted runner containers
		RunnerImage: runnerImage,
		RunnerImagePullSecrets: runnerImagePullSecrets,
		RunnerPodDefaults: runnerPodDefaults,
	}

	if err = runnerReconciler.SetupWithManager(mgr); err != nil {
@@ -300,13 +296,8 @@ func main() {
		Log:                log.WithName("runnerset"),
		Scheme:             mgr.GetScheme(),
		CommonRunnerLabels: commonRunnerLabels,
		DockerImage: dockerImage,
		DockerRegistryMirror: dockerRegistryMirror,
		GitHubClient: multiClient,
		// Defaults for self-hosted runner containers
		RunnerImage: runnerImage,
		RunnerImagePullSecrets: runnerImagePullSecrets,
		UseRunnerStatusUpdateHook: runnerStatusUpdateHook,
		RunnerPodDefaults: runnerPodDefaults,
	}

	if err = runnerSetReconciler.SetupWithManager(mgr); err != nil {
@@ -319,8 +310,9 @@ func main() {
		"version", build.Version,
		"default-scale-down-delay", defaultScaleDownDelay,
		"sync-period", syncPeriod,
		"default-runner-image", runnerImage,
		"default-docker-image", dockerImage,
		"default-runner-image", runnerPodDefaults.RunnerImage,
		"default-docker-image", runnerPodDefaults.DockerImage,
		"default-docker-gid", runnerPodDefaults.DockerGID,
		"common-runnner-labels", commonRunnerLabels,
		"leader-election-enabled", enableLeaderElection,
		"leader-election-id", leaderElectionId,

@@ -6,8 +6,8 @@ DIND_ROOTLESS_RUNNER_NAME ?= ${DOCKER_USER}/actions-runner-dind-rootless
OS_IMAGE ?= ubuntu-22.04
TARGETPLATFORM ?= $(shell arch)

RUNNER_VERSION ?= 2.303.0
RUNNER_CONTAINER_HOOKS_VERSION ?= 0.2.0
RUNNER_VERSION ?= 2.304.0
RUNNER_CONTAINER_HOOKS_VERSION ?= 0.3.1
DOCKER_VERSION ?= 20.10.23

# default list of platforms for which multiarch image is built

@@ -1 +1,2 @@
2.303.0
RUNNER_VERSION=2.304.0
RUNNER_CONTAINER_HOOKS_VERSION=0.3.1

@@ -2,7 +2,7 @@ FROM ubuntu:20.04

ARG TARGETPLATFORM
ARG RUNNER_VERSION
ARG RUNNER_CONTAINER_HOOKS_VERSION=0.2.0
ARG RUNNER_CONTAINER_HOOKS_VERSION
# Docker and Docker Compose arguments
ENV CHANNEL=stable
ARG DOCKER_COMPOSE_VERSION=v2.16.0

@@ -2,7 +2,7 @@ FROM ubuntu:22.04

ARG TARGETPLATFORM
ARG RUNNER_VERSION
ARG RUNNER_CONTAINER_HOOKS_VERSION=0.2.0
ARG RUNNER_CONTAINER_HOOKS_VERSION
# Docker and Docker Compose arguments
ENV CHANNEL=stable
ARG DOCKER_COMPOSE_VERSION=v2.16.0

@@ -2,7 +2,7 @@ FROM ubuntu:20.04

ARG TARGETPLATFORM
ARG RUNNER_VERSION
ARG RUNNER_CONTAINER_HOOKS_VERSION=0.2.0
ARG RUNNER_CONTAINER_HOOKS_VERSION
# Docker and Docker Compose arguments
ARG CHANNEL=stable
ARG DOCKER_VERSION=20.10.23

@@ -2,7 +2,7 @@ FROM ubuntu:22.04

ARG TARGETPLATFORM
ARG RUNNER_VERSION
ARG RUNNER_CONTAINER_HOOKS_VERSION=0.2.0
ARG RUNNER_CONTAINER_HOOKS_VERSION
# Docker and Docker Compose arguments
ARG CHANNEL=stable
ARG DOCKER_VERSION=20.10.23

@@ -2,7 +2,7 @@ FROM ubuntu:20.04

ARG TARGETPLATFORM
ARG RUNNER_VERSION
ARG RUNNER_CONTAINER_HOOKS_VERSION=0.2.0
ARG RUNNER_CONTAINER_HOOKS_VERSION
# Docker and Docker Compose arguments
ARG CHANNEL=stable
ARG DOCKER_VERSION=20.10.23

@@ -2,7 +2,7 @@ FROM ubuntu:22.04

ARG TARGETPLATFORM
ARG RUNNER_VERSION
ARG RUNNER_CONTAINER_HOOKS_VERSION=0.2.0
ARG RUNNER_CONTAINER_HOOKS_VERSION
# Docker and Docker Compose arguments
ARG CHANNEL=stable
ARG DOCKER_VERSION=20.10.23
@@ -98,6 +98,8 @@ COPY docker-shim.sh /usr/local/bin/docker
# Configure hooks folder structure.
COPY hooks /etc/arc/hooks/

# Add the Python "User Script Directory" to the PATH
ENV PATH="${PATH}:${HOME}/.local/bin/"
ENV ImageOS=ubuntu22

RUN echo "PATH=${PATH}" > /etc/environment \

@@ -31,17 +31,12 @@ var (
	// https://cert-manager.io/docs/installation/supported-releases/
	certManagerVersion = "v1.8.2"

	images = []testing.ContainerImage{
		testing.Img("docker", "dind"),
		testing.Img("quay.io/brancz/kube-rbac-proxy", "v0.10.0"),
		testing.Img("quay.io/jetstack/cert-manager-controller", certManagerVersion),
		testing.Img("quay.io/jetstack/cert-manager-cainjector", certManagerVersion),
		testing.Img("quay.io/jetstack/cert-manager-webhook", certManagerVersion),
	}
	arcStableImageRepo = "summerwind/actions-runner-controller"
	arcStableImageTag  = "v0.25.2"

	testResultCMNamePrefix = "test-result-"

	RunnerVersion = "2.303.0"
	RunnerVersion = "2.304.0"
)

// If you're willing to run this test via VS Code "run test" or "debug test",
@@ -105,8 +100,8 @@ func TestE2E(t *testing.T) {
	}{
		{
			label:         "stable",
			controller:    "summerwind/actions-runner-controller",
			controllerVer: "v0.25.2",
			controller:    arcStableImageRepo,
			controllerVer: arcStableImageTag,
			chart:         "actions-runner-controller/actions-runner-controller",
			// 0.20.2 accidentally added support for runner-status-update which isn't supported by ARC 0.25.2.
			// With some chart values, the controller ends up crashlooping with `flag provided but not defined: -runner-status-update-hook`.
@@ -423,6 +418,7 @@ type env struct {
	admissionWebhooksTimeout string
	imagePullSecretName      string
	imagePullPolicy          string
	dindSidecarRepositoryAndTag string
	watchNamespace           string

	vars vars
@@ -436,6 +432,8 @@ type vars struct {
	runnerDindImageRepo         string
	runnerRootlessDindImageRepo string

	dindSidecarImageRepo, dindSidecarImageTag string

	prebuildImages []testing.ContainerImage
	builds         []testing.DockerBuild

@@ -458,6 +456,10 @@ func buildVars(repo, ubuntuVer string) vars {
		runnerImage             = testing.Img(runnerImageRepo, runnerImageTag)
		runnerDindImage         = testing.Img(runnerDindImageRepo, runnerImageTag)
		runnerRootlessDindImage = testing.Img(runnerRootlessDindImageRepo, runnerImageTag)

		dindSidecarImageRepo = "docker"
		dindSidecarImageTag  = "20.10.23-dind"
		dindSidecarImage     = testing.Img(dindSidecarImageRepo, dindSidecarImageTag)
	)

	var vs vars
@@ -467,6 +469,9 @@ func buildVars(repo, ubuntuVer string) vars {
	vs.runnerRootlessDindImageRepo = runnerRootlessDindImageRepo
	vs.runnerImageRepo = runnerImageRepo

	vs.dindSidecarImageRepo = dindSidecarImageRepo
	vs.dindSidecarImageTag = dindSidecarImageTag

	// vs.controllerImage, vs.controllerImageTag

	vs.prebuildImages = []testing.ContainerImage{
@@ -474,6 +479,7 @@ func buildVars(repo, ubuntuVer string) vars {
		runnerImage,
		runnerDindImage,
		runnerRootlessDindImage,
		dindSidecarImage,
	}

	vs.builds = []testing.DockerBuild{
@@ -558,6 +564,8 @@ func initTestEnv(t *testing.T, k8sMinorVer string, vars vars) *env {
	e.remoteKubeconfig = testing.Getenv(t, "ARC_E2E_REMOTE_KUBECONFIG", "")
	e.admissionWebhooksTimeout = testing.Getenv(t, "ARC_E2E_ADMISSION_WEBHOOKS_TIMEOUT", "")
	e.imagePullSecretName = testing.Getenv(t, "ARC_E2E_IMAGE_PULL_SECRET_NAME", "")
	// This should be the default for Ubuntu 20.04 based runner images
	e.dindSidecarRepositoryAndTag = vars.dindSidecarImageRepo + ":" + vars.dindSidecarImageTag
	e.vars = vars

	if e.remoteKubeconfig != "" {
@@ -569,6 +577,17 @@ func initTestEnv(t *testing.T, k8sMinorVer string, vars vars) *env {
	e.watchNamespace = testing.Getenv(t, "TEST_WATCH_NAMESPACE", "")

	if e.remoteKubeconfig == "" {
		images := []testing.ContainerImage{
			testing.Img(vars.dindSidecarImageRepo, vars.dindSidecarImageTag),
			testing.Img("quay.io/brancz/kube-rbac-proxy", "v0.10.0"),
			testing.Img("quay.io/jetstack/cert-manager-controller", certManagerVersion),
			testing.Img("quay.io/jetstack/cert-manager-cainjector", certManagerVersion),
			testing.Img("quay.io/jetstack/cert-manager-webhook", certManagerVersion),
			// Otherwise kubelet would fail to pull images from DockerHub due to the rate limit:
			// Warning  Failed  19s  kubelet  Failed to pull image "summerwind/actions-runner-controller:v0.25.2": rpc error: code = Unknown desc = failed to pull and unpack image "docker.io/summerwind/actions-runner-controller:v0.25.2": failed to copy: httpReadSeeker: failed open: unexpected status code https://registry-1.docker.io/v2/summerwind/actions-runner-controller/manifests/sha256:92faf7e9f7f09a6240cdb5eb82eaf448852bdddf2fb77d0a5669fd8e5062b97b: 429 Too Many Requests - Server message: toomanyrequests: You have reached your pull rate limit. You may increase the limit by authenticating and upgrading: https://www.docker.com/increase-rate-limit
			testing.Img(arcStableImageRepo, arcStableImageTag),
		}

		e.Kind = testing.StartKind(t, k8sMinorVer, testing.Preload(images...))
		e.Env.Kubeconfig = e.Kind.Kubeconfig()
	} else {
@@ -750,6 +769,7 @@ func (e *env) installActionsRunnerController(t *testing.T, repo, tag, testID, ch
		"ADMISSION_WEBHOOKS_TIMEOUT=" + e.admissionWebhooksTimeout,
		"IMAGE_PULL_SECRET=" + e.imagePullSecretName,
		"IMAGE_PULL_POLICY=" + e.imagePullPolicy,
		"DIND_SIDECAR_REPOSITORY_AND_TAG=" + e.dindSidecarRepositoryAndTag,
		"WATCH_NAMESPACE=" + e.watchNamespace,
	}

@@ -1156,10 +1176,21 @@ func installActionsWorkflow(t *testing.T, testName, runnerLabel, testResultCMNam
			With: setupBuildXActionWith,
		},
		testing.Step{
			Run: "docker buildx build --platform=linux/amd64 " +
			Run: "docker buildx build --platform=linux/amd64 -t test1 --load " +
				dockerBuildCache +
				fmt.Sprintf("-f %s .", dockerfile),
		},
		testing.Step{
			Run: "docker run --rm test1",
		},
		testing.Step{
			Uses: "addnab/docker-run-action@v3",
			With: &testing.With{
				Image: "test1",
				Run:   "hello",
				Shell: "sh",
			},
		},
	)

	if useSudo {

@@ -55,4 +55,11 @@ type With struct {
	// Needs to be "docker" in rootless mode
	// https://stackoverflow.com/questions/66142872/how-to-solve-error-with-rootless-docker-in-github-actions-self-hosted-runner-wr
	Driver string `json:"driver,omitempty"`

	// Image is the image arg passed to docker-run-action
	Image string `json:"image,omitempty"`
	// Run is the run arg passed to docker-run-action
	Run string `json:"run,omitempty"`
	// Shell is the shell arg passed to docker-run-action
	Shell string `json:"shell,omitempty"`
}