Mirror of https://github.com/actions/actions-runner-controller.git
Synced 2025-12-10 11:41:27 +00:00

Compare commits: actions-ru...v0.27.5 (107 commits)
| SHA1 |
|---|
| 52fc819339 |
| 215b245881 |
| a3df23b07c |
| f5c69654e7 |
| abc0b678d3 |
| 963ab2a748 |
| 8a41a596b6 |
| e10c437f46 |
| a0a3916c80 |
| 1c360d7e26 |
| 20bb860a37 |
| 6a75bc0880 |
| 78271000c0 |
| a36b0e58b0 |
| 336e11a4e9 |
| dcb64f0b9e |
| 0dadfc4d37 |
| dc58f6ba13 |
| 06cbd632b8 |
| 9f33ae1507 |
| 63a6b5a7f0 |
| fddc5bf1c8 |
| d90ce2bed5 |
| cd996e7c27 |
| 297442975e |
| 5271f316e6 |
| 9845a934f4 |
| e0a7e142e0 |
| f9a11a8b0b |
| fde1893494 |
| 6fe8008640 |
| 2fee26ddce |
| 685f7162a4 |
| d134dee14b |
| c33ce998f4 |
| 78a93566af |
| 81dea9b3dc |
| 7ca3df3605 |
| 2343cd2d7b |
| cf18cb3fb0 |
| ae8b27a9a3 |
| 58ee5e8c4e |
| fade63a663 |
| ac4056f85b |
| 462d044604 |
| 94934819c4 |
| aac811f210 |
| e7ec736738 |
| 90ea691e72 |
| 32a653c0ca |
| c7b2dd1764 |
| 80af7fc125 |
| 34909f0cf1 |
| 8afef51c8b |
| 032443fcfd |
| 91c8991835 |
| c5ebe750dc |
| 34fdbf1231 |
| 44e9b7d8eb |
| 7ab516fdab |
| e571df52b5 |
| 706ec17bf4 |
| 30355f742b |
| 8a5fb6ccb7 |
| e930ba6e98 |
| 5ba3805a3f |
| f798cddca1 |
| 367ee46122 |
| f4a318fca6 |
| 4ee21cb24b |
| 102c9e1afa |
| 73e676f951 |
| 41ebb43c65 |
| aa50b62c01 |
| 942f773fef |
| 21722a5de8 |
| a2d4b95b79 |
| 04fb9f4fa1 |
| 8304b80955 |
| 9bd4025e9c |
| 94c089c407 |
| 9859bbc7f2 |
| c1e2c4ef9d |
| 2ee15dbca3 |
| a4cf626410 |
| 58f4b6ff2d |
| 22fbd10bd3 |
| 52b97139b6 |
| 3e0bc3f7be |
| ba1ac0990b |
| 76fe43e8e0 |
| 8869ad28bb |
| b86af190f7 |
| 1a491cbfe5 |
| 087f20fd5d |
| a880114e57 |
| e80bc21fa5 |
| 56754094ea |
| 8fa4520376 |
| a804bf8b00 |
| 5dea6db412 |
| 2a0b770a63 |
| a7ef871248 |
| e45e4c53f1 |
| a608abd124 |
| 02d9add322 |
| f5ac134787 |
1 .gitattributes vendored Normal file
@@ -0,0 +1 @@
+*.png filter=lfs diff=lfs merge=lfs -text
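The single line added above routes PNG files through Git LFS. For context, this is the line the git-lfs CLI writes when a pattern is first tracked; a minimal sketch, assuming git-lfs is installed and you run it from a repository root:

```bash
# One-time setup: install the LFS hooks for this clone.
git lfs install

# Track PNGs; this appends the filter/diff/merge line above to .gitattributes.
git lfs track "*.png"

# Commit the attributes file so the rule applies to every clone.
git add .gitattributes
git commit -m "Track PNGs with Git LFS"
```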
@@ -23,6 +23,14 @@ inputs:
  arc-controller-namespace:
    description: 'The namespace of the configured gha-runner-scale-set-controller'
    required: true
+  wait-to-finish:
+    description: 'Wait for the workflow run to finish'
+    required: true
+    default: "true"
+  wait-to-running:
+    description: 'Wait for the workflow run to start running'
+    required: true
+    default: "false"

runs:
  using: "composite"
@@ -111,14 +119,43 @@ runs:

    - name: Generate summary about the triggered workflow run
      shell: bash
      run: |
        cat <<-EOF > $GITHUB_STEP_SUMMARY
        | **Triggered workflow run** |
        |:--------------------------:|
        | ${{steps.query_workflow.outputs.workflow_run_url}} |
        EOF

    - name: Wait for workflow to start running
      if: inputs.wait-to-running == 'true' && inputs.wait-to-finish == 'false'
      uses: actions/github-script@v6
      with:
        script: |
          function sleep(ms) {
            return new Promise(resolve => setTimeout(resolve, ms))
          }
          const owner = '${{inputs.repo-owner}}'
          const repo = '${{inputs.repo-name}}'
          const workflow_run_id = ${{steps.query_workflow.outputs.workflow_run}}
          const workflow_job_id = ${{steps.query_workflow.outputs.workflow_job}}
          let count = 0
          while (count++<10) {
            await sleep(30 * 1000);
            let getRunResponse = await github.rest.actions.getWorkflowRun({
              owner: owner,
              repo: repo,
              run_id: workflow_run_id
            })
            console.log(`${getRunResponse.data.html_url}: ${getRunResponse.data.status} (${getRunResponse.data.conclusion})`);
            if (getRunResponse.data.status == 'in_progress') {
              console.log(`Workflow run is in progress.`)
              return
            }
          }
          core.setFailed(`The triggered workflow run didn't start properly using ${{inputs.arc-name}}`)

    - name: Wait for workflow to finish successfully
      if: inputs.wait-to-finish == 'true'
      uses: actions/github-script@v6
      with:
        script: |
@@ -151,10 +188,15 @@ runs:
          }
          core.setFailed(`The triggered workflow run didn't finish properly using ${{inputs.arc-name}}`)

    - name: cleanup
      if: inputs.wait-to-finish == 'true'
      shell: bash
      run: |
        helm uninstall ${{ inputs.arc-name }} --namespace ${{inputs.arc-namespace}} --debug
        kubectl wait --timeout=10s --for=delete AutoScalingRunnerSet -n ${{inputs.arc-name}} -l app.kubernetes.io/instance=${{ inputs.arc-name }}

    - name: Gather logs and cleanup
      shell: bash
      if: always()
      run: |
        helm uninstall ${{ inputs.arc-name }} --namespace ${{inputs.arc-namespace}} --debug
        kubectl wait --timeout=10s --for=delete AutoScalingRunnerSet -n ${{inputs.arc-name}} -l app.kubernetes.io/instance=${{ inputs.arc-name }}
        kubectl logs deployment/arc-gha-runner-scale-set-controller -n ${{inputs.arc-controller-namespace}}
        kubectl logs deployment/arc-gha-rs-controller -n ${{inputs.arc-controller-namespace}}
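The two hunks above add `wait-to-running` and `wait-to-finish` inputs plus a matching polling step to the e2e assertion action. A caller would drive them roughly as below; this sketch mirrors the `execute-assert-arc-e2e` invocation that appears later in this compare, and the step ids `setup` and `install_arc` are taken from that later usage rather than from this diff:

```yaml
- name: Trigger long running jobs and wait for runners to pick them up
  uses: ./.github/actions/execute-assert-arc-e2e
  timeout-minutes: 10
  with:
    auth-token: ${{ steps.setup.outputs.token }}
    repo-owner: ${{ env.TARGET_ORG }}
    repo-name: ${{ env.TARGET_REPO }}
    workflow-file: ${{ env.WORKFLOW_FILE }}
    arc-name: ${{ steps.install_arc.outputs.ARC_NAME }}
    arc-namespace: "arc-runners"
    arc-controller-namespace: "arc-systems"
    wait-to-running: "true"   # return once the run reaches in_progress
    wait-to-finish: "false"   # skip waiting for completion (and the cleanup step)
```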
212 .github/workflows/arc-publish-chart.yaml vendored Normal file
@@ -0,0 +1,212 @@
name: Publish ARC Helm Charts

# Revert to https://github.com/actions-runner-controller/releases#releases
# for details on why we use this approach
on:
  push:
    branches:
      - master
    paths:
      - 'charts/**'
      - '.github/workflows/arc-publish-chart.yaml'
      - '!charts/actions-runner-controller/docs/**'
      - '!charts/gha-runner-scale-set-controller/**'
      - '!charts/gha-runner-scale-set/**'
      - '!**.md'
  workflow_dispatch:
    inputs:
      force:
        description: 'Force publish even if the chart version is not bumped'
        type: boolean
        required: true
        default: false

env:
  KUBE_SCORE_VERSION: 1.10.0
  HELM_VERSION: v3.8.0

permissions:
  contents: write

concurrency:
  group: ${{ github.workflow }}
  cancel-in-progress: true

jobs:
  lint-chart:
    name: Lint Chart
    runs-on: ubuntu-latest
    outputs:
      publish-chart: ${{ steps.publish-chart-step.outputs.publish }}
    steps:
      - name: Checkout
        uses: actions/checkout@v3
        with:
          fetch-depth: 0

      - name: Set up Helm
        uses: azure/setup-helm@v3.4
        with:
          version: ${{ env.HELM_VERSION }}

      - name: Set up kube-score
        run: |
          wget https://github.com/zegl/kube-score/releases/download/v${{ env.KUBE_SCORE_VERSION }}/kube-score_${{ env.KUBE_SCORE_VERSION }}_linux_amd64 -O kube-score
          chmod 755 kube-score

      - name: Kube-score generated manifests
        run: helm template --values charts/.ci/values-kube-score.yaml charts/* | ./kube-score score - --ignore-test pod-networkpolicy --ignore-test deployment-has-poddisruptionbudget --ignore-test deployment-has-host-podantiaffinity --ignore-test container-security-context --ignore-test pod-probes --ignore-test container-image-tag --enable-optional-test container-security-context-privileged --enable-optional-test container-security-context-readonlyrootfilesystem

      # python is a requirement for the chart-testing action below (supports yamllint among other tests)
      - uses: actions/setup-python@v4
        with:
          python-version: '3.11'

      - name: Set up chart-testing
        uses: helm/chart-testing-action@v2.3.1

      - name: Run chart-testing (list-changed)
        id: list-changed
        run: |
          changed=$(ct list-changed --config charts/.ci/ct-config.yaml)
          if [[ -n "$changed" ]]; then
            echo "::set-output name=changed::true"
          fi

      - name: Run chart-testing (lint)
        run: |
          ct lint --config charts/.ci/ct-config.yaml

      - name: Create kind cluster
        if: steps.list-changed.outputs.changed == 'true'
        uses: helm/kind-action@v1.4.0

      # We need cert-manager already installed in the cluster because we assume the CRDs exist
      - name: Install cert-manager
        if: steps.list-changed.outputs.changed == 'true'
        run: |
          helm repo add jetstack https://charts.jetstack.io --force-update
          helm install cert-manager jetstack/cert-manager --set installCRDs=true --wait

      - name: Run chart-testing (install)
        if: steps.list-changed.outputs.changed == 'true'
        run: ct install --config charts/.ci/ct-config.yaml

      # WARNING: This relies on the latest release being at the top of the JSON from GitHub and a clean chart.yaml
      - name: Check if Chart Publish is Needed
        id: publish-chart-step
        run: |
          CHART_TEXT=$(curl -fs https://raw.githubusercontent.com/${{ github.repository }}/master/charts/actions-runner-controller/Chart.yaml)
          NEW_CHART_VERSION=$(echo "$CHART_TEXT" | grep version: | cut -d ' ' -f 2)
          RELEASE_LIST=$(curl -fs https://api.github.com/repos/${{ github.repository }}/releases | jq .[].tag_name | grep actions-runner-controller | cut -d '"' -f 2 | cut -d '-' -f 4)
          LATEST_RELEASED_CHART_VERSION=$(echo $RELEASE_LIST | cut -d ' ' -f 1)

          echo "CHART_VERSION_IN_MASTER=$NEW_CHART_VERSION" >> $GITHUB_ENV
          echo "LATEST_CHART_VERSION=$LATEST_RELEASED_CHART_VERSION" >> $GITHUB_ENV

          # Always publish if force is true
          if [[ $NEW_CHART_VERSION != $LATEST_RELEASED_CHART_VERSION || "${{ inputs.force }}" == "true" ]]; then
            echo "publish=true" >> $GITHUB_OUTPUT
          else
            echo "publish=false" >> $GITHUB_OUTPUT
          fi

      - name: Job summary
        run: |
          echo "Chart linting has been completed." >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "**Status:**" >> $GITHUB_STEP_SUMMARY
          echo "- chart version in master: ${{ env.CHART_VERSION_IN_MASTER }}" >> $GITHUB_STEP_SUMMARY
          echo "- latest chart version: ${{ env.LATEST_CHART_VERSION }}" >> $GITHUB_STEP_SUMMARY
          echo "- publish new chart: ${{ steps.publish-chart-step.outputs.publish }}" >> $GITHUB_STEP_SUMMARY

  publish-chart:
    if: needs.lint-chart.outputs.publish-chart == 'true'
    needs: lint-chart
    name: Publish Chart
    runs-on: ubuntu-latest
    permissions:
      contents: write # for helm/chart-releaser-action to push chart release and create a release
    env:
      CHART_TARGET_ORG: actions-runner-controller
      CHART_TARGET_REPO: actions-runner-controller.github.io
      CHART_TARGET_BRANCH: master

    steps:
      - name: Checkout
        uses: actions/checkout@v3
        with:
          fetch-depth: 0

      - name: Configure Git
        run: |
          git config user.name "$GITHUB_ACTOR"
          git config user.email "$GITHUB_ACTOR@users.noreply.github.com"

      - name: Get Token
        id: get_workflow_token
        uses: peter-murray/workflow-application-token-action@8e1ba3bf1619726336414f1014e37f17fbadf1db
        with:
          application_id: ${{ secrets.ACTIONS_ACCESS_APP_ID }}
          application_private_key: ${{ secrets.ACTIONS_ACCESS_PK }}
          organization: ${{ env.CHART_TARGET_ORG }}

      - name: Install chart-releaser
        uses: helm/chart-releaser-action@v1.4.1
        with:
          install_only: true
          install_dir: ${{ github.workspace }}/bin

      - name: Package and upload release assets
        run: |
          cr package \
            ${{ github.workspace }}/charts/actions-runner-controller/ \
            --package-path .cr-release-packages

          cr upload \
            --owner "$(echo ${{ github.repository }} | cut -d '/' -f 1)" \
            --git-repo "$(echo ${{ github.repository }} | cut -d '/' -f 2)" \
            --package-path .cr-release-packages \
            --token ${{ secrets.GITHUB_TOKEN }}

      - name: Generate updated index.yaml
        run: |
          cr index \
            --owner "$(echo ${{ github.repository }} | cut -d '/' -f 1)" \
            --git-repo "$(echo ${{ github.repository }} | cut -d '/' -f 2)" \
            --index-path ${{ github.workspace }}/index.yaml \
            --token ${{ secrets.GITHUB_TOKEN }} \
            --push \
            --pages-branch 'gh-pages' \
            --pages-index-path 'index.yaml'

      # Chart Release was never intended to publish to a different repo
      # this workaround is intended to move the index.yaml to the target repo
      # where the github pages are hosted
      - name: Checkout target repository
        uses: actions/checkout@v3
        with:
          repository: ${{ env.CHART_TARGET_ORG }}/${{ env.CHART_TARGET_REPO }}
          path: ${{ env.CHART_TARGET_REPO }}
          ref: ${{ env.CHART_TARGET_BRANCH }}
          token: ${{ steps.get_workflow_token.outputs.token }}

      - name: Copy index.yaml
        run: |
          cp ${{ github.workspace }}/index.yaml ${{ env.CHART_TARGET_REPO }}/actions-runner-controller/index.yaml

      - name: Commit and push to target repository
        run: |
          git config user.name "$GITHUB_ACTOR"
          git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
          git add .
          git commit -m "Update index.yaml"
          git push
        working-directory: ${{ github.workspace }}/${{ env.CHART_TARGET_REPO }}

      - name: Job summary
        run: |
          echo "New helm chart has been published" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "**Status:**" >> $GITHUB_STEP_SUMMARY
          echo "- New [index.yaml](https://github.com/${{ env.CHART_TARGET_ORG }}/${{ env.CHART_TARGET_REPO }}/tree/master/actions-runner-controller) pushed" >> $GITHUB_STEP_SUMMARY
@@ -1,4 +1,4 @@
-name: Publish ARC
+name: Publish ARC Image

# Revert to https://github.com/actions-runner-controller/releases#releases
# for details on why we use this approach
@@ -25,6 +25,10 @@ env:
  TARGET_ORG: actions-runner-controller
  TARGET_REPO: actions-runner-controller

+concurrency:
+  group: ${{ github.workflow }}
+  cancel-in-progress: true

jobs:
  release-controller:
    name: Release
@@ -1,4 +1,4 @@
-name: Runners
+name: Release ARC Runner Images

# Revert to https://github.com/actions-runner-controller/releases#releases
# for details on why we use this approach
@@ -10,7 +10,7 @@ on:
      - 'master'
    paths:
      - 'runner/VERSION'
-      - '.github/workflows/release-runners.yaml'
+      - '.github/workflows/arc-release-runners.yaml'

env:
  # Safeguard to prevent pushing images to registeries after build
@@ -18,7 +18,10 @@ env:
  TARGET_ORG: actions-runner-controller
  TARGET_WORKFLOW: release-runners.yaml
  DOCKER_VERSION: 20.10.23
-  RUNNER_CONTAINER_HOOKS_VERSION: 0.2.0

+concurrency:
+  group: ${{ github.workflow }}
+  cancel-in-progress: true

jobs:
  build-runners:
@@ -27,10 +30,12 @@ jobs:
    steps:
      - uses: actions/checkout@v3
      - name: Get runner version
-        id: runner_version
+        id: versions
        run: |
-          version=$(echo -n $(cat runner/VERSION))
-          echo runner_version=$version >> $GITHUB_OUTPUT
+          runner_current_version="$(echo -n $(cat runner/VERSION | grep 'RUNNER_VERSION=' | cut -d '=' -f2))"
+          container_hooks_current_version="$(echo -n $(cat runner/VERSION | grep 'RUNNER_CONTAINER_HOOKS_VERSION=' | cut -d '=' -f2))"
+          echo runner_version=$runner_current_version >> $GITHUB_OUTPUT
+          echo container_hooks_version=$container_hooks_current_version >> $GITHUB_OUTPUT

      - name: Get Token
        id: get_workflow_token
@@ -42,7 +47,8 @@ jobs:

      - name: Trigger Build And Push Runner Images To Registries
        env:
-          RUNNER_VERSION: ${{ steps.runner_version.outputs.runner_version }}
+          RUNNER_VERSION: ${{ steps.versions.outputs.runner_version }}
+          CONTAINER_HOOKS_VERSION: ${{ steps.versions.outputs.container_hooks_version }}
        run: |
          # Authenticate
          gh auth login --with-token <<< ${{ steps.get_workflow_token.outputs.token }}
@@ -51,20 +57,21 @@ jobs:
          gh workflow run ${{ env.TARGET_WORKFLOW }} -R ${{ env.TARGET_ORG }}/releases \
            -f runner_version=${{ env.RUNNER_VERSION }} \
            -f docker_version=${{ env.DOCKER_VERSION }} \
-            -f runner_container_hooks_version=${{ env.RUNNER_CONTAINER_HOOKS_VERSION }} \
+            -f runner_container_hooks_version=${{ env.CONTAINER_HOOKS_VERSION }} \
            -f sha='${{ github.sha }}' \
            -f push_to_registries=${{ env.PUSH_TO_REGISTRIES }}

      - name: Job summary
        env:
-          RUNNER_VERSION: ${{ steps.runner_version.outputs.runner_version }}
+          RUNNER_VERSION: ${{ steps.versions.outputs.runner_version }}
+          CONTAINER_HOOKS_VERSION: ${{ steps.versions.outputs.container_hooks_version }}
        run: |
          echo "The [release-runners.yaml](https://github.com/actions-runner-controller/releases/blob/main/.github/workflows/release-runners.yaml) workflow has been triggered!" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "**Parameters:**" >> $GITHUB_STEP_SUMMARY
          echo "- runner_version: ${{ env.RUNNER_VERSION }}" >> $GITHUB_STEP_SUMMARY
          echo "- docker_version: ${{ env.DOCKER_VERSION }}" >> $GITHUB_STEP_SUMMARY
-          echo "- runner_container_hooks_version: ${{ env.RUNNER_CONTAINER_HOOKS_VERSION }}" >> $GITHUB_STEP_SUMMARY
+          echo "- runner_container_hooks_version: ${{ env.CONTAINER_HOOKS_VERSION }}" >> $GITHUB_STEP_SUMMARY
          echo "- sha: ${{ github.sha }}" >> $GITHUB_STEP_SUMMARY
          echo "- push_to_registries: ${{ env.PUSH_TO_REGISTRIES }}" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
149 .github/workflows/arc-update-runners-scheduled.yaml vendored Normal file
@@ -0,0 +1,149 @@
# This workflows polls releases from actions/runner and in case of a new one it
# updates files containing runner version and opens a pull request.
name: Runner Updates Check (Scheduled Job)

on:
  schedule:
    # run daily
    - cron: "0 9 * * *"
  workflow_dispatch:

jobs:
  # check_versions compares our current version and the latest available runner
  # version and sets them as outputs.
  check_versions:
    runs-on: ubuntu-latest
    env:
      GH_TOKEN: ${{ github.token }}
    outputs:
      runner_current_version: ${{ steps.runner_versions.outputs.runner_current_version }}
      runner_latest_version: ${{ steps.runner_versions.outputs.runner_latest_version }}
      container_hooks_current_version: ${{ steps.container_hooks_versions.outputs.container_hooks_current_version }}
      container_hooks_latest_version: ${{ steps.container_hooks_versions.outputs.container_hooks_latest_version }}
    steps:
      - uses: actions/checkout@v3

      - name: Get runner current and latest versions
        id: runner_versions
        run: |
          CURRENT_VERSION="$(echo -n $(cat runner/VERSION | grep 'RUNNER_VERSION=' | cut -d '=' -f2))"
          echo "Current version: $CURRENT_VERSION"
          echo runner_current_version=$CURRENT_VERSION >> $GITHUB_OUTPUT

          LATEST_VERSION=$(gh release list --exclude-drafts --exclude-pre-releases --limit 1 -R actions/runner | grep -oP '(?<=v)[0-9.]+' | head -1)
          echo "Latest version: $LATEST_VERSION"
          echo runner_latest_version=$LATEST_VERSION >> $GITHUB_OUTPUT

      - name: Get container-hooks current and latest versions
        id: container_hooks_versions
        run: |
          CURRENT_VERSION="$(echo -n $(cat runner/VERSION | grep 'RUNNER_CONTAINER_HOOKS_VERSION=' | cut -d '=' -f2))"
          echo "Current version: $CURRENT_VERSION"
          echo container_hooks_current_version=$CURRENT_VERSION >> $GITHUB_OUTPUT

          LATEST_VERSION=$(gh release list --exclude-drafts --exclude-pre-releases --limit 1 -R actions/runner-container-hooks | grep -oP '(?<=v)[0-9.]+' | head -1)
          echo "Latest version: $LATEST_VERSION"
          echo container_hooks_latest_version=$LATEST_VERSION >> $GITHUB_OUTPUT

  # check_pr checks if a PR for the same update already exists. It only runs if
  # runner latest version != our current version. If no existing PR is found,
  # it sets a PR name as output.
  check_pr:
    runs-on: ubuntu-latest
    needs: check_versions
    if: needs.check_versions.outputs.runner_current_version != needs.check_versions.outputs.runner_latest_version || needs.check_versions.outputs.container_hooks_current_version != needs.check_versions.outputs.container_hooks_latest_version
    outputs:
      pr_name: ${{ steps.pr_name.outputs.pr_name }}
    env:
      GH_TOKEN: ${{ github.token }}
    steps:
      - name: debug
        run:
          echo "RUNNER_CURRENT_VERSION=${{ needs.check_versions.outputs.runner_current_version }}"
          echo "RUNNER_LATEST_VERSION=${{ needs.check_versions.outputs.runner_latest_version }}"
          echo "CONTAINER_HOOKS_CURRENT_VERSION=${{ needs.check_versions.outputs.container_hooks_current_version }}"
          echo "CONTAINER_HOOKS_LATEST_VERSION=${{ needs.check_versions.outputs.container_hooks_latest_version }}"

      - uses: actions/checkout@v3

      - name: PR Name
        id: pr_name
        env:
          RUNNER_CURRENT_VERSION: ${{ needs.check_versions.outputs.runner_current_version }}
          RUNNER_LATEST_VERSION: ${{ needs.check_versions.outputs.runner_latest_version }}
          CONTAINER_HOOKS_CURRENT_VERSION: ${{ needs.check_versions.outputs.container_hooks_current_version }}
          CONTAINER_HOOKS_LATEST_VERSION: ${{ needs.check_versions.outputs.container_hooks_latest_version }}
        # Generate a PR name with the following title:
        # Updates: runner to v2.304.0 and container-hooks to v0.3.1
        run: |
          RUNNER_MESSAGE="runner to v${RUNNER_LATEST_VERSION}"
          CONTAINER_HOOKS_MESSAGE="container-hooks to v${CONTAINER_HOOKS_LATEST_VERSION}"

          PR_NAME="Updates:"
          if [ "$RUNNER_CURRENT_VERSION" != "$RUNNER_LATEST_VERSION" ]
          then
            PR_NAME="$PR_NAME $RUNNER_MESSAGE"
          fi
          if [ "$CONTAINER_HOOKS_CURRENT_VERSION" != "$CONTAINER_HOOKS_LATEST_VERSION" ]
          then
            PR_NAME="$PR_NAME $CONTAINER_HOOKS_MESSAGE"
          fi

          result=$(gh pr list --search "$PR_NAME" --json number --jq ".[].number" --limit 1)
          if [ -z "$result" ]
          then
            echo "No existing PRs found, setting output with pr_name=$PR_NAME"
            echo pr_name=$PR_NAME >> $GITHUB_OUTPUT
          else
            echo "Found a PR with title '$PR_NAME' already existing: ${{ github.server_url }}/${{ github.repository }}/pull/$result"
          fi

  # update_version updates runner version in the files listed below, commits
  # the changes and opens a pull request as `github-actions` bot.
  update_version:
    runs-on: ubuntu-latest
    needs:
      - check_versions
      - check_pr
    if: needs.check_pr.outputs.pr_name
    permissions:
      pull-requests: write
      contents: write
      actions: write
    env:
      GH_TOKEN: ${{ github.token }}
      RUNNER_CURRENT_VERSION: ${{ needs.check_versions.outputs.runner_current_version }}
      RUNNER_LATEST_VERSION: ${{ needs.check_versions.outputs.runner_latest_version }}
      CONTAINER_HOOKS_CURRENT_VERSION: ${{ needs.check_versions.outputs.container_hooks_current_version }}
      CONTAINER_HOOKS_LATEST_VERSION: ${{ needs.check_versions.outputs.container_hooks_latest_version }}
      PR_NAME: ${{ needs.check_pr.outputs.pr_name }}

    steps:
      - uses: actions/checkout@v3

      - name: New branch
        run: git checkout -b update-runner-"$(date +%Y-%m-%d)"

      - name: Update files
        run: |
          sed -i "s/$RUNNER_CURRENT_VERSION/$RUNNER_LATEST_VERSION/g" runner/VERSION
          sed -i "s/$RUNNER_CURRENT_VERSION/$RUNNER_LATEST_VERSION/g" runner/Makefile
          sed -i "s/$RUNNER_CURRENT_VERSION/$RUNNER_LATEST_VERSION/g" Makefile
          sed -i "s/$RUNNER_CURRENT_VERSION/$RUNNER_LATEST_VERSION/g" test/e2e/e2e_test.go

          sed -i "s/$CONTAINER_HOOKS_CURRENT_VERSION/$CONTAINER_HOOKS_LATEST_VERSION/g" runner/VERSION
          sed -i "s/$CONTAINER_HOOKS_CURRENT_VERSION/$CONTAINER_HOOKS_LATEST_VERSION/g" runner/Makefile
          sed -i "s/$CONTAINER_HOOKS_CURRENT_VERSION/$CONTAINER_HOOKS_LATEST_VERSION/g" Makefile
          sed -i "s/$CONTAINER_HOOKS_CURRENT_VERSION/$CONTAINER_HOOKS_LATEST_VERSION/g" test/e2e/e2e_test.go

      - name: Commit changes
        run: |
          # from https://github.com/orgs/community/discussions/26560
          git config user.email "41898282+github-actions[bot]@users.noreply.github.com"
          git config user.name "github-actions[bot]"
          git add .
          git commit -m "$PR_NAME"
          git push -u origin HEAD

      - name: Create pull request
        run: gh pr create -f -l "runners update"
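The grep/cut pipelines above imply that runner/VERSION is a small key=value file holding both versions. Under that assumption (the file contents below are illustrative, borrowed from the PR-title example in the comments, not read from the repo), the extraction behaves like this:

```bash
#!/usr/bin/env bash
set -euo pipefail

# Hypothetical runner/VERSION contents implied by the greps above:
cat > VERSION <<'EOF'
RUNNER_VERSION=2.304.0
RUNNER_CONTAINER_HOOKS_VERSION=0.3.1
EOF

# The same extraction the workflow performs:
RUNNER_CURRENT="$(grep 'RUNNER_VERSION=' VERSION | cut -d '=' -f2)"
HOOKS_CURRENT="$(grep 'RUNNER_CONTAINER_HOOKS_VERSION=' VERSION | cut -d '=' -f2)"

echo "runner=$RUNNER_CURRENT hooks=$HOOKS_CURRENT"
# -> runner=2.304.0 hooks=0.3.1
```

Note that `grep 'RUNNER_VERSION='` matches only its own line here, because the hooks key ends in `HOOKS_VERSION=` rather than `RUNNER_VERSION=`.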
@@ -6,7 +6,7 @@ on:
      - master
    paths:
      - 'charts/**'
-      - '.github/workflows/validate-chart.yaml'
+      - '.github/workflows/arc-validate-chart.yaml'
      - '!charts/actions-runner-controller/docs/**'
      - '!**.md'
      - '!charts/gha-runner-scale-set-controller/**'
@@ -14,7 +14,7 @@ on:
  push:
    paths:
      - 'charts/**'
-      - '.github/workflows/validate-chart.yaml'
+      - '.github/workflows/arc-validate-chart.yaml'
      - '!charts/actions-runner-controller/docs/**'
      - '!**.md'
      - '!charts/gha-runner-scale-set-controller/**'
@@ -27,6 +27,13 @@ env:
permissions:
  contents: read

+concurrency:
+  # This will make sure we only apply the concurrency limits on pull requests
+  # but not pushes to master branch by making the concurrency group name unique
+  # for pushes
+  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
+  cancel-in-progress: true

jobs:
  validate-chart:
    name: Lint Chart
@@ -65,7 +72,7 @@ jobs:
          python-version: '3.7'

      - name: Set up chart-testing
-        uses: helm/chart-testing-action@v2.3.1
+        uses: helm/chart-testing-action@v2.4.0

      - name: Run chart-testing (list-changed)
        id: list-changed
@@ -1,4 +1,4 @@
-name: Validate Runners
+name: Validate ARC Runners

on:
  pull_request:
@@ -12,6 +12,13 @@ on:
permissions:
  contents: read

+concurrency:
+  # This will make sure we only apply the concurrency limits on pull requests
+  # but not pushes to master branch by making the concurrency group name unique
+  # for pushes
+  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
+  cancel-in-progress: true

jobs:
  shellcheck:
    name: runner / shellcheck
@@ -1,11 +1,11 @@
-name: CI ARC E2E Linux VM Test
+name: (gha) E2E Tests

on:
  push:
    branches:
      - master
  pull_request:
    branches:
      - master
  workflow_dispatch:

@@ -16,11 +16,19 @@ env:
  TARGET_ORG: actions-runner-controller
  TARGET_REPO: arc_e2e_test_dummy
  IMAGE_NAME: "arc-test-image"
-  IMAGE_VERSION: "dev"
+  IMAGE_VERSION: "0.5.0"

+concurrency:
+  # This will make sure we only apply the concurrency limits on pull requests
+  # but not pushes to master branch by making the concurrency group name unique
+  # for pushes
+  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
+  cancel-in-progress: true

jobs:
  default-setup:
    runs-on: ubuntu-latest
    timeout-minutes: 20
+    if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.id == github.repository_id
    env:
      WORKFLOW_FILE: "arc-test-workflow.yaml"
@@ -50,20 +58,21 @@ jobs:
            --debug
          count=0
          while true; do
-            POD_NAME=$(kubectl get pods -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller -o name)
+            POD_NAME=$(kubectl get pods -n arc-systems -l app.kubernetes.io/name=gha-rs-controller -o name)
            if [ -n "$POD_NAME" ]; then
              echo "Pod found: $POD_NAME"
              break
            fi
-            if [ "$count" -ge 10 ]; then
-              echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-runner-scale-set-controller"
+            if [ "$count" -ge 60 ]; then
+              echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-rs-controller"
              exit 1
            fi
            sleep 1
            count=$((count+1))
          done
-          kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller
+          kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-rs-controller
          kubectl get pod -n arc-systems
-          kubectl describe deployment arc-gha-runner-scale-set-controller -n arc-systems
+          kubectl describe deployment arc-gha-rs-controller -n arc-systems

      - name: Install gha-runner-scale-set
        id: install_arc
@@ -84,11 +93,12 @@ jobs:
              echo "Pod found: $POD_NAME"
              break
            fi
-            if [ "$count" -ge 10 ]; then
+            if [ "$count" -ge 60 ]; then
              echo "Timeout waiting for listener pod with label actions.github.com/scale-set-name=$ARC_NAME"
              exit 1
            fi
            sleep 1
            count=$((count+1))
          done
          kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME
          kubectl get pod -n arc-systems
@@ -107,6 +117,7 @@ jobs:

  single-namespace-setup:
    runs-on: ubuntu-latest
    timeout-minutes: 20
+    if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.id == github.repository_id
    env:
      WORKFLOW_FILE: "arc-test-workflow.yaml"
@@ -138,20 +149,21 @@ jobs:
            --debug
          count=0
          while true; do
-            POD_NAME=$(kubectl get pods -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller -o name)
+            POD_NAME=$(kubectl get pods -n arc-systems -l app.kubernetes.io/name=gha-rs-controller -o name)
            if [ -n "$POD_NAME" ]; then
              echo "Pod found: $POD_NAME"
              break
            fi
-            if [ "$count" -ge 10 ]; then
-              echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-runner-scale-set-controller"
+            if [ "$count" -ge 60 ]; then
+              echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-rs-controller"
              exit 1
            fi
            sleep 1
            count=$((count+1))
          done
-          kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller
+          kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-rs-controller
          kubectl get pod -n arc-systems
-          kubectl describe deployment arc-gha-runner-scale-set-controller -n arc-systems
+          kubectl describe deployment arc-gha-rs-controller -n arc-systems

      - name: Install gha-runner-scale-set
        id: install_arc
@@ -172,11 +184,12 @@ jobs:
              echo "Pod found: $POD_NAME"
              break
            fi
-            if [ "$count" -ge 10 ]; then
+            if [ "$count" -ge 60 ]; then
              echo "Timeout waiting for listener pod with label actions.github.com/scale-set-name=$ARC_NAME"
              exit 1
            fi
            sleep 1
            count=$((count+1))
          done
          kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME
          kubectl get pod -n arc-systems
@@ -195,6 +208,7 @@ jobs:

  dind-mode-setup:
    runs-on: ubuntu-latest
    timeout-minutes: 20
+    if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.id == github.repository_id
    env:
      WORKFLOW_FILE: arc-test-dind-workflow.yaml
@@ -224,20 +238,21 @@ jobs:
            --debug
          count=0
          while true; do
-            POD_NAME=$(kubectl get pods -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller -o name)
+            POD_NAME=$(kubectl get pods -n arc-systems -l app.kubernetes.io/name=gha-rs-controller -o name)
            if [ -n "$POD_NAME" ]; then
              echo "Pod found: $POD_NAME"
              break
            fi
-            if [ "$count" -ge 10 ]; then
-              echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-runner-scale-set-controller"
+            if [ "$count" -ge 60 ]; then
+              echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-rs-controller"
              exit 1
            fi
            sleep 1
            count=$((count+1))
          done
-          kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller
+          kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-rs-controller
          kubectl get pod -n arc-systems
-          kubectl describe deployment arc-gha-runner-scale-set-controller -n arc-systems
+          kubectl describe deployment arc-gha-rs-controller -n arc-systems

      - name: Install gha-runner-scale-set
        id: install_arc
@@ -259,11 +274,12 @@ jobs:
              echo "Pod found: $POD_NAME"
              break
            fi
-            if [ "$count" -ge 10 ]; then
+            if [ "$count" -ge 60 ]; then
              echo "Timeout waiting for listener pod with label actions.github.com/scale-set-name=$ARC_NAME"
              exit 1
            fi
            sleep 1
            count=$((count+1))
          done
          kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME
          kubectl get pod -n arc-systems
@@ -282,6 +298,7 @@ jobs:

  kubernetes-mode-setup:
    runs-on: ubuntu-latest
    timeout-minutes: 20
+    if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.id == github.repository_id
    env:
      WORKFLOW_FILE: "arc-test-kubernetes-workflow.yaml"
@@ -316,20 +333,21 @@ jobs:
            --debug
          count=0
          while true; do
-            POD_NAME=$(kubectl get pods -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller -o name)
+            POD_NAME=$(kubectl get pods -n arc-systems -l app.kubernetes.io/name=gha-rs-controller -o name)
            if [ -n "$POD_NAME" ]; then
              echo "Pod found: $POD_NAME"
              break
            fi
-            if [ "$count" -ge 10 ]; then
-              echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-runner-scale-set-controller"
+            if [ "$count" -ge 60 ]; then
+              echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-rs-controller"
              exit 1
            fi
            sleep 1
            count=$((count+1))
          done
-          kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller
+          kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-rs-controller
          kubectl get pod -n arc-systems
-          kubectl describe deployment arc-gha-runner-scale-set-controller -n arc-systems
+          kubectl describe deployment arc-gha-rs-controller -n arc-systems
          kubectl wait --timeout=30s --for=condition=ready pod -n openebs -l name=openebs-localpv-provisioner

      - name: Install gha-runner-scale-set
@@ -355,11 +373,12 @@ jobs:
              echo "Pod found: $POD_NAME"
              break
            fi
-            if [ "$count" -ge 10 ]; then
+            if [ "$count" -ge 60 ]; then
              echo "Timeout waiting for listener pod with label actions.github.com/scale-set-name=$ARC_NAME"
              exit 1
            fi
            sleep 1
            count=$((count+1))
          done
          kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME
          kubectl get pod -n arc-systems
@@ -378,6 +397,7 @@ jobs:

  auth-proxy-setup:
    runs-on: ubuntu-latest
    timeout-minutes: 20
+    if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.id == github.repository_id
    env:
      WORKFLOW_FILE: "arc-test-workflow.yaml"
@@ -407,20 +427,21 @@ jobs:
            --debug
          count=0
          while true; do
-            POD_NAME=$(kubectl get pods -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller -o name)
+            POD_NAME=$(kubectl get pods -n arc-systems -l app.kubernetes.io/name=gha-rs-controller -o name)
            if [ -n "$POD_NAME" ]; then
              echo "Pod found: $POD_NAME"
              break
            fi
-            if [ "$count" -ge 10 ]; then
-              echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-runner-scale-set-controller"
+            if [ "$count" -ge 60 ]; then
+              echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-rs-controller"
              exit 1
            fi
            sleep 1
            count=$((count+1))
          done
-          kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller
+          kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-rs-controller
          kubectl get pod -n arc-systems
-          kubectl describe deployment arc-gha-runner-scale-set-controller -n arc-systems
+          kubectl describe deployment arc-gha-rs-controller -n arc-systems

      - name: Install gha-runner-scale-set
        id: install_arc
@@ -453,11 +474,12 @@ jobs:
              echo "Pod found: $POD_NAME"
              break
            fi
-            if [ "$count" -ge 10 ]; then
+            if [ "$count" -ge 60 ]; then
              echo "Timeout waiting for listener pod with label actions.github.com/scale-set-name=$ARC_NAME"
              exit 1
            fi
            sleep 1
            count=$((count+1))
          done
          kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME
          kubectl get pod -n arc-systems
@@ -476,6 +498,7 @@ jobs:

  anonymous-proxy-setup:
    runs-on: ubuntu-latest
    timeout-minutes: 20
+    if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.id == github.repository_id
    env:
      WORKFLOW_FILE: "arc-test-workflow.yaml"
@@ -505,20 +528,21 @@ jobs:
            --debug
          count=0
          while true; do
-            POD_NAME=$(kubectl get pods -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller -o name)
+            POD_NAME=$(kubectl get pods -n arc-systems -l app.kubernetes.io/name=gha-rs-controller -o name)
            if [ -n "$POD_NAME" ]; then
              echo "Pod found: $POD_NAME"
              break
            fi
-            if [ "$count" -ge 10 ]; then
-              echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-runner-scale-set-controller"
+            if [ "$count" -ge 60 ]; then
+              echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-rs-controller"
              exit 1
            fi
            sleep 1
            count=$((count+1))
          done
-          kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller
+          kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-rs-controller
          kubectl get pod -n arc-systems
-          kubectl describe deployment arc-gha-runner-scale-set-controller -n arc-systems
+          kubectl describe deployment arc-gha-rs-controller -n arc-systems

      - name: Install gha-runner-scale-set
        id: install_arc
@@ -545,11 +569,12 @@ jobs:
              echo "Pod found: $POD_NAME"
              break
            fi
-            if [ "$count" -ge 10 ]; then
+            if [ "$count" -ge 60 ]; then
              echo "Timeout waiting for listener pod with label actions.github.com/scale-set-name=$ARC_NAME"
              exit 1
            fi
            sleep 1
            count=$((count+1))
          done
          kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME
          kubectl get pod -n arc-systems
@@ -565,3 +590,293 @@ jobs:
          arc-name: ${{steps.install_arc.outputs.ARC_NAME}}
          arc-namespace: "arc-runners"
          arc-controller-namespace: "arc-systems"

  self-signed-ca-setup:
    runs-on: ubuntu-latest
    timeout-minutes: 20
    if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.id == github.repository_id
    env:
      WORKFLOW_FILE: "arc-test-workflow.yaml"
    steps:
      - uses: actions/checkout@v3
        with:
          ref: ${{github.head_ref}}

      - uses: ./.github/actions/setup-arc-e2e
        id: setup
        with:
          app-id: ${{secrets.E2E_TESTS_ACCESS_APP_ID}}
          app-pk: ${{secrets.E2E_TESTS_ACCESS_PK}}
          image-name: ${{env.IMAGE_NAME}}
          image-tag: ${{env.IMAGE_VERSION}}
          target-org: ${{env.TARGET_ORG}}

      - name: Install gha-runner-scale-set-controller
        id: install_arc_controller
        run: |
          helm install arc \
            --namespace "arc-systems" \
            --create-namespace \
            --set image.repository=${{ env.IMAGE_NAME }} \
            --set image.tag=${{ env.IMAGE_VERSION }} \
            ./charts/gha-runner-scale-set-controller \
            --debug
          count=0
          while true; do
            POD_NAME=$(kubectl get pods -n arc-systems -l app.kubernetes.io/name=gha-rs-controller -o name)
            if [ -n "$POD_NAME" ]; then
              echo "Pod found: $POD_NAME"
              break
            fi
            if [ "$count" -ge 60 ]; then
              echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-rs-controller"
              exit 1
            fi
            sleep 1
            count=$((count+1))
          done
          kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-rs-controller
          kubectl get pod -n arc-systems
          kubectl describe deployment arc-gha-rs-controller -n arc-systems

      - name: Install gha-runner-scale-set
        id: install_arc
        run: |
          docker run -d \
            --rm \
            --name mitmproxy \
            --publish 8080:8080 \
            -v ${{ github.workspace }}/mitmproxy:/home/mitmproxy/.mitmproxy \
            mitmproxy/mitmproxy:latest \
            mitmdump
          count=0
          while true; do
            if [ -f "${{ github.workspace }}/mitmproxy/mitmproxy-ca-cert.pem" ]; then
              echo "CA cert generated"
              cat ${{ github.workspace }}/mitmproxy/mitmproxy-ca-cert.pem
              break
            fi
            if [ "$count" -ge 60 ]; then
              echo "Timeout waiting for mitmproxy generate its CA cert"
              exit 1
            fi
            sleep 1
            count=$((count+1))
          done
          sudo cp ${{ github.workspace }}/mitmproxy/mitmproxy-ca-cert.pem ${{ github.workspace }}/mitmproxy/mitmproxy-ca-cert.crt
          sudo chown runner ${{ github.workspace }}/mitmproxy/mitmproxy-ca-cert.crt
          kubectl create namespace arc-runners
          kubectl -n arc-runners create configmap ca-cert --from-file="${{ github.workspace }}/mitmproxy/mitmproxy-ca-cert.crt"
          kubectl -n arc-runners get configmap ca-cert -o yaml
          ARC_NAME=${{github.job}}-$(date +'%M%S')$((($RANDOM + 100) % 100 + 1))
          helm install "$ARC_NAME" \
            --namespace "arc-runners" \
            --create-namespace \
            --set githubConfigUrl="https://github.com/${{ env.TARGET_ORG }}/${{env.TARGET_REPO}}" \
            --set githubConfigSecret.github_token="${{ steps.setup.outputs.token }}" \
            --set proxy.https.url="http://host.minikube.internal:8080" \
            --set "proxy.noProxy[0]=10.96.0.1:443" \
            --set "githubServerTLS.certificateFrom.configMapKeyRef.name=ca-cert" \
            --set "githubServerTLS.certificateFrom.configMapKeyRef.key=mitmproxy-ca-cert.crt" \
            --set "githubServerTLS.runnerMountPath=/usr/local/share/ca-certificates/" \
            ./charts/gha-runner-scale-set \
            --debug
          echo "ARC_NAME=$ARC_NAME" >> $GITHUB_OUTPUT
          count=0
          while true; do
            POD_NAME=$(kubectl get pods -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME -o name)
            if [ -n "$POD_NAME" ]; then
              echo "Pod found: $POD_NAME"
              break
            fi
            if [ "$count" -ge 60 ]; then
              echo "Timeout waiting for listener pod with label actions.github.com/scale-set-name=$ARC_NAME"
              exit 1
            fi
            sleep 1
            count=$((count+1))
          done
          kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME
          kubectl get pod -n arc-systems

      - name: Test ARC E2E
        uses: ./.github/actions/execute-assert-arc-e2e
        timeout-minutes: 10
        with:
          auth-token: ${{ steps.setup.outputs.token }}
          repo-owner: ${{ env.TARGET_ORG }}
          repo-name: ${{env.TARGET_REPO}}
          workflow-file: ${{env.WORKFLOW_FILE}}
          arc-name: ${{steps.install_arc.outputs.ARC_NAME}}
          arc-namespace: "arc-runners"
          arc-controller-namespace: "arc-systems"
  update-strategy-tests:
    runs-on: ubuntu-latest
    timeout-minutes: 20
    if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.id == github.repository_id
    env:
      WORKFLOW_FILE: "arc-test-sleepy-matrix.yaml"
    steps:
      - uses: actions/checkout@v3
        with:
          ref: ${{github.head_ref}}

      - uses: ./.github/actions/setup-arc-e2e
        id: setup
        with:
          app-id: ${{secrets.E2E_TESTS_ACCESS_APP_ID}}
          app-pk: ${{secrets.E2E_TESTS_ACCESS_PK}}
          image-name: ${{env.IMAGE_NAME}}
          image-tag: ${{env.IMAGE_VERSION}}
          target-org: ${{env.TARGET_ORG}}

      - name: Install gha-runner-scale-set-controller
        id: install_arc_controller
        run: |
          helm install arc \
            --namespace "arc-systems" \
            --create-namespace \
            --set image.repository=${{ env.IMAGE_NAME }} \
            --set image.tag=${{ env.IMAGE_VERSION }} \
            --set flags.updateStrategy="eventual" \
            ./charts/gha-runner-scale-set-controller \
            --debug
          count=0
          while true; do
            POD_NAME=$(kubectl get pods -n arc-systems -l app.kubernetes.io/name=gha-rs-controller -o name)
            if [ -n "$POD_NAME" ]; then
              echo "Pod found: $POD_NAME"
              break
            fi
            if [ "$count" -ge 60 ]; then
              echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-rs-controller"
              exit 1
            fi
            sleep 1
            count=$((count+1))
          done
          kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-rs-controller
          kubectl get pod -n arc-systems
          kubectl describe deployment arc-gha-rs-controller -n arc-systems

      - name: Install gha-runner-scale-set
        id: install_arc
        run: |
          ARC_NAME=${{github.job}}-$(date +'%M%S')$((($RANDOM + 100) % 100 + 1))
          helm install "$ARC_NAME" \
            --namespace "arc-runners" \
            --create-namespace \
            --set githubConfigUrl="https://github.com/${{ env.TARGET_ORG }}/${{env.TARGET_REPO}}" \
            --set githubConfigSecret.github_token="${{ steps.setup.outputs.token }}" \
            ./charts/gha-runner-scale-set \
            --debug
          echo "ARC_NAME=$ARC_NAME" >> $GITHUB_OUTPUT
          count=0
          while true; do
            POD_NAME=$(kubectl get pods -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME -o name)
            if [ -n "$POD_NAME" ]; then
              echo "Pod found: $POD_NAME"
              break
            fi
            if [ "$count" -ge 60 ]; then
              echo "Timeout waiting for listener pod with label actions.github.com/scale-set-name=$ARC_NAME"
              exit 1
            fi
            sleep 1
            count=$((count+1))
          done
          kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME
          kubectl get pod -n arc-systems

      - name: Trigger long running jobs and wait for runners to pick them up
        uses: ./.github/actions/execute-assert-arc-e2e
        timeout-minutes: 10
        with:
          auth-token: ${{ steps.setup.outputs.token }}
          repo-owner: ${{ env.TARGET_ORG }}
          repo-name: ${{env.TARGET_REPO}}
          workflow-file: ${{env.WORKFLOW_FILE}}
          arc-name: ${{steps.install_arc.outputs.ARC_NAME}}
          arc-namespace: "arc-runners"
          arc-controller-namespace: "arc-systems"
          wait-to-running: "true"
          wait-to-finish: "false"

      - name: Upgrade the gha-runner-scale-set
        shell: bash
        run: |
          helm upgrade --install "${{ steps.install_arc.outputs.ARC_NAME }}" \
            --namespace "arc-runners" \
            --create-namespace \
            --set githubConfigUrl="https://github.com/${{ env.TARGET_ORG }}/${{ env.TARGET_REPO }}" \
            --set githubConfigSecret.github_token="${{ steps.setup.outputs.token }}" \
            --set template.spec.containers[0].name="runner" \
            --set template.spec.containers[0].image="ghcr.io/actions/actions-runner:latest" \
            --set template.spec.containers[0].command={"/home/runner/run.sh"} \
            --set template.spec.containers[0].env[0].name="TEST" \
            --set template.spec.containers[0].env[0].value="E2E TESTS" \
            ./charts/gha-runner-scale-set \
            --debug

      - name: Assert that the listener is deleted while jobs are running
        shell: bash
        run: |
          count=0
          while true; do
            LISTENER_COUNT="$(kubectl get pods -l actions.github.com/scale-set-name=${{ steps.install_arc.outputs.ARC_NAME }} -n arc-systems --field-selector=status.phase=Running -o=jsonpath='{.items}' | jq 'length')"
            RUNNERS_COUNT="$(kubectl get pods -l app.kubernetes.io/component=runner -n arc-runners --field-selector=status.phase=Running -o=jsonpath='{.items}' | jq 'length')"
            RESOURCES="$(kubectl get pods -A)"

            if [ "$LISTENER_COUNT" -eq 0 ]; then
              echo "Listener has been deleted"
              echo "$RESOURCES"
              exit 0
            fi
            if [ "$count" -ge 60 ]; then
              echo "Timeout waiting for listener to be deleted"
              echo "$RESOURCES"
              exit 1
            fi

            echo "Waiting for listener to be deleted"
            echo "Listener count: $LISTENER_COUNT target: 0 | Runners count: $RUNNERS_COUNT target: 3"

            sleep 1
            count=$((count+1))
          done

      - name: Assert that the listener goes back up after the jobs are done
        shell: bash
        run: |
          count=0
          while true; do
            LISTENER_COUNT="$(kubectl get pods -l actions.github.com/scale-set-name=${{ steps.install_arc.outputs.ARC_NAME }} -n arc-systems --field-selector=status.phase=Running -o=jsonpath='{.items}' | jq 'length')"
            RUNNERS_COUNT="$(kubectl get pods -l app.kubernetes.io/component=runner -n arc-runners --field-selector=status.phase=Running -o=jsonpath='{.items}' | jq 'length')"
            RESOURCES="$(kubectl get pods -A)"

            if [ "$LISTENER_COUNT" -eq 1 ]; then
              echo "Listener is up!"
              echo "$RESOURCES"
              exit 0
            fi
            if [ "$count" -ge 120 ]; then
              echo "Timeout waiting for listener to be recreated"
              echo "$RESOURCES"
              exit 1
            fi

            echo "Waiting for listener to be recreated"
            echo "Listener count: $LISTENER_COUNT target: 1 | Runners count: $RUNNERS_COUNT target: 0"

            sleep 1
            count=$((count+1))
          done

      - name: Gather logs and cleanup
        shell: bash
        if: always()
        run: |
          helm uninstall "${{ steps.install_arc.outputs.ARC_NAME }}" --namespace "arc-runners" --debug
          kubectl wait --timeout=10s --for=delete AutoScalingRunnerSet -n "${{ steps.install_arc.outputs.ARC_NAME }}" -l app.kubernetes.io/instance="${{ steps.install_arc.outputs.ARC_NAME }}"
          kubectl logs deployment/arc-gha-rs-controller -n "arc-systems"
@@ -1,4 +1,4 @@
-name: Publish Runner Scale Set Controller Charts
+name: (gha) Publish Helm Charts

on:
  workflow_dispatch:
@@ -33,10 +33,14 @@ env:
  HELM_VERSION: v3.8.0

permissions:
  packages: write

+concurrency:
+  group: ${{ github.workflow }}
+  cancel-in-progress: true

jobs:
  build-push-image:
    name: Build and push controller image
    runs-on: ubuntu-latest
    steps:
@@ -46,7 +50,14 @@ jobs:
          # If inputs.ref is empty, it'll resolve to the default branch
          ref: ${{ inputs.ref }}

+      - name: Check chart versions
+        # Binary version and chart versions need to match.
+        # In case of an upgrade, the controller will try to clean up
+        # resources with older versions that should have been cleaned up
+        # during the upgrade process
+        run: ./hack/check-gh-chart-versions.sh ${{ inputs.release_tag_name }}

      - name: Resolve parameters
        id: resolve_parameters
        run: |
          resolvedRef="${{ inputs.ref }}"
@@ -67,7 +78,7 @@ jobs:
        uses: docker/setup-buildx-action@v2
        with:
          # Pinning v0.9.1 for Buildx and BuildKit v0.10.6
          # BuildKit v0.11 which has a bug causing intermittent
          # failures pushing images to GHCR
          version: v0.9.1
          driver-opts: image=moby/buildkit:v0.10.6
@@ -94,7 +105,7 @@ jobs:

      - name: Job summary
        run: |
-          echo "The [publish-runner-scale-set.yaml](https://github.com/actions/actions-runner-controller/blob/main/.github/workflows/publish-runner-scale-set.yaml) workflow run was completed successfully!" >> $GITHUB_STEP_SUMMARY
+          echo "The [gha-publish-chart.yaml](https://github.com/actions/actions-runner-controller/blob/main/.github/workflows/gha-publish-chart.yaml) workflow run was completed successfully!" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "**Parameters:**" >> $GITHUB_STEP_SUMMARY
          echo "- Ref: ${{ steps.resolve_parameters.outputs.resolvedRef }}" >> $GITHUB_STEP_SUMMARY
@@ -115,7 +126,7 @@ jobs:
          # If inputs.ref is empty, it'll resolve to the default branch
          ref: ${{ inputs.ref }}

      - name: Resolve parameters
        id: resolve_parameters
        run: |
          resolvedRef="${{ inputs.ref }}"
@@ -126,7 +137,7 @@ jobs:
          echo "INFO: Resolving short SHA for $resolvedRef"
          echo "short_sha=$(git rev-parse --short $resolvedRef)" >> $GITHUB_OUTPUT
          echo "INFO: Normalizing repository name (lowercase)"
          echo "repository_owner=$(echo ${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT

      - name: Set up Helm
        # Using https://github.com/Azure/setup-helm/releases/tag/v3.5
@@ -163,7 +174,7 @@ jobs:
          # If inputs.ref is empty, it'll resolve to the default branch
          ref: ${{ inputs.ref }}

      - name: Resolve parameters
        id: resolve_parameters
        run: |
          resolvedRef="${{ inputs.ref }}"
@@ -174,7 +185,7 @@ jobs:
          echo "INFO: Resolving short SHA for $resolvedRef"
          echo "short_sha=$(git rev-parse --short $resolvedRef)" >> $GITHUB_OUTPUT
          echo "INFO: Normalizing repository name (lowercase)"
          echo "repository_owner=$(echo ${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT

      - name: Set up Helm
        # Using https://github.com/Azure/setup-helm/releases/tag/v3.5
@@ -1,4 +1,4 @@
name: Validate Helm Chart (gha-runner-scale-set-controller and gha-runner-scale-set)
name: (gha) Validate Helm Charts

on:
  pull_request:
@@ -6,13 +6,13 @@ on:
      - master
    paths:
      - 'charts/**'
      - '.github/workflows/validate-gha-chart.yaml'
      - '.github/workflows/gha-validate-chart.yaml'
      - '!charts/actions-runner-controller/**'
      - '!**.md'
  push:
    paths:
      - 'charts/**'
      - '.github/workflows/validate-gha-chart.yaml'
      - '.github/workflows/gha-validate-chart.yaml'
      - '!charts/actions-runner-controller/**'
      - '!**.md'
  workflow_dispatch:
@@ -23,6 +23,13 @@ env:
permissions:
  contents: read

concurrency:
  # This will make sure we only apply the concurrency limits on pull requests
  # but not pushes to master branch by making the concurrency group name unique
  # for pushes
  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

jobs:
  validate-chart:
    name: Lint Chart

@@ -61,23 +68,7 @@ jobs:
          python-version: '3.7'

      - name: Set up chart-testing
        uses: helm/chart-testing-action@v2.3.1

      - name: Set up latest version chart-testing
        run: |
          echo 'deb [trusted=yes] https://repo.goreleaser.com/apt/ /' | sudo tee /etc/apt/sources.list.d/goreleaser.list
          sudo apt update
          sudo apt install goreleaser
          git clone https://github.com/helm/chart-testing
          cd chart-testing
          unset CT_CONFIG_DIR
          goreleaser build --clean --skip-validate
          ./dist/chart-testing_linux_amd64_v1/ct version
          echo 'Adding ct directory to PATH...'
          echo "$RUNNER_TEMP/chart-testing/dist/chart-testing_linux_amd64_v1" >> "$GITHUB_PATH"
          echo 'Setting CT_CONFIG_DIR...'
          echo "CT_CONFIG_DIR=$RUNNER_TEMP/chart-testing/etc" >> "$GITHUB_ENV"
        working-directory: ${{ runner.temp }}
        uses: helm/chart-testing-action@v2.4.0

      - name: Run chart-testing (list-changed)
        id: list-changed

@@ -107,7 +98,7 @@ jobs:
          load: true
          build-args: |
            DOCKER_IMAGE_NAME=test-arc
            VERSION=dev
          tags: |
            test-arc:dev
          cache-from: type=gha

@@ -1,4 +1,4 @@
name: Publish Canary Image
name: Publish Canary Images

# Revert to https://github.com/actions-runner-controller/releases#releases
# for details on why we use this approach
@@ -11,19 +11,19 @@ on:
      - '.github/actions/**'
      - '.github/ISSUE_TEMPLATE/**'
      - '.github/workflows/e2e-test-dispatch-workflow.yaml'
      - '.github/workflows/e2e-test-linux-vm.yaml'
      - '.github/workflows/publish-arc.yaml'
      - '.github/workflows/publish-chart.yaml'
      - '.github/workflows/publish-runner-scale-set.yaml'
      - '.github/workflows/release-runners.yaml'
      - '.github/workflows/run-codeql.yaml'
      - '.github/workflows/run-first-interaction.yaml'
      - '.github/workflows/run-stale.yaml'
      - '.github/workflows/update-runners.yaml'
      - '.github/workflows/gha-e2e-tests.yaml'
      - '.github/workflows/arc-publish.yaml'
      - '.github/workflows/arc-publish-chart.yaml'
      - '.github/workflows/gha-publish-chart.yaml'
      - '.github/workflows/arc-release-runners.yaml'
      - '.github/workflows/global-run-codeql.yaml'
      - '.github/workflows/global-run-first-interaction.yaml'
      - '.github/workflows/global-run-stale.yaml'
      - '.github/workflows/arc-update-runners-scheduled.yaml'
      - '.github/workflows/validate-arc.yaml'
      - '.github/workflows/validate-chart.yaml'
      - '.github/workflows/validate-gha-chart.yaml'
      - '.github/workflows/validate-runners.yaml'
      - '.github/workflows/arc-validate-chart.yaml'
      - '.github/workflows/gha-validate-chart.yaml'
      - '.github/workflows/arc-validate-runners.yaml'
      - '.github/dependabot.yml'
      - '.github/RELEASE_NOTE_TEMPLATE.md'
      - 'runner/**'
@@ -37,6 +37,10 @@ permissions:
  contents: read
  packages: write

concurrency:
  group: ${{ github.workflow }}
  cancel-in-progress: true

env:
  # Safeguard to prevent pushing images to registries after build
  PUSH_TO_REGISTRIES: true
@@ -126,4 +130,4 @@ jobs:
            ghcr.io/${{ steps.resolve_parameters.outputs.repository_owner }}/gha-runner-scale-set-controller:canary
            ghcr.io/${{ steps.resolve_parameters.outputs.repository_owner }}/gha-runner-scale-set-controller:canary-${{ steps.resolve_parameters.outputs.short_sha }}
          cache-from: type=gha
          cache-to: type=gha,mode=max

@@ -10,6 +10,13 @@ on:
  schedule:
    - cron: '30 1 * * 0'

concurrency:
  # This will make sure we only apply the concurrency limits on pull requests
  # but not pushes to master branch by making the concurrency group name unique
  # for pushes
  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

jobs:
  analyze:
    name: Analyze

@@ -1,4 +1,4 @@
name: first-interaction
name: First Interaction

on:
  issues:

16 .github/workflows/go.yaml vendored

@@ -8,7 +8,6 @@ on:
      - '**.go'
      - 'go.mod'
      - 'go.sum'

  pull_request:
    paths:
      - '.github/workflows/go.yaml'
@@ -19,6 +18,13 @@ on:
permissions:
  contents: read

concurrency:
  # This will make sure we only apply the concurrency limits on pull requests
  # but not pushes to master branch by making the concurrency group name unique
  # for pushes
  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

jobs:
  fmt:
    runs-on: ubuntu-latest

@@ -72,9 +78,11 @@ jobs:
        run: git diff --exit-code
      - name: Install kubebuilder
        run: |
          curl -L -O https://github.com/kubernetes-sigs/kubebuilder/releases/download/v2.3.2/kubebuilder_2.3.2_linux_amd64.tar.gz
          tar zxvf kubebuilder_2.3.2_linux_amd64.tar.gz
          sudo mv kubebuilder_2.3.2_linux_amd64 /usr/local/kubebuilder
          curl -D headers.txt -fsL "https://storage.googleapis.com/kubebuilder-tools/kubebuilder-tools-1.26.1-linux-amd64.tar.gz" -o kubebuilder-tools
          echo "$(grep -i etag headers.txt -m 1 | cut -d'"' -f2) kubebuilder-tools" > sum
          md5sum -c sum
          tar -zvxf kubebuilder-tools
          sudo mv kubebuilder /usr/local/
      - name: Run go tests
        run: |
          go test -short `go list ./... | grep -v ./test_e2e_arc`

205 .github/workflows/publish-chart.yaml vendored

@@ -1,205 +0,0 @@
name: Publish Helm Chart

# Revert to https://github.com/actions-runner-controller/releases#releases
# for details on why we use this approach
on:
  push:
    branches:
      - master
    paths:
      - 'charts/**'
      - '.github/workflows/publish-chart.yaml'
      - '!charts/actions-runner-controller/docs/**'
      - '!charts/gha-runner-scale-set-controller/**'
      - '!charts/gha-runner-scale-set/**'
      - '!**.md'
  workflow_dispatch:

env:
  KUBE_SCORE_VERSION: 1.10.0
  HELM_VERSION: v3.8.0

permissions:
  contents: read

jobs:
  lint-chart:
    name: Lint Chart
    runs-on: ubuntu-latest
    outputs:
      publish-chart: ${{ steps.publish-chart-step.outputs.publish }}
    steps:
      - name: Checkout
        uses: actions/checkout@v3
        with:
          fetch-depth: 0

      - name: Set up Helm
        uses: azure/setup-helm@v3.4
        with:
          version: ${{ env.HELM_VERSION }}

      - name: Set up kube-score
        run: |
          wget https://github.com/zegl/kube-score/releases/download/v${{ env.KUBE_SCORE_VERSION }}/kube-score_${{ env.KUBE_SCORE_VERSION }}_linux_amd64 -O kube-score
          chmod 755 kube-score

      - name: Kube-score generated manifests
        run: helm template --values charts/.ci/values-kube-score.yaml charts/* | ./kube-score score -
          --ignore-test pod-networkpolicy
          --ignore-test deployment-has-poddisruptionbudget
          --ignore-test deployment-has-host-podantiaffinity
          --ignore-test container-security-context
          --ignore-test pod-probes
          --ignore-test container-image-tag
          --enable-optional-test container-security-context-privileged
          --enable-optional-test container-security-context-readonlyrootfilesystem

      # python is a requirement for the chart-testing action below (supports yamllint among other tests)
      - uses: actions/setup-python@v4
        with:
          python-version: '3.7'

      - name: Set up chart-testing
        uses: helm/chart-testing-action@v2.3.1

      - name: Run chart-testing (list-changed)
        id: list-changed
        run: |
          changed=$(ct list-changed --config charts/.ci/ct-config.yaml)
          if [[ -n "$changed" ]]; then
            echo "::set-output name=changed::true"
          fi

      - name: Run chart-testing (lint)
        run: |
          ct lint --config charts/.ci/ct-config.yaml

      - name: Create kind cluster
        if: steps.list-changed.outputs.changed == 'true'
        uses: helm/kind-action@v1.4.0

      # We need cert-manager already installed in the cluster because we assume the CRDs exist
      - name: Install cert-manager
        if: steps.list-changed.outputs.changed == 'true'
        run: |
          helm repo add jetstack https://charts.jetstack.io --force-update
          helm install cert-manager jetstack/cert-manager --set installCRDs=true --wait

      - name: Run chart-testing (install)
        if: steps.list-changed.outputs.changed == 'true'
        run: ct install --config charts/.ci/ct-config.yaml

      # WARNING: This relies on the latest release being at the top of the JSON from GitHub and a clean chart.yaml
      - name: Check if Chart Publish is Needed
        id: publish-chart-step
        run: |
          CHART_TEXT=$(curl -fs https://raw.githubusercontent.com/${{ github.repository }}/master/charts/actions-runner-controller/Chart.yaml)
          NEW_CHART_VERSION=$(echo "$CHART_TEXT" | grep version: | cut -d ' ' -f 2)
          RELEASE_LIST=$(curl -fs https://api.github.com/repos/${{ github.repository }}/releases | jq .[].tag_name | grep actions-runner-controller | cut -d '"' -f 2 | cut -d '-' -f 4)
          LATEST_RELEASED_CHART_VERSION=$(echo $RELEASE_LIST | cut -d ' ' -f 1)
          echo "CHART_VERSION_IN_MASTER=$NEW_CHART_VERSION" >> $GITHUB_ENV
          echo "LATEST_CHART_VERSION=$LATEST_RELEASED_CHART_VERSION" >> $GITHUB_ENV
          if [[ $NEW_CHART_VERSION != $LATEST_RELEASED_CHART_VERSION ]]; then
            echo "publish=true" >> $GITHUB_OUTPUT
          else
            echo "publish=false" >> $GITHUB_OUTPUT
          fi

      - name: Job summary
        run: |
          echo "Chart linting has been completed." >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "**Status:**" >> $GITHUB_STEP_SUMMARY
          echo "- chart version in master: ${{ env.CHART_VERSION_IN_MASTER }}" >> $GITHUB_STEP_SUMMARY
          echo "- latest chart version: ${{ env.LATEST_CHART_VERSION }}" >> $GITHUB_STEP_SUMMARY
          echo "- publish new chart: ${{ steps.publish-chart-step.outputs.publish }}" >> $GITHUB_STEP_SUMMARY

  publish-chart:
    if: needs.lint-chart.outputs.publish-chart == 'true'
    needs: lint-chart
    name: Publish Chart
    runs-on: ubuntu-latest
    permissions:
      contents: write # for helm/chart-releaser-action to push chart release and create a release
    env:
      CHART_TARGET_ORG: actions-runner-controller
      CHART_TARGET_REPO: actions-runner-controller.github.io
      CHART_TARGET_BRANCH: master

    steps:
      - name: Checkout
        uses: actions/checkout@v3
        with:
          fetch-depth: 0

      - name: Configure Git
        run: |
          git config user.name "$GITHUB_ACTOR"
          git config user.email "$GITHUB_ACTOR@users.noreply.github.com"

      - name: Get Token
        id: get_workflow_token
        uses: peter-murray/workflow-application-token-action@8e1ba3bf1619726336414f1014e37f17fbadf1db
        with:
          application_id: ${{ secrets.ACTIONS_ACCESS_APP_ID }}
          application_private_key: ${{ secrets.ACTIONS_ACCESS_PK }}
          organization: ${{ env.CHART_TARGET_ORG }}

      - name: Install chart-releaser
        uses: helm/chart-releaser-action@v1.4.1
        with:
          install_only: true
          install_dir: ${{ github.workspace }}/bin

      - name: Package and upload release assets
        run: |
          cr package \
            ${{ github.workspace }}/charts/actions-runner-controller/ \
            --package-path .cr-release-packages

          cr upload \
            --owner "$(echo ${{ github.repository }} | cut -d '/' -f 1)" \
            --git-repo "$(echo ${{ github.repository }} | cut -d '/' -f 2)" \
            --package-path .cr-release-packages \
            --token ${{ secrets.GITHUB_TOKEN }}

      - name: Generate updated index.yaml
        run: |
          cr index \
            --owner "$(echo ${{ github.repository }} | cut -d '/' -f 1)" \
            --git-repo "$(echo ${{ github.repository }} | cut -d '/' -f 2)" \
            --index-path ${{ github.workspace }}/index.yaml \
            --pages-branch 'gh-pages' \
            --pages-index-path 'index.yaml'

      # Chart Release was never intended to publish to a different repo
      # this workaround is intended to move the index.yaml to the target repo
      # where the github pages are hosted
      - name: Checkout pages repository
        uses: actions/checkout@v3
        with:
          repository: ${{ env.CHART_TARGET_ORG }}/${{ env.CHART_TARGET_REPO }}
          path: ${{ env.CHART_TARGET_REPO }}
          ref: ${{ env.CHART_TARGET_BRANCH }}
          token: ${{ steps.get_workflow_token.outputs.token }}

      - name: Copy index.yaml
        run: |
          cp ${{ github.workspace }}/index.yaml ${{ env.CHART_TARGET_REPO }}/actions-runner-controller/index.yaml

      - name: Commit and push
        run: |
          git config user.name "$GITHUB_ACTOR"
          git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
          git add .
          git commit -m "Update index.yaml"
          git push
        working-directory: ${{ github.workspace }}/${{ env.CHART_TARGET_REPO }}

      - name: Job summary
        run: |
          echo "New helm chart has been published" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "**Status:**" >> $GITHUB_STEP_SUMMARY
          echo "- New [index.yaml](https://github.com/${{ env.CHART_TARGET_ORG }}/${{ env.CHART_TARGET_REPO }}/tree/main/actions-runner-controller) pushed" >> $GITHUB_STEP_SUMMARY

109 .github/workflows/update-runners.yaml vendored

@@ -1,109 +0,0 @@
# This workflow polls releases from actions/runner and in case of a new one it
# updates files containing the runner version and opens a pull request.
name: Update runners

on:
  schedule:
    # run daily
    - cron: "0 9 * * *"
  workflow_dispatch:

jobs:
  # check_versions compares our current version and the latest available runner
  # version and sets them as outputs.
  check_versions:
    runs-on: ubuntu-latest
    env:
      GH_TOKEN: ${{ github.token }}
    outputs:
      current_version: ${{ steps.versions.outputs.current_version }}
      latest_version: ${{ steps.versions.outputs.latest_version }}
    steps:
      - uses: actions/checkout@v3

      - name: Get current and latest versions
        id: versions
        run: |
          CURRENT_VERSION=$(echo -n $(cat runner/VERSION))
          echo "Current version: $CURRENT_VERSION"
          echo current_version=$CURRENT_VERSION >> $GITHUB_OUTPUT

          LATEST_VERSION=$(gh release list --exclude-drafts --exclude-pre-releases --limit 1 -R actions/runner | grep -oP '(?<=v)[0-9.]+' | head -1)
          echo "Latest version: $LATEST_VERSION"
          echo latest_version=$LATEST_VERSION >> $GITHUB_OUTPUT

  # check_pr checks if a PR for the same update already exists. It only runs if
  # runner latest version != our current version. If no existing PR is found,
  # it sets a PR name as output.
  check_pr:
    runs-on: ubuntu-latest
    needs: check_versions
    if: needs.check_versions.outputs.current_version != needs.check_versions.outputs.latest_version
    outputs:
      pr_name: ${{ steps.pr_name.outputs.pr_name }}
    env:
      GH_TOKEN: ${{ github.token }}
    steps:
      - name: debug
        run:
          echo ${{ needs.check_versions.outputs.current_version }}
          echo ${{ needs.check_versions.outputs.latest_version }}
      - uses: actions/checkout@v3

      - name: PR Name
        id: pr_name
        env:
          LATEST_VERSION: ${{ needs.check_versions.outputs.latest_version }}
        run: |
          PR_NAME="Update runner to version ${LATEST_VERSION}"

          result=$(gh pr list --search "$PR_NAME" --json number --jq ".[].number" --limit 1)
          if [ -z "$result" ]
          then
            echo "No existing PRs found, setting output with pr_name=$PR_NAME"
            echo pr_name=$PR_NAME >> $GITHUB_OUTPUT
          else
            echo "Found a PR with title '$PR_NAME' already existing: ${{ github.server_url }}/${{ github.repository }}/pull/$result"
          fi

  # update_version updates runner version in the files listed below, commits
  # the changes and opens a pull request as `github-actions` bot.
  update_version:
    runs-on: ubuntu-latest
    needs:
      - check_versions
      - check_pr
    if: needs.check_pr.outputs.pr_name
    permissions:
      pull-requests: write
      contents: write
      actions: write
    env:
      GH_TOKEN: ${{ github.token }}
      CURRENT_VERSION: ${{ needs.check_versions.outputs.current_version }}
      LATEST_VERSION: ${{ needs.check_versions.outputs.latest_version }}
      PR_NAME: ${{ needs.check_pr.outputs.pr_name }}

    steps:
      - uses: actions/checkout@v3
      - name: New branch
        run: git checkout -b update-runner-$LATEST_VERSION
      - name: Update files
        run: |
          sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" runner/VERSION
          sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" runner/Makefile
          sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" Makefile
          sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" test/e2e/e2e_test.go
          sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" .github/workflows/e2e-test-linux-vm.yaml

      - name: Commit changes
        run: |
          # from https://github.com/orgs/community/discussions/26560
          git config user.email "41898282+github-actions[bot]@users.noreply.github.com"
          git config user.name "github-actions[bot]"
          git add .
          git commit -m "$PR_NAME"
          git push -u origin HEAD

      - name: Create pull request
        run: gh pr create -f

1 .gitignore vendored

@@ -35,3 +35,4 @@ bin
.DS_STORE

/test-assets
/.tools

164 CONTRIBUTING.md

@@ -15,6 +15,13 @@
- [Opening the Pull Request](#opening-the-pull-request)
- [Helm Version Changes](#helm-version-changes)
- [Testing Controller Built from a Pull Request](#testing-controller-built-from-a-pull-request)
- [Release process](#release-process)
  - [Workflow structure](#workflow-structure)
  - [Releasing legacy actions-runner-controller image and helm charts](#releasing-legacy-actions-runner-controller-image-and-helm-charts)
  - [Release actions-runner-controller runner images](#release-actions-runner-controller-runner-images)
  - [Release gha-runner-scale-set-controller image and helm charts](#release-gha-runner-scale-set-controller-image-and-helm-charts)
  - [Release actions/runner image](#release-actionsrunner-image)
  - [Canary releases](#canary-releases)

## Welcome

@@ -25,14 +32,13 @@ reviewed and merged.

## Before contributing code

We welcome code patches, but to make sure things are well coordinated you should discuss any significant change before starting the work.
The maintainers ask that you signal your intention to contribute to the project using the issue tracker.
If there is an existing issue that you want to work on, please let us know so we can get it assigned to you.
If you noticed a bug or want to add a new feature, there are issue templates you can fill out.
We welcome code patches, but to make sure things are well coordinated you should discuss any significant change before starting the work. The maintainers ask that you signal your intention to contribute to the project using the issue tracker. If there is an existing issue that you want to work on, please let us know so we can get it assigned to you. If you noticed a bug or want to add a new feature, there are issue templates you can fill out.

When filing a feature request, the maintainers will review the change and give you a decision on whether we are willing to accept the feature into the project.

For significantly large and/or complex features, we may request that you write up an architectural decision record ([ADR](https://github.blog/2020-08-13-why-write-adrs/)) detailing the change.
Please use the [template](/adrs/0000-TEMPLATE.md) as guidance.
Please use the [template](/docs/adrs/yyyy-mm-dd-TEMPLATE) as guidance.

<!--
TODO: Add a pre-requisite section describing what developers should

@@ -45,6 +51,7 @@ Depending on what you are patching depends on how you should go about it.
Below are some guides on how to test patches locally as well as develop the controller and runners.

When submitting a PR for a change, please provide evidence that your change works, as we still need to work on improving the CI of the project.

Some resources are provided for helping achieve this, see this guide for details.

### Developing the Controller

@@ -130,7 +137,7 @@ GINKGO_FOCUS='[It] should create a new Runner resource from the specified templa
>
> If you want to stick with `snap`-provided `docker`, do not forget to set `TMPDIR` to somewhere under `$HOME`.
> Otherwise `kind load docker-image` fails while running `docker save`.
> See https://kind.sigs.k8s.io/docs/user/known-issues/#docker-installed-with-snap for more information.
> See <https://kind.sigs.k8s.io/docs/user/known-issues/#docker-installed-with-snap> for more information.

To test your local changes against both PAT and App based authentication, please run the `acceptance` make target with the authentication configuration details provided:

@@ -186,7 +193,7 @@ Before shipping your PR, please check the following items to make sure CI passes
- Run `go mod tidy` if you made changes to dependencies.
- Format the code using `gofmt`
- Run the `golangci-lint` tool locally.
  - We recommend you use `make lint` to run the tool using a Docker container matching the CI version.

### Opening the Pull Request

@@ -217,3 +224,146 @@ Please also note that you need to replace `$DOCKER_USER` with your own DockerHub

Only the maintainers can release a new version of actions-runner-controller, publish new versions of the helm charts, and release new runner images.

All release workflows have been moved to [actions-runner-controller/releases](https://github.com/actions-runner-controller/releases) since the packages are owned by the former organization.

### Workflow structure

Following the migration of actions-runner-controller into GitHub Actions, all the workflows had to be modified to accommodate the move to a new organization. The following table describes the workflows, their purpose and dependencies.

| Filename | Workflow name | Purpose |
|----------|---------------|---------|
| gha-e2e-tests.yaml | (gha) E2E Tests | Tests the Autoscaling Runner Set mode end to end. Coverage is restricted to this mode; legacy modes are not tested. |
| go.yaml | Format, Lint, Unit Tests | Formats, lints and runs unit tests for the entire codebase. |
| arc-publish.yaml | Publish ARC Image | Uploads release/actions-runner-controller.yaml as an artifact to the newly created release and triggers the [build and publication of the controller image](https://github.com/actions-runner-controller/releases/blob/main/.github/workflows/publish-arc.yaml) |
| global-publish-canary.yaml | Publish Canary Images | Builds and publishes canary controller container images for both new and legacy modes. |
| arc-publish-chart.yaml | Publish ARC Helm Charts | Packages and publishes charts/actions-runner-controller (via GitHub Pages) |
| gha-publish-chart.yaml | (gha) Publish Helm Charts | Packages and publishes the charts/gha-runner-scale-set-controller and charts/gha-runner-scale-set charts (OCI to GHCR) |
| arc-release-runners.yaml | Release ARC Runner Images | Triggers [release-runners.yaml](https://github.com/actions-runner-controller/releases/blob/main/.github/workflows/release-runners.yaml) which will build and push new runner images used with the legacy ARC modes. |
| global-run-codeql.yaml | Run CodeQL | Runs CodeQL on the entire codebase |
| global-run-first-interaction.yaml | First Interaction | Informs first-time contributors what to expect when they open a new issue / PR |
| global-run-stale.yaml | Run Stale Bot | Closes issues / PRs without activity |
| arc-update-runners-scheduled.yaml | Runner Updates Check (Scheduled Job) | Polls [actions/runner](https://github.com/actions/runner) and [actions/runner-container-hooks](https://github.com/actions/runner-container-hooks) for new releases. If found, a PR is created to publish new runner images |
| arc-validate-chart.yaml | Validate Helm Chart | Runs helm chart validators for charts/actions-runner-controller |
| gha-validate-chart.yaml | (gha) Validate Helm Charts | Runs helm chart validators for the charts/gha-runner-scale-set-controller and charts/gha-runner-scale-set charts |
| arc-validate-runners.yaml | Validate ARC Runners | Runs validators for runners |

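For a quick overview of which of these workflows exist in the repository and whether they are enabled, the `gh` CLI can list them; a convenience sketch, not part of any release process:

```bash
# List the workflows defined in actions/actions-runner-controller with their state and IDs.
gh workflow list -R actions/actions-runner-controller
```
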
There are 7 components that we release regularly:

1. legacy [actions-runner-controller controller image](https://github.com/actions-runner-controller/actions-runner-controller/pkgs/container/actions-runner-controller)
2. legacy [actions-runner-controller helm charts](https://actions-runner-controller.github.io/actions-runner-controller/)
3. legacy actions-runner-controller runner images
   1. [ubuntu-20.04](https://github.com/actions-runner-controller/actions-runner-controller/pkgs/container/actions-runner-controller%2Factions-runner)
   2. [ubuntu-22.04](https://github.com/actions-runner-controller/actions-runner-controller/pkgs/container/actions-runner-controller%2Factions-runner)
   3. [dind-ubuntu-20.04](https://github.com/actions-runner-controller/actions-runner-controller/pkgs/container/actions-runner-controller%2Factions-runner-dind)
   4. [dind-ubuntu-22.04](https://github.com/actions-runner-controller/actions-runner-controller/pkgs/container/actions-runner-controller%2Factions-runner-dind)
   5. [dind-rootless-ubuntu-20.04](https://github.com/actions-runner-controller/actions-runner-controller/pkgs/container/actions-runner-controller%2Factions-runner-dind-rootless)
   6. [dind-rootless-ubuntu-22.04](https://github.com/actions-runner-controller/actions-runner-controller/pkgs/container/actions-runner-controller%2Factions-runner-dind-rootless)
4. [gha-runner-scale-set-controller image](https://github.com/actions/actions-runner-controller/pkgs/container/gha-runner-scale-set-controller)
5. [gha-runner-scale-set-controller helm charts](https://github.com/actions/actions-runner-controller/pkgs/container/actions-runner-controller-charts%2Fgha-runner-scale-set-controller)
6. [gha-runner-scale-set runner helm charts](https://github.com/actions/actions-runner-controller/pkgs/container/actions-runner-controller-charts%2Fgha-runner-scale-set)
7. [actions/runner image](https://github.com/actions/actions-runner-controller/pkgs/container/actions-runner-controller%2Factions-runner)

#### Releasing legacy actions-runner-controller image and helm charts

1. Start by making sure the master branch is stable and all CI jobs are passing
2. Create a new release in <https://github.com/actions/actions-runner-controller/releases> (Draft a new release)
3. Bump up the `version` and `appVersion` in charts/actions-runner-controller/Chart.yaml - make sure the `version` matches the release version you just created, as sketched after this list. (Example: <https://github.com/actions/actions-runner-controller/pull/2577>)
4. When the workflows finish execution, you will see:
   1. A new controller image published to: <https://github.com/actions-runner-controller/actions-runner-controller/pkgs/container/actions-runner-controller>
   2. Helm charts published to: <https://github.com/actions-runner-controller/actions-runner-controller.github.io/tree/master/actions-runner-controller> (the index.yaml file is updated)

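Step 3 amounts to a two-line edit of the chart metadata. A minimal sketch, with illustrative version numbers:

```yaml
# charts/actions-runner-controller/Chart.yaml (version numbers are illustrative)
version: 0.23.3    # chart version; must match the release you just drafted
appVersion: 0.27.4 # used as the default controller image tag
```
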
When a new release is created, the [Publish ARC Image](https://github.com/actions/actions-runner-controller/blob/master/.github/workflows/arc-publish.yaml) workflow is triggered.

```mermaid
flowchart LR
    subgraph repository: actions/actions-runner-controller
    event_a{{"release: published"}} -- triggers --> workflow_a["arc-publish.yaml"]
    event_b{{"workflow_dispatch"}} -- triggers --> workflow_a["arc-publish.yaml"]
    workflow_a["arc-publish.yaml"] -- uploads --> package["actions-runner-controller.tar.gz"]
    end
    subgraph repository: actions-runner-controller/releases
    workflow_a["arc-publish.yaml"] -- triggers --> event_d{{"repository_dispatch"}} --> workflow_b["publish-arc.yaml"]
    workflow_b["publish-arc.yaml"] -- push --> A["GHCR: \nactions-runner-controller/actions-runner-controller:*"]
    workflow_b["publish-arc.yaml"] -- push --> B["DockerHub: \nsummerwind/actions-runner-controller:*"]
    end
```

#### Release actions-runner-controller runner images

**Manual steps:**

1. Navigate to the [actions-runner-controller/releases](https://github.com/actions-runner-controller/releases) repository
2. Trigger [the release-runners.yaml](https://github.com/actions-runner-controller/releases/actions/workflows/release-runners.yaml) workflow.
   1. The list of input parameters for this workflow is defined in the table below (always inspect the workflow file for the latest version)

<!-- Table of Parameters -->
| Parameter | Description | Default |
|-----------|-------------|---------|
| `runner_version` | The version of [actions/runner](https://github.com/actions/runner) to use | `2.300.2` |
| `docker_version` | The version of docker to use | `20.10.12` |
| `runner_container_hooks_version` | The version of [actions/runner-container-hooks](https://github.com/actions/runner-container-hooks) to use | `0.2.0` |
| `sha` | The commit sha from [actions/actions-runner-controller](https://github.com/actions/actions-runner-controller) to be used to build the runner images. This will be provided to `actions/checkout` & used to tag the container images | Empty string. |
| `push_to_registries` | Whether to push the images to the registries. Use false to test the build | false |

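The same dispatch can be scripted with the `gh` CLI instead of the web UI. A convenience sketch; the input values below are illustrative, and `push_to_registries=false` keeps it a test build:

```bash
# Trigger release-runners.yaml in the releases repository with explicit inputs.
gh workflow run release-runners.yaml \
  -R actions-runner-controller/releases \
  -f runner_version=2.300.2 \
  -f docker_version=20.10.12 \
  -f runner_container_hooks_version=0.2.0 \
  -f push_to_registries=false
```
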
**Automated steps:**

```mermaid
flowchart LR
    workflow["release-runners.yaml"] -- workflow_dispatch* --> workflow_b["release-runners.yaml"]
    subgraph repository: actions/actions-runner-controller
    runner_updates_check["arc-update-runners-scheduled.yaml"] -- "polls (daily)" --> runner_releases["actions/runner/releases"]
    runner_updates_check -- creates --> runner_update_pr["PR: update /runner/VERSION"]
    runner_update_pr --> runner_update_pr_merge{{"merge"}}
    runner_update_pr_merge -- triggers --> workflow["release-runners.yaml"]
    end
    subgraph repository: actions-runner-controller/releases
    workflow_b["release-runners.yaml"] -- push --> A["GHCR: \n actions-runner-controller/actions-runner:* \n actions-runner-controller/actions-runner-dind:* \n actions-runner-controller/actions-runner-dind-rootless:*"]
    workflow_b["release-runners.yaml"] -- push --> B["DockerHub: \n summerwind/actions-runner:* \n summerwind/actions-runner-dind:* \n summerwind/actions-runner-dind-rootless:*"]
    event_b{{"workflow_dispatch"}} -- triggers --> workflow_b["release-runners.yaml"]
    end
```

#### Release gha-runner-scale-set-controller image and helm charts

1. Make sure the master branch is stable and all CI jobs are passing
1. Prepare a release PR (example: <https://github.com/actions/actions-runner-controller/pull/2467>)
   1. Bump up the version of the chart in: charts/gha-runner-scale-set-controller/Chart.yaml
   2. Bump up the version of the chart in: charts/gha-runner-scale-set/Chart.yaml
      1. Make sure that `version` and `appVersion` of both charts are always the same. These versions cannot diverge.
   3. Update the quickstart guide to reflect the latest versions: docs/preview/gha-runner-scale-set-controller/README.md
   4. Add a changelog to the PR as well as to the quickstart guide
1. Merge the release PR
1. Manually trigger the [(gha) Publish Helm Charts](https://github.com/actions/actions-runner-controller/actions/workflows/gha-publish-chart.yaml) workflow
1. Manually create a tag and release in [actions/actions-runner-controller](https://github.com/actions/actions-runner-controller/releases) with the format: `gha-runner-scale-set-x.x.x` where the version (x.x.x) matches that of the Helm chart

The manually triggered workflow accepts the following inputs:

| Parameter | Description | Default |
|-----------|-------------|---------|
| `ref` | The branch, tag or SHA to cut a release from. | default branch |
| `release_tag_name` | The tag of the controller image. This is not a git tag. | canary |
| `push_to_registries` | Push images to registries. Use false to test the build process. | false |
| `publish_gha_runner_scale_set_controller_chart` | Publish a new helm chart for gha-runner-scale-set-controller. This will push the new OCI archive to GHCR | false |
| `publish_gha_runner_scale_set_chart` | Publish a new helm chart for gha-runner-scale-set. This will push the new OCI archive to GHCR | false |

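Once published, both charts land in GHCR as OCI artifacts. A quick way to sanity-check a published chart; the chart path follows the package links above, and the version number is a placeholder:

```bash
# Pull the published chart from GHCR and print its metadata.
helm pull oci://ghcr.io/actions/actions-runner-controller-charts/gha-runner-scale-set-controller --version 0.4.0
helm show chart oci://ghcr.io/actions/actions-runner-controller-charts/gha-runner-scale-set-controller --version 0.4.0
```
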
#### Release actions/runner image

A new runner image is built and published to <https://github.com/actions/runner/pkgs/container/actions-runner> whenever a new runner binary has been released. There's nothing to do here.

#### Canary releases

We publish canary images for both the legacy actions-runner-controller controller and the gha-runner-scale-set-controller.

```mermaid
flowchart LR
    subgraph org: actions
    event_a{{"push: [master]"}} -- triggers --> workflow_a["publish-canary.yaml"]
    end
    subgraph org: actions-runner-controller
    workflow_a["publish-canary.yaml"] -- triggers --> event_d{{"repository_dispatch"}} --> workflow_b["publish-canary.yaml"]
    workflow_b["publish-canary.yaml"] -- push --> A["GHCR: \nactions-runner-controller/actions-runner-controller:canary"]
    workflow_b["publish-canary.yaml"] -- push --> B["DockerHub: \nsummerwind/actions-runner-controller:canary"]
    end
```

1. [actions-runner-controller canary image](https://github.com/actions-runner-controller/actions-runner-controller/pkgs/container/actions-runner-controller)
2. [gha-runner-scale-set-controller image](https://github.com/actions/actions-runner-controller/pkgs/container/gha-runner-scale-set-controller)

These canary images are automatically built and released on each push to the master branch.

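To try a canary build, pull it by tag from GHCR. The image path comes from the canary publish workflow shown earlier; the `canary-<short_sha>` variant pins an exact commit:

```bash
# Floating canary tag of the gha-runner-scale-set-controller image.
docker pull ghcr.io/actions/gha-runner-scale-set-controller:canary
```
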
@@ -1,5 +1,5 @@
# Build the manager binary
FROM --platform=$BUILDPLATFORM golang:1.19.4 as builder
FROM --platform=$BUILDPLATFORM golang:1.20.7 as builder

WORKDIR /workspace

7 Makefile

@@ -5,7 +5,7 @@ else
endif
DOCKER_USER ?= $(shell echo ${DOCKER_IMAGE_NAME} | cut -d / -f1)
VERSION ?= dev
RUNNER_VERSION ?= 2.303.0
RUNNER_VERSION ?= 2.308.0
TARGETPLATFORM ?= $(shell arch)
RUNNER_NAME ?= ${DOCKER_USER}/actions-runner
RUNNER_TAG ?= ${VERSION}
@@ -95,7 +95,8 @@ run: generate fmt vet manifests
run-scaleset: generate fmt vet
	CONTROLLER_MANAGER_POD_NAMESPACE=default \
	CONTROLLER_MANAGER_CONTAINER_IMAGE="${DOCKER_IMAGE_NAME}:${VERSION}" \
	go run ./main.go --auto-scaling-runner-set-only
	go run -ldflags="-s -w -X 'github.com/actions/actions-runner-controller/build.Version=$(VERSION)'" \
	./main.go --auto-scaling-runner-set-only

# Install CRDs into a cluster
install: manifests
@@ -202,7 +203,7 @@ generate: controller-gen

# Run shellcheck on runner scripts
shellcheck: shellcheck-install
	$(TOOLS_PATH)/shellcheck --shell bash --source-path runner runner/*.sh
	$(TOOLS_PATH)/shellcheck --shell bash --source-path runner runner/*.sh hack/*.sh

docker-buildx:
	export DOCKER_CLI_EXPERIMENTAL=enabled ;\

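The new `-ldflags` in the `run-scaleset` target stamp the version string into the binary at link time via `-X`. The `build` package itself is not part of this diff, but the pattern it relies on looks roughly like this (a sketch, not the actual file contents):

```go
// Package build holds link-time metadata. The "dev" default is overwritten by
// -ldflags "-X 'github.com/actions/actions-runner-controller/build.Version=<version>'".
package build

var Version = "dev"
```
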
48 README.md

@@ -4,42 +4,40 @@
[](https://github.com/jonico/awesome-runners)
[](https://artifacthub.io/packages/search?repo=actions-runner-controller)

## About

Actions Runner Controller (ARC) is a Kubernetes operator that orchestrates and scales self-hosted runners for GitHub Actions.

With ARC, you can create runner scale sets that automatically scale based on the number of workflows running in your repository, organization, or enterprise. Because controlled runners can be ephemeral and based on containers, new runner instances can scale up or down rapidly and cleanly. For more information about autoscaling, see ["Autoscaling with self-hosted runners."](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners/autoscaling-with-self-hosted-runners)

You can set up ARC on Kubernetes using Helm, then create and run a workflow that uses runner scale sets. For more information about runner scale sets, see ["Deploying runner scale sets with Actions Runner Controller."](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners-with-actions-runner-controller/deploying-runner-scale-sets-with-actions-runner-controller#runner-scale-set)

## People

`actions-runner-controller` is an open-source project currently developed and maintained in collaboration with maintainers @mumoshu and @toast-gear, various [contributors](https://github.com/actions/actions-runner-controller/graphs/contributors), and the [awesome community](https://github.com/actions/actions-runner-controller/discussions), mostly in their spare time.
Actions Runner Controller (ARC) is an open-source project currently developed and maintained in collaboration with the GitHub Actions team, external maintainers @mumoshu and @toast-gear, various [contributors](https://github.com/actions/actions-runner-controller/graphs/contributors), and the [awesome community](https://github.com/actions/actions-runner-controller/discussions).

If you think the project is awesome and it's becoming a basis for your important business, consider [sponsoring us](https://github.com/sponsors/actions-runner-controller)!
If you think the project is awesome and is adding value to your business, please consider directly sponsoring [community maintainers](https://github.com/sponsors/actions-runner-controller) and individual contributors via GitHub Sponsors.

In case you are already the employer of one of the contributors, sponsoring via GitHub Sponsors might not be an option. Just support them by other means!

We don't currently have [any sponsors dedicated to this project yet](https://github.com/sponsors/actions-runner-controller).

However, [HelloFresh](https://www.hellofreshgroup.com/en/) has recently started sponsoring @mumoshu for this project along with his other work. A part of their sponsorship will enable @mumoshu to add an E2E test to keep ARC even more reliable on AWS. Thank you for your sponsorship!

[<img src="https://user-images.githubusercontent.com/22009/170898715-07f02941-35ec-418b-8cd4-251b422fa9ac.png" width="219" height="71" />](https://careers.hellofresh.com/)

## Status

Even though actions-runner-controller is used in production environments, it is still in its early stage of development, hence versioned 0.x.

actions-runner-controller complies with Semantic Versioning 2.0.0, in which v0.x means that there could be backward-incompatible changes in every release.

The documentation is kept in line with master@HEAD; we do our best to highlight any features that require a specific ARC version or higher, but this is not always easily done due to there being many moving parts. Additionally, we actively do not retain compatibility with every GitHub Enterprise Server version nor every Kubernetes version, so you will need to ensure you stay current within a reasonable timespan.

## About

[GitHub Actions](https://github.com/features/actions) is a very useful tool for automating development. GitHub Actions jobs are run in the cloud by default, but you may want to run your jobs in your own environment. A [self-hosted runner](https://github.com/actions/runner) can be used for such use cases, but it requires the provisioning and configuration of a virtual machine instance. Instead, if you already have a Kubernetes cluster, it makes more sense to run the self-hosted runner on top of it.

**actions-runner-controller** makes that possible. Just create a *Runner* resource on your Kubernetes cluster, and it will run and operate the self-hosted runner for the specified repository. Combined with Kubernetes RBAC, you can also build simple self-hosted runners as a service.
See [the sponsorship dashboard](https://github.com/sponsors/actions-runner-controller) for the former and the current sponsors.

## Getting Started

To give ARC a try with just a handful of commands, please refer to the [Quickstart guide](/docs/quickstart.md).

For an overview of ARC, please refer to [About ARC](https://github.com/actions/actions-runner-controller/blob/master/docs/about-arc.md)
To give ARC a try with just a handful of commands, please refer to the [Quickstart guide](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners-with-actions-runner-controller/quickstart-for-actions-runner-controller).

For more information, please refer to the detailed documentation below!
For an overview of ARC, please refer to [About ARC](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners-with-actions-runner-controller/about-actions-runner-controller)

## Documentation

With the introduction of [autoscaling runner scale sets](https://github.com/actions/actions-runner-controller/discussions/2775), the existing [autoscaling modes](./docs/automatically-scaling-runners.md) are now legacy. The legacy modes have certain use cases and will continue to be maintained by the community only.

For further information on what is supported by GitHub and what's managed by the community, please refer to [this announcement discussion.](https://github.com/actions/actions-runner-controller/discussions/2775)

### Documentation

ARC documentation is available on [docs.github.com](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners-with-actions-runner-controller/quickstart-for-actions-runner-controller).

### Legacy documentation

The following documentation is for the legacy autoscaling modes that continue to be maintained by the community:

- [Quickstart guide](/docs/quickstart.md)
- [About ARC](/docs/about-arc.md)

@@ -304,3 +304,27 @@ If you noticed that it takes several minutes for sidecar dind container to be cr
**Solution**

The solution is to switch to using faster storage. If you are experiencing this issue you are probably using HDD storage; switching to SSD storage fixed the problem in my case. Most cloud providers have a list of storage options, so just pick something faster than your current disk. For on-prem clusters you will need to invest in some SSDs.

### Dockerd no space left on device

**Problem**

If you are running many containers on your runner you might encounter an issue where the docker daemon is unable to start new containers and you see the error `no space left on device`.

**Solution**

Add a `dockerVarRunVolumeSizeLimit` key in your runner's spec with a higher size limit (the default is 1M). For instance:

```yaml
apiVersion: actions.summerwind.dev/v1alpha1
kind: RunnerDeployment
metadata:
  name: github-runner
  namespace: github-system
spec:
  replicas: 6
  template:
    spec:
      dockerVarRunVolumeSizeLimit: 50M
      env: []
```

@@ -102,6 +102,7 @@ if [ "${tool}" == "helm" ]; then
    --set githubWebhookServer.podAnnotations.test-id=${TEST_ID} \
    --set actionsMetricsServer.podAnnotations.test-id=${TEST_ID} \
    ${flags[@]} --set image.imagePullPolicy=${IMAGE_PULL_POLICY} \
    --set image.dindSidecarRepositoryAndTag=${DIND_SIDECAR_REPOSITORY_AND_TAG} \
    -f ${VALUES_FILE}
  set +v
  # To prevent `CustomResourceDefinition.apiextensions.k8s.io "runners.actions.summerwind.dev" is invalid: metadata.annotations: Too long: must have at most 262144 bytes`

@@ -6,6 +6,10 @@ OP=${OP:-apply}

RUNNER_LABEL=${RUNNER_LABEL:-self-hosted}

# See https://github.com/actions/actions-runner-controller/issues/2123
kubectl delete secret generic docker-config || :
kubectl create secret generic docker-config --from-file .dockerconfigjson=<(jq -M 'del(.aliases)' $HOME/.docker/config.json) --type=kubernetes.io/dockerconfigjson || :

cat acceptance/testdata/kubernetes_container_mode.envsubst.yaml | NAMESPACE=${RUNNER_NAMESPACE} envsubst | kubectl apply -f -

if [ -n "${TEST_REPO}" ]; then

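The `jq -M 'del(.aliases)'` filter above strips only the `aliases` key from the host's config.json before it is packed into the secret, which is exactly the field the linked issue calls out. An illustrative run (`-c` added here for one-line output):

```bash
# A config.json carrying a buildx alias; the filter drops just that key.
echo '{"auths":{},"aliases":{"builder":"buildx"}}' | jq -cM 'del(.aliases)'
# -> {"auths":{}}
```
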
27 acceptance/testdata/runnerdeploy.envsubst.yaml vendored

@@ -95,6 +95,24 @@ spec:
        # that part is created by dockerd.
          mountPath: /home/runner/.local
          readOnly: false
        # See https://github.com/actions/actions-runner-controller/issues/2123
        # Be sure to omit the "aliases" field from the config.json.
        # Otherwise you may encounter nasty errors like:
        #   $ docker build
        #   docker: 'buildx' is not a docker command.
        #   See 'docker --help'
        # due to the incompatibility between your host docker config.json and the runner environment.
        # That is, your host docker config.json might contain this:
        #   "aliases": {
        #     "builder": "buildx"
        #   }
        # And this results in the above error when the runner does not have buildx installed yet.
        - name: docker-config
          mountPath: /home/runner/.docker/config.json
          subPath: config.json
          readOnly: true
        - name: docker-config-root
          mountPath: /home/runner/.docker
      volumes:
        - name: rootless-dind-work-dir
          ephemeral:
@@ -105,6 +123,15 @@ spec:
            resources:
              requests:
                storage: 3Gi
        - name: docker-config
          # Refer to .dockerconfigjson/.docker/config.json
          secret:
            secretName: docker-config
            items:
              - key: .dockerconfigjson
                path: config.json
        - name: docker-config-root
          emptyDir: {}

#
# Non-standard working directory

@@ -52,6 +52,9 @@ type AutoscalingListenerSpec struct {
	// Required
	Image string `json:"image,omitempty"`

	// Required
	ImagePullPolicy corev1.PullPolicy `json:"imagePullPolicy,omitempty"`

	// Required
	ImagePullSecrets []corev1.LocalObjectReference `json:"imagePullSecrets,omitempty"`

@@ -22,7 +22,7 @@ import (

// HorizontalRunnerAutoscalerSpec defines the desired state of HorizontalRunnerAutoscaler
type HorizontalRunnerAutoscalerSpec struct {
	// ScaleTargetRef sis the reference to scaled resource like RunnerDeployment
	// ScaleTargetRef is the reference to scaled resource like RunnerDeployment
	ScaleTargetRef ScaleTargetRef `json:"scaleTargetRef,omitempty"`

	// MinReplicas is the minimum number of replicas the deployment is allowed to scale

@@ -70,6 +70,8 @@ type RunnerConfig struct {
	// +optional
	DockerRegistryMirror *string `json:"dockerRegistryMirror,omitempty"`
	// +optional
	DockerVarRunVolumeSizeLimit *resource.Quantity `json:"dockerVarRunVolumeSizeLimit,omitempty"`
	// +optional
	VolumeSizeLimit *resource.Quantity `json:"volumeSizeLimit,omitempty"`
	// +optional
	VolumeStorageMedium *string `json:"volumeStorageMedium,omitempty"`

@@ -436,6 +436,11 @@ func (in *RunnerConfig) DeepCopyInto(out *RunnerConfig) {
		*out = new(string)
		**out = **in
	}
	if in.DockerVarRunVolumeSizeLimit != nil {
		in, out := &in.DockerVarRunVolumeSizeLimit, &out.DockerVarRunVolumeSizeLimit
		x := (*in).DeepCopy()
		*out = &x
	}
	if in.VolumeSizeLimit != nil {
		in, out := &in.VolumeSizeLimit, &out.VolumeSizeLimit
		x := (*in).DeepCopy()

@@ -15,10 +15,10 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.23.0
version: 0.23.3

# Used as the default manager tag value when no tag property is provided in the values.yaml
appVersion: 0.27.1
appVersion: 0.27.4

home: https://github.com/actions/actions-runner-controller

@@ -35,18 +35,21 @@ All additional docs are kept in the `docs/` folder, this README is solely for do
| `authSecret.github_basicauth_password` | Password for GitHub basic auth to use instead of PAT or GitHub App in case it's running behind a proxy API | |
| `dockerRegistryMirror` | The default Docker Registry Mirror used by runners. | |
| `hostNetwork` | The "hostNetwork" of the controller container | false |
| `dnsPolicy` | The "dnsPolicy" of the controller container | ClusterFirst |
| `image.repository` | The "repository/image" of the controller container | summerwind/actions-runner-controller |
| `image.tag` | The tag of the controller container | |
| `image.actionsRunnerRepositoryAndTag` | The "repository/image" of the actions runner container | summerwind/actions-runner:latest |
| `image.actionsRunnerImagePullSecrets` | Optional image pull secrets to be included in the runner pod's ImagePullSecrets | |
| `image.dindSidecarRepositoryAndTag` | The "repository/image" of the dind sidecar container | docker:dind |
| `image.pullPolicy` | The pull policy of the controller image | IfNotPresent |
| `metrics.serviceMonitor` | Deploy a serviceMonitor kind for use with prometheus-operator CRDs | false |
| `metrics.serviceMonitor.enable` | Deploy a serviceMonitor kind for use with prometheus-operator CRDs | false |
| `metrics.serviceMonitor.interval` | Configure the interval at which Prometheus should scrape the controller's metrics | 1m |
| `metrics.serviceMonitor.timeout` | Configure the timeout of Prometheus scraping. | 30s |
| `metrics.serviceAnnotations` | Set annotations for the provisioned metrics service resource | |
| `metrics.port` | Set port of metrics service | 8443 |
| `metrics.proxy.enabled` | Deploy kube-rbac-proxy container in controller pod | true |
| `metrics.proxy.image.repository` | The "repository/image" of the kube-proxy container | quay.io/brancz/kube-rbac-proxy |
| `metrics.proxy.image.tag` | The tag of the kube-proxy image to use when pulling the container | v0.10.0 |
| `metrics.proxy.image.tag` | The tag of the kube-proxy image to use when pulling the container | v0.13.1 |
| `metrics.serviceMonitorLabels` | Set labels to apply to ServiceMonitor resources | |
| `imagePullSecrets` | Specifies the secret to be used when pulling the controller pod containers | |
| `fullnameOverride` | Override the full resource names | |
@@ -102,8 +105,11 @@ All additional docs are kept in the `docs/` folder, this README is solely for do
| `githubWebhookServer.tolerations` | Set the githubWebhookServer pod tolerations | |
| `githubWebhookServer.affinity` | Set the githubWebhookServer pod affinity rules | |
| `githubWebhookServer.priorityClassName` | Set the githubWebhookServer pod priorityClassName | |
| `githubWebhookServer.terminationGracePeriodSeconds` | Set the githubWebhookServer pod terminationGracePeriodSeconds. Useful when using preStop hooks to drain/sleep. | `10` |
| `githubWebhookServer.lifecycle` | Set the githubWebhookServer pod lifecycle hooks | `{}` |
| `githubWebhookServer.service.type` | Set githubWebhookServer service type | |
| `githubWebhookServer.service.ports` | Set githubWebhookServer service ports | `[{"port":80, "targetPort":"http", "protocol":"TCP", "name":"http"}]` |
| `githubWebhookServer.service.loadBalancerSourceRanges` | Set githubWebhookServer loadBalancerSourceRanges for restricting loadBalancer type services | `[]` |
| `githubWebhookServer.ingress.enabled` | Deploy an ingress kind for the githubWebhookServer | false |
| `githubWebhookServer.ingress.annotations` | Set annotations for the ingress kind | |
| `githubWebhookServer.ingress.hosts` | Set hosts configuration for ingress | `[{"host": "chart-example.local", "paths": []}]` |
@@ -115,9 +121,9 @@ All additional docs are kept in the `docs/` folder, this README is solely for do
| `actionsMetricsServer.logLevel` | Set the log level of the actionsMetricsServer container | |
| `actionsMetricsServer.logFormat` | Set the log format of the actionsMetricsServer controller. Valid options are "text" and "json" | text |
| `actionsMetricsServer.enabled` | Deploy the actions metrics server pod | false |
| `actionsMetricsServer.secret.enabled` | Passes the webhook hook secret to the github-webhook-server | false |
| `actionsMetricsServer.secret.enabled` | Passes the webhook hook secret to the actions-metrics-server | false |
| `actionsMetricsServer.secret.create` | Deploy the webhook hook secret | false |
| `actionsMetricsServer.secret.name` | Set the name of the webhook hook secret | github-webhook-server |
| `actionsMetricsServer.secret.name` | Set the name of the webhook hook secret | actions-metrics-server |
| `actionsMetricsServer.secret.github_webhook_secret_token` | Set the webhook secret token value | |
| `actionsMetricsServer.imagePullSecrets` | Specifies the secret to be used when pulling the actionsMetricsServer pod containers | |
| `actionsMetricsServer.nameOverride` | Override the resource name prefix | |
@@ -135,17 +141,22 @@ All additional docs are kept in the `docs/` folder, this README is solely for do
| `actionsMetricsServer.tolerations` | Set the actionsMetricsServer pod tolerations | |
| `actionsMetricsServer.affinity` | Set the actionsMetricsServer pod affinity rules | |
| `actionsMetricsServer.priorityClassName` | Set the actionsMetricsServer pod priorityClassName | |
| `actionsMetricsServer.terminationGracePeriodSeconds` | Set the actionsMetricsServer pod terminationGracePeriodSeconds. Useful when using preStop hooks to drain/sleep. | `10` |
| `actionsMetricsServer.lifecycle` | Set the actionsMetricsServer pod lifecycle hooks | `{}` |
| `actionsMetricsServer.service.type` | Set actionsMetricsServer service type | |
| `actionsMetricsServer.service.ports` | Set actionsMetricsServer service ports | `[{"port":80, "targetPort":"http", "protocol":"TCP", "name":"http"}]` |
| `actionsMetricsServer.service.loadBalancerSourceRanges` | Set actionsMetricsServer loadBalancerSourceRanges for restricting loadBalancer type services | `[]` |
| `actionsMetricsServer.ingress.enabled` | Deploy an ingress kind for the actionsMetricsServer | false |
| `actionsMetricsServer.ingress.annotations` | Set annotations for the ingress kind | |
| `actionsMetricsServer.ingress.hosts` | Set hosts configuration for ingress | `[{"host": "chart-example.local", "paths": []}]` |
| `actionsMetricsServer.ingress.tls` | Set tls configuration for ingress | |
| `actionsMetricsServer.ingress.ingressClassName` | Set ingress class name | |
| `actionsMetrics.serviceMonitor` | Deploy a serviceMonitor kind for use with prometheus-operator CRDs | false |
| `actionsMetrics.serviceAnnotations` | Set annotations for the provisioned actions metrics service resource | |
| `actionsMetrics.port` | Set port of actions metrics service | 8443 |
| `actionsMetrics.proxy.enabled` | Deploy kube-rbac-proxy container in controller pod | true |
| `actionsMetrics.proxy.image.repository` | The "repository/image" of the kube-proxy container | quay.io/brancz/kube-rbac-proxy |
| `actionsMetrics.proxy.image.tag` | The tag of the kube-proxy image to use when pulling the container | v0.10.0 |
| `actionsMetrics.serviceMonitorLabels` | Set labels to apply to ServiceMonitor resources | |
|
||||
| `actionsMetrics.serviceMonitor.enable` | Deploy serviceMonitor kind for for use with prometheus-operator CRDs | false |
|
||||
| `actionsMetrics.serviceMonitor.interval` | Configure the interval that Prometheus should scrap the controller's metrics | 1m |
|
||||
| `actionsMetrics.serviceMonitor.timeout` | Configure the timeout the timeout of Prometheus scrapping. | 30s |
|
||||
| `actionsMetrics.serviceAnnotations` | Set annotations for the provisioned actions metrics service resource | |
|
||||
| `actionsMetrics.port` | Set port of actions metrics service | 8443 |
|
||||
| `actionsMetrics.proxy.enabled` | Deploy kube-rbac-proxy container in controller pod | true |
|
||||
| `actionsMetrics.proxy.image.repository` | The "repository/image" of the kube-proxy container | quay.io/brancz/kube-rbac-proxy |
|
||||
| `actionsMetrics.proxy.image.tag` | The tag of the kube-proxy image to use when pulling the container | v0.13.1 |
|
||||
| `actionsMetrics.serviceMonitorLabels` | Set labels to apply to ServiceMonitor resources | |
|
||||
|
||||
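
The rows above track the chart's move from a boolean `metrics.serviceMonitor` to a nested object with `enable`, `interval`, and `timeout`. A minimal values fragment under the new layout might look like the sketch below; the keys are taken from the table, while the concrete values and the `release: prometheus` label are illustrative assumptions, not chart defaults.

    metrics:
      serviceMonitor:
        enable: true        # was `serviceMonitor: true` in older chart versions
        interval: 1m
        timeout: 30s
      serviceMonitorLabels:
        release: prometheus # illustrative; match your Prometheus Operator's selector
      port: 8443
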
@@ -113,7 +113,7 @@ spec:
description: ScaleDownDelaySecondsAfterScaleUp is the approximate delay for a scale down followed by a scale up Used to prevent flapping (down->up->down->... loop)
type: integer
scaleTargetRef:
description: ScaleTargetRef sis the reference to scaled resource like RunnerDeployment
description: ScaleTargetRef is the reference to scaled resource like RunnerDeployment
properties:
kind:
description: Kind is the type of resource being referenced

@@ -1497,6 +1497,12 @@ spec:
type: integer
dockerRegistryMirror:
type: string
dockerVarRunVolumeSizeLimit:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
dockerVolumeMounts:
items:
description: VolumeMount describes a mounting of a Volume within a container.

@@ -1479,6 +1479,12 @@ spec:
type: integer
dockerRegistryMirror:
type: string
dockerVarRunVolumeSizeLimit:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
dockerVolumeMounts:
items:
description: VolumeMount describes a mounting of a Volume within a container.

@@ -1432,6 +1432,12 @@ spec:
type: integer
dockerRegistryMirror:
type: string
dockerVarRunVolumeSizeLimit:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
dockerVolumeMounts:
items:
description: VolumeMount describes a mounting of a Volume within a container.

@@ -55,6 +55,12 @@ spec:
type: integer
dockerRegistryMirror:
type: string
dockerVarRunVolumeSizeLimit:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
dockerdWithinRunnerContainer:
type: boolean
effectiveTime:
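
The four CRD hunks above add the same `dockerVarRunVolumeSizeLimit` field (a Kubernetes int-or-string resource quantity) across the runner kinds. A minimal sketch of setting it on a RunnerDeployment follows; the field name comes from the hunks, while the surrounding spec (name, repository, the exact nesting under the runner template, and the 1Mi value) is an assumed, illustrative skeleton:

    apiVersion: actions.summerwind.dev/v1alpha1
    kind: RunnerDeployment
    metadata:
      name: example-runnerdeploy          # illustrative name
    spec:
      template:
        spec:
          repository: my-org/my-repo      # illustrative repository
          dockerVarRunVolumeSizeLimit: 1Mi  # size limit for the docker var-run volume
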
@@ -36,8 +36,8 @@ spec:
{{- end }}
containers:
- args:
{{- $metricsHost := .Values.metrics.proxy.enabled | ternary "127.0.0.1" "0.0.0.0" }}
{{- $metricsPort := .Values.metrics.proxy.enabled | ternary "8080" .Values.metrics.port }}
{{- $metricsHost := .Values.actionsMetrics.proxy.enabled | ternary "127.0.0.1" "0.0.0.0" }}
{{- $metricsPort := .Values.actionsMetrics.proxy.enabled | ternary "8080" .Values.actionsMetrics.port }}
- "--metrics-addr={{ $metricsHost }}:{{ $metricsPort }}"
{{- if .Values.actionsMetricsServer.logLevel }}
- "--log-level={{ .Values.actionsMetricsServer.logLevel }}"
@@ -50,6 +50,12 @@ spec:
{{- end }}
command:
- "/actions-metrics-server"
{{- if .Values.actionsMetricsServer.lifecycle }}
{{- with .Values.actionsMetricsServer.lifecycle }}
lifecycle:
{{- toYaml . | nindent 10 }}
{{- end }}
{{- end }}
env:
- name: GITHUB_WEBHOOK_SECRET_TOKEN
valueFrom:
@@ -116,8 +122,8 @@ spec:
- containerPort: 8000
name: http
protocol: TCP
{{- if not .Values.metrics.proxy.enabled }}
- containerPort: {{ .Values.metrics.port }}
{{- if not .Values.actionsMetrics.proxy.enabled }}
- containerPort: {{ .Values.actionsMetrics.port }}
name: metrics-port
protocol: TCP
{{- end }}
@@ -125,24 +131,24 @@
{{- toYaml .Values.actionsMetricsServer.resources | nindent 12 }}
securityContext:
{{- toYaml .Values.actionsMetricsServer.securityContext | nindent 12 }}
{{- if .Values.metrics.proxy.enabled }}
{{- if .Values.actionsMetrics.proxy.enabled }}
- args:
- "--secure-listen-address=0.0.0.0:{{ .Values.metrics.port }}"
- "--secure-listen-address=0.0.0.0:{{ .Values.actionsMetrics.port }}"
- "--upstream=http://127.0.0.1:8080/"
- "--logtostderr=true"
- "--v=10"
image: "{{ .Values.metrics.proxy.image.repository }}:{{ .Values.metrics.proxy.image.tag }}"
image: "{{ .Values.actionsMetrics.proxy.image.repository }}:{{ .Values.actionsMetrics.proxy.image.tag }}"
name: kube-rbac-proxy
imagePullPolicy: {{ .Values.image.pullPolicy }}
ports:
- containerPort: {{ .Values.metrics.port }}
- containerPort: {{ .Values.actionsMetrics.port }}
name: metrics-port
resources:
{{- toYaml .Values.resources | nindent 12 }}
securityContext:
{{- toYaml .Values.securityContext | nindent 12 }}
{{- end }}
terminationGracePeriodSeconds: 10
terminationGracePeriodSeconds: {{ .Values.actionsMetricsServer.terminationGracePeriodSeconds }}
{{- with .Values.actionsMetricsServer.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
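
The actions-metrics-server deployment now reads its kube-rbac-proxy and port settings from `actionsMetrics.*` instead of the controller's `metrics.*`. A values sketch exercising the keys the template references (the values shown are the documented chart defaults from the table earlier):

    actionsMetrics:
      proxy:
        enabled: true               # runs kube-rbac-proxy in front of the metrics endpoint
        image:
          repository: quay.io/brancz/kube-rbac-proxy
          tag: v0.13.1
      port: 8443                    # secure listen port exposed by the proxy
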
@@ -0,0 +1,90 @@
{{- if .Values.actionsMetricsServer.enabled }}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
name: {{ include "actions-runner-controller-actions-metrics-server.roleName" . }}
rules:
- apiGroups:
- actions.summerwind.dev
resources:
- horizontalrunnerautoscalers
verbs:
- get
- list
- patch
- update
- watch
- apiGroups:
- actions.summerwind.dev
resources:
- horizontalrunnerautoscalers/finalizers
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- actions.summerwind.dev
resources:
- horizontalrunnerautoscalers/status
verbs:
- get
- patch
- update
- apiGroups:
- actions.summerwind.dev
resources:
- runnersets
verbs:
- get
- list
- watch
- apiGroups:
- actions.summerwind.dev
resources:
- runnerdeployments
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- actions.summerwind.dev
resources:
- runnerdeployments/finalizers
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- actions.summerwind.dev
resources:
- runnerdeployments/status
verbs:
- get
- patch
- update
- apiGroups:
- authentication.k8s.io
resources:
- tokenreviews
verbs:
- create
- apiGroups:
- authorization.k8s.io
resources:
- subjectaccessreviews
verbs:
- create
{{- end }}
@@ -0,0 +1,14 @@
{{- if .Values.actionsMetricsServer.enabled }}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ include "actions-runner-controller-actions-metrics-server.roleName" . }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ include "actions-runner-controller-actions-metrics-server.roleName" . }}
subjects:
- kind: ServiceAccount
name: {{ include "actions-runner-controller-actions-metrics-server.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
{{- end }}
@@ -5,7 +5,7 @@ metadata:
name: {{ include "actions-runner-controller-actions-metrics-server.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "actions-runner-controller.labels" . | nindent 4 }}
{{- include "actions-runner-controller-actions-metrics-server.selectorLabels" . | nindent 4 }}
{{- if .Values.actionsMetricsServer.service.annotations }}
annotations:
{{ toYaml .Values.actionsMetricsServer.service.annotations | nindent 4 }}
@@ -16,11 +16,17 @@ spec:
{{ range $_, $port := .Values.actionsMetricsServer.service.ports -}}
- {{ $port | toYaml | nindent 6 }}
{{- end }}
{{- if .Values.metrics.serviceMonitor }}
{{- if .Values.actionsMetrics.serviceMonitor.enable }}
- name: metrics-port
port: {{ .Values.metrics.port }}
port: {{ .Values.actionsMetrics.port }}
targetPort: metrics-port
{{- end }}
selector:
{{- include "actions-runner-controller-actions-metrics-server.selectorLabels" . | nindent 4 }}
{{- if .Values.actionsMetricsServer.service.loadBalancerSourceRanges }}
loadBalancerSourceRanges:
{{- range $ip := .Values.actionsMetricsServer.service.loadBalancerSourceRanges }}
- {{ $ip -}}
{{- end }}
{{- end }}
{{- end }}
@@ -1,10 +1,10 @@
{{- if and .Values.actionsMetricsServer.enabled .Values.actionsMetrics.serviceMonitor }}
{{- if and .Values.actionsMetricsServer.enabled .Values.actionsMetrics.serviceMonitor.enable }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
labels:
{{- include "actions-runner-controller.labels" . | nindent 4 }}
{{- with .Values.actionsMetricsServer.serviceMonitorLabels }}
{{- with .Values.actionsMetrics.serviceMonitorLabels }}
{{- toYaml . | nindent 4 }}
{{- end }}
name: {{ include "actions-runner-controller-actions-metrics-server.serviceMonitorName" . }}
@@ -19,6 +19,8 @@ spec:
tlsConfig:
insecureSkipVerify: true
{{- end }}
interval: {{ .Values.actionsMetrics.serviceMonitor.interval }}
scrapeTimeout: {{ .Values.actionsMetrics.serviceMonitor.timeout }}
selector:
matchLabels:
{{- include "actions-runner-controller-actions-metrics-server.selectorLabels" . | nindent 6 }}
@@ -1,4 +1,4 @@
{{- if .Values.metrics.serviceMonitor }}
{{- if .Values.metrics.serviceMonitor.enable }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
@@ -19,6 +19,8 @@ spec:
tlsConfig:
insecureSkipVerify: true
{{- end }}
interval: {{ .Values.metrics.serviceMonitor.interval }}
scrapeTimeout: {{ .Values.metrics.serviceMonitor.timeout }}
selector:
matchLabels:
{{- include "actions-runner-controller.selectorLabels" . | nindent 6 }}
@@ -70,6 +70,9 @@ spec:
{{- if .Values.logFormat }}
- "--log-format={{ .Values.logFormat }}"
{{- end }}
{{- if .Values.dockerGID }}
- "--docker-gid={{ .Values.dockerGID }}"
{{- end }}
command:
- "/manager"
env:
@@ -211,3 +214,6 @@ spec:
{{- if .Values.hostNetwork }}
hostNetwork: {{ .Values.hostNetwork }}
{{- end }}
{{- if .Values.dnsPolicy }}
dnsPolicy: {{ .Values.dnsPolicy }}
{{- end }}
@@ -5,7 +5,7 @@ metadata:
name: {{ include "actions-runner-controller-github-webhook-server.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "actions-runner-controller.labels" . | nindent 4 }}
{{- include "actions-runner-controller-github-webhook-server.selectorLabels" . | nindent 4 }}
{{- if .Values.githubWebhookServer.service.annotations }}
annotations:
{{ toYaml .Values.githubWebhookServer.service.annotations | nindent 4 }}
@@ -16,7 +16,7 @@ spec:
{{ range $_, $port := .Values.githubWebhookServer.service.ports -}}
- {{ $port | toYaml | nindent 6 }}
{{- end }}
{{- if .Values.metrics.serviceMonitor }}
{{- if .Values.metrics.serviceMonitor.enable }}
- name: metrics-port
port: {{ .Values.metrics.port }}
targetPort: metrics-port
@@ -1,4 +1,4 @@
{{- if and .Values.githubWebhookServer.enabled .Values.metrics.serviceMonitor }}
{{- if and .Values.githubWebhookServer.enabled .Values.metrics.serviceMonitor.enable }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
@@ -19,6 +19,8 @@ spec:
tlsConfig:
insecureSkipVerify: true
{{- end }}
interval: {{ .Values.metrics.serviceMonitor.interval }}
scrapeTimeout: {{ .Values.metrics.serviceMonitor.timeout }}
selector:
matchLabels:
{{- include "actions-runner-controller-github-webhook-server.selectorLabels" . | nindent 6 }}
@@ -47,6 +47,7 @@ authSecret:
#github_basicauth_username: ""
#github_basicauth_password: ""

# http(s) should be specified for dockerRegistryMirror, e.g.: dockerRegistryMirror="https://<your-docker-registry-mirror>"
dockerRegistryMirror: ""
image:
repository: "summerwind/actions-runner-controller"
@@ -108,7 +109,10 @@ service:
# Metrics service resource
metrics:
serviceAnnotations: {}
serviceMonitor: false
serviceMonitor:
enable: false
timeout: 30s
interval: 1m
serviceMonitorLabels: {}
port: 8443
proxy:
@@ -188,9 +192,17 @@ admissionWebHooks:
# https://github.com/actions/actions-runner-controller/issues/1005#issuecomment-993097155
#hostNetwork: true

# If you use `hostNetwork: true`, then you need dnsPolicy: ClusterFirstWithHostNet
# https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy
#dnsPolicy: ClusterFirst

## specify log format for actions runner controller. Valid options are "text" and "json"
logFormat: text

# enable setting the docker group id for the runner container
# https://github.com/actions/actions-runner-controller/pull/2499
#dockerGID: 121

githubWebhookServer:
enabled: false
replicaCount: 1
@@ -299,7 +311,10 @@ actionsMetrics:
# as a part of the helm release.
# Do note that you also need actionsMetricsServer.enabled=true
# to deploy the actions-metrics-server whose k8s service is referenced by the service monitor.
serviceMonitor: false
serviceMonitor:
enable: false
timeout: 30s
interval: 1m
serviceMonitorLabels: {}
port: 8443
proxy:
@@ -359,6 +374,7 @@ actionsMetricsServer:
protocol: TCP
name: http
#nodePort: someFixedPortForUseWithTerraformCdkCfnEtc
loadBalancerSourceRanges: []
ingress:
enabled: false
ingressClassName: ""
@@ -388,4 +404,5 @@ actionsMetricsServer:
# - secretName: chart-example-tls
# hosts:
# - chart-example.local

terminationGracePeriodSeconds: 10
lifecycle: {}
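
Pulling the values.yaml hunks above together, a configuration exercising the new knobs might look like this sketch. The keys mirror the diff; the concrete values are illustrative, and `ClusterFirstWithHostNet` is the pairing the comment above calls for when `hostNetwork` is enabled:

    logFormat: json                       # "text" (default) or "json"
    dockerGID: 121                        # forwarded to the controller as --docker-gid
    hostNetwork: true
    dnsPolicy: ClusterFirstWithHostNet    # required alongside hostNetwork: true
    metrics:
      serviceMonitor:
        enable: true
        interval: 1m
        timeout: 30s
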
@@ -15,13 +15,13 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.3.0
version: 0.5.0

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "0.3.0"
appVersion: "0.5.0"

home: https://github.com/actions/actions-runner-controller
@@ -80,6 +80,9 @@ spec:
image:
description: Required
type: string
imagePullPolicy:
description: Required
type: string
imagePullSecrets:
description: Required
items:
@@ -1,8 +1,14 @@
{{/*
Expand the name of the chart.
*/}}

{{- define "gha-base-name" -}}
gha-rs-controller
{{- end }}

{{- define "gha-runner-scale-set-controller.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- default (include "gha-base-name" .) .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/*
@@ -14,7 +20,7 @@ If release name contains chart name it will be used as a full name.
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- $name := default (include "gha-base-name" .) .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
@@ -27,7 +33,7 @@ If release name contains chart name it will be used as a full name.
Create chart name and version as used by the chart label.
*/}}
{{- define "gha-runner-scale-set-controller.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- printf "%s-%s" (include "gha-base-name" .) .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/*
@@ -39,7 +45,7 @@ helm.sh/chart: {{ include "gha-runner-scale-set-controller.chart" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/part-of: gha-runner-scale-set-controller
app.kubernetes.io/part-of: gha-rs-controller
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- range $k, $v := .Values.labels }}
{{ $k }}: {{ $v }}
@@ -51,6 +57,7 @@ Selector labels
*/}}
{{- define "gha-runner-scale-set-controller.selectorLabels" -}}
app.kubernetes.io/name: {{ include "gha-runner-scale-set-controller.name" . }}
app.kubernetes.io/namespace: {{ .Release.Namespace }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}

@@ -73,35 +80,43 @@ Create the name of the service account to use
{{- end }}

{{- define "gha-runner-scale-set-controller.managerClusterRoleName" -}}
{{- include "gha-runner-scale-set-controller.fullname" . }}-manager-cluster-role
{{- include "gha-runner-scale-set-controller.fullname" . }}
{{- end }}

{{- define "gha-runner-scale-set-controller.managerClusterRoleBinding" -}}
{{- include "gha-runner-scale-set-controller.fullname" . }}-manager-cluster-rolebinding
{{- include "gha-runner-scale-set-controller.fullname" . }}
{{- end }}

{{- define "gha-runner-scale-set-controller.managerSingleNamespaceRoleName" -}}
{{- include "gha-runner-scale-set-controller.fullname" . }}-manager-single-namespace-role
{{- include "gha-runner-scale-set-controller.fullname" . }}-single-namespace
{{- end }}

{{- define "gha-runner-scale-set-controller.managerSingleNamespaceRoleBinding" -}}
{{- include "gha-runner-scale-set-controller.fullname" . }}-manager-single-namespace-rolebinding
{{- include "gha-runner-scale-set-controller.fullname" . }}-single-namespace
{{- end }}

{{- define "gha-runner-scale-set-controller.managerSingleNamespaceWatchRoleName" -}}
{{- include "gha-runner-scale-set-controller.fullname" . }}-single-namespace-watch
{{- end }}

{{- define "gha-runner-scale-set-controller.managerSingleNamespaceWatchRoleBinding" -}}
{{- include "gha-runner-scale-set-controller.fullname" . }}-single-namespace-watch
{{- end }}

{{- define "gha-runner-scale-set-controller.managerListenerRoleName" -}}
{{- include "gha-runner-scale-set-controller.fullname" . }}-manager-listener-role
{{- include "gha-runner-scale-set-controller.fullname" . }}-listener
{{- end }}

{{- define "gha-runner-scale-set-controller.managerListenerRoleBinding" -}}
{{- include "gha-runner-scale-set-controller.fullname" . }}-manager-listener-rolebinding
{{- include "gha-runner-scale-set-controller.fullname" . }}-listener
{{- end }}

{{- define "gha-runner-scale-set-controller.leaderElectionRoleName" -}}
{{- include "gha-runner-scale-set-controller.fullname" . }}-leader-election-role
{{- include "gha-runner-scale-set-controller.fullname" . }}-leader-election
{{- end }}

{{- define "gha-runner-scale-set-controller.leaderElectionRoleBinding" -}}
{{- include "gha-runner-scale-set-controller.fullname" . }}-leader-election-rolebinding
{{- include "gha-runner-scale-set-controller.fullname" . }}-leader-election
{{- end }}

{{- define "gha-runner-scale-set-controller.imagePullSecretsNames" -}}
@@ -111,3 +126,7 @@ Create the name of the service account to use
{{- end }}
{{- $names | join ","}}
{{- end }}

{{- define "gha-runner-scale-set-controller.serviceMonitorName" -}}
{{- include "gha-runner-scale-set-controller.fullname" . }}-service-monitor
{{- end }}
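
The helper renames above hang every generated RBAC name off the new `gha-rs-controller` base. For a release named `test-arc` (the name the chart tests below use), the resulting shapes are, as the test expectations confirm:

    # Name shapes after the rename, for a release named "test-arc":
    #   fullname / ClusterRole / ClusterRoleBinding:  test-arc-gha-rs-controller
    #   listener Role/RoleBinding:                    test-arc-gha-rs-controller-listener
    #   leader-election Role/RoleBinding:             test-arc-gha-rs-controller-leader-election
    #   single-namespace watch Role/RoleBinding:      test-arc-gha-rs-controller-single-namespace-watch
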
@@ -23,7 +23,7 @@ spec:
{{- toYaml . | nindent 8 }}
{{- end }}
labels:
app.kubernetes.io/part-of: gha-runner-scale-set-controller
app.kubernetes.io/part-of: gha-rs-controller
app.kubernetes.io/component: controller-manager
app.kubernetes.io/version: {{ .Chart.Version }}
{{- include "gha-runner-scale-set-controller.selectorLabels" . | nindent 8 }}
@@ -56,11 +56,34 @@ spec:
{{- with .Values.flags.logLevel }}
- "--log-level={{ . }}"
{{- end }}
{{- with .Values.flags.logFormat }}
- "--log-format={{ . }}"
{{- end }}
{{- with .Values.flags.watchSingleNamespace }}
- "--watch-single-namespace={{ . }}"
{{- end }}
{{- with .Values.flags.updateStrategy }}
- "--update-strategy={{ . }}"
{{- end }}
{{- if .Values.metrics }}
{{- with .Values.metrics }}
- "--listener-metrics-addr={{ .listenerAddr }}"
- "--listener-metrics-endpoint={{ .listenerEndpoint }}"
- "--metrics-addr={{ .controllerManagerAddr }}"
{{- end }}
{{- else }}
- "--listener-metrics-addr=0"
- "--listener-metrics-endpoint="
- "--metrics-addr=0"
{{- end }}
command:
- "/manager"
{{- with .Values.metrics }}
ports:
- containerPort: {{regexReplaceAll ":([0-9]+)" .controllerManagerAddr "${1}"}}
protocol: TCP
name: metrics
{{- end }}
env:
- name: CONTROLLER_MANAGER_CONTAINER_IMAGE
value: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
@@ -68,14 +91,11 @@ spec:
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: CONTROLLER_MANAGER_LISTENER_IMAGE_PULL_POLICY
value: "{{ .Values.image.pullPolicy | default "IfNotPresent" }}"
{{- with .Values.env }}
{{- if kindIs "slice" .Values.env }}
{{- toYaml .Values.env | nindent 8 }}
{{- else }}
{{- range $key, $val := .Values.env }}
- name: {{ $key }}
value: {{ $val | quote }}
{{- end }}
{{- if kindIs "slice" . }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- end }}
{{- with .Values.resources }}
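
The template above now derives all metrics flags from a single `metrics` value: when it is set, the three addresses are passed through (and the container port is parsed out of `controllerManagerAddr` via `regexReplaceAll`); when it is empty, metrics are explicitly disabled with the `=0` flags. A values sketch, assuming the key names the template reads (`controllerManagerAddr`, `listenerAddr`, `listenerEndpoint`) with illustrative addresses; note that `env` must now be a list, since the map form was dropped:

    metrics:
      controllerManagerAddr: ":8080"   # also becomes the "metrics" containerPort
      listenerAddr: ":8080"
      listenerEndpoint: "/metrics"
    env:
      - name: HTTP_PROXY               # illustrative variable
        value: "http://proxy.example.com:8080"
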
@@ -78,6 +78,13 @@ rules:
- get
- patch
- update
- apiGroups:
- actions.github.com
resources:
- ephemeralrunnersets/finalizers
verbs:
- patch
- update
- apiGroups:
- actions.github.com
resources:
@@ -133,4 +140,5 @@ rules:
verbs:
- list
- watch
- patch
{{- end }}

@@ -81,4 +81,4 @@ rules:
verbs:
- list
- watch
{{- end }}
{{- end }}
@@ -2,7 +2,7 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: {{ include "gha-runner-scale-set-controller.managerSingleNamespaceRoleName" . }}
name: {{ include "gha-runner-scale-set-controller.managerSingleNamespaceWatchRoleName" . }}
namespace: {{ .Values.flags.watchSingleNamespace }}
rules:
- apiGroups:
@@ -52,6 +52,13 @@ rules:
- get
- patch
- update
- apiGroups:
- actions.github.com
resources:
- ephemeralrunnersets/finalizers
verbs:
- patch
- update
- apiGroups:
- actions.github.com
resources:
@@ -114,4 +121,5 @@ rules:
verbs:
- list
- watch
{{- end }}
- patch
{{- end }}

@@ -2,14 +2,14 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: {{ include "gha-runner-scale-set-controller.managerSingleNamespaceRoleBinding" . }}
name: {{ include "gha-runner-scale-set-controller.managerSingleNamespaceWatchRoleBinding" . }}
namespace: {{ .Values.flags.watchSingleNamespace }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: {{ include "gha-runner-scale-set-controller.managerSingleNamespaceRoleName" . }}
name: {{ include "gha-runner-scale-set-controller.managerSingleNamespaceWatchRoleName" . }}
subjects:
- kind: ServiceAccount
name: {{ include "gha-runner-scale-set-controller.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
{{- end }}
{{- end }}
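
The rename above ties the namespaced Role/RoleBinding to the `flags.watchSingleNamespace` value. A minimal sketch (the `demo` namespace is the one the chart tests below use):

    flags:
      watchSingleNamespace: demo   # renders the *-single-namespace-watch Role/RoleBinding into this namespace
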
@@ -1,6 +1,7 @@
package tests

import (
"fmt"
"os"
"path/filepath"
"strings"
@@ -8,6 +9,7 @@ import (

"github.com/gruntwork-io/terratest/modules/helm"
"github.com/gruntwork-io/terratest/modules/k8s"
"github.com/gruntwork-io/terratest/modules/logger"
"github.com/gruntwork-io/terratest/modules/random"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -33,6 +35,7 @@ func TestTemplate_CreateServiceAccount(t *testing.T) {
namespaceName := "test-" + strings.ToLower(random.UniqueId())

options := &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"serviceAccount.create": "true",
"serviceAccount.annotations.foo": "bar",
@@ -46,7 +49,7 @@ func TestTemplate_CreateServiceAccount(t *testing.T) {
helm.UnmarshalK8SYaml(t, output, &serviceAccount)

assert.Equal(t, namespaceName, serviceAccount.Namespace)
assert.Equal(t, "test-arc-gha-runner-scale-set-controller", serviceAccount.Name)
assert.Equal(t, "test-arc-gha-rs-controller", serviceAccount.Name)
assert.Equal(t, "bar", string(serviceAccount.Annotations["foo"]))
}

@@ -61,6 +64,7 @@ func TestTemplate_CreateServiceAccount_OverwriteName(t *testing.T) {
namespaceName := "test-" + strings.ToLower(random.UniqueId())

options := &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"serviceAccount.create": "true",
"serviceAccount.name": "overwritten-name",
@@ -90,6 +94,7 @@ func TestTemplate_CreateServiceAccount_CannotUseDefaultServiceAccount(t *testing
namespaceName := "test-" + strings.ToLower(random.UniqueId())

options := &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"serviceAccount.create": "true",
"serviceAccount.name": "default",
@@ -113,6 +118,7 @@ func TestTemplate_NotCreateServiceAccount(t *testing.T) {
namespaceName := "test-" + strings.ToLower(random.UniqueId())

options := &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"serviceAccount.create": "false",
"serviceAccount.name": "overwritten-name",
@@ -136,6 +142,7 @@ func TestTemplate_NotCreateServiceAccount_ServiceAccountNotSet(t *testing.T) {
namespaceName := "test-" + strings.ToLower(random.UniqueId())

options := &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"serviceAccount.create": "false",
"serviceAccount.annotations.foo": "bar",
@@ -158,6 +165,7 @@ func TestTemplate_CreateManagerClusterRole(t *testing.T) {
namespaceName := "test-" + strings.ToLower(random.UniqueId())

options := &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
@@ -168,8 +176,8 @@ func TestTemplate_CreateManagerClusterRole(t *testing.T) {
helm.UnmarshalK8SYaml(t, output, &managerClusterRole)

assert.Empty(t, managerClusterRole.Namespace, "ClusterRole should not have a namespace")
assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-cluster-role", managerClusterRole.Name)
assert.Equal(t, 15, len(managerClusterRole.Rules))
assert.Equal(t, "test-arc-gha-rs-controller", managerClusterRole.Name)
assert.Equal(t, 16, len(managerClusterRole.Rules))

_, err = helm.RenderTemplateE(t, options, helmChartPath, releaseName, []string{"templates/manager_single_namespace_controller_role.yaml"})
assert.ErrorContains(t, err, "could not find template templates/manager_single_namespace_controller_role.yaml in chart", "We should get an error because the template should be skipped")
@@ -189,6 +197,7 @@ func TestTemplate_ManagerClusterRoleBinding(t *testing.T) {
namespaceName := "test-" + strings.ToLower(random.UniqueId())

options := &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"serviceAccount.create": "true",
},
@@ -201,9 +210,9 @@ func TestTemplate_ManagerClusterRoleBinding(t *testing.T) {
helm.UnmarshalK8SYaml(t, output, &managerClusterRoleBinding)

assert.Empty(t, managerClusterRoleBinding.Namespace, "ClusterRoleBinding should not have a namespace")
assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-cluster-rolebinding", managerClusterRoleBinding.Name)
assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-cluster-role", managerClusterRoleBinding.RoleRef.Name)
assert.Equal(t, "test-arc-gha-runner-scale-set-controller", managerClusterRoleBinding.Subjects[0].Name)
assert.Equal(t, "test-arc-gha-rs-controller", managerClusterRoleBinding.Name)
assert.Equal(t, "test-arc-gha-rs-controller", managerClusterRoleBinding.RoleRef.Name)
assert.Equal(t, "test-arc-gha-rs-controller", managerClusterRoleBinding.Subjects[0].Name)
assert.Equal(t, namespaceName, managerClusterRoleBinding.Subjects[0].Namespace)

_, err = helm.RenderTemplateE(t, options, helmChartPath, releaseName, []string{"templates/manager_single_namespace_controller_role_binding.yaml"})
@@ -224,6 +233,7 @@ func TestTemplate_CreateManagerListenerRole(t *testing.T) {
namespaceName := "test-" + strings.ToLower(random.UniqueId())

options := &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
@@ -234,7 +244,7 @@ func TestTemplate_CreateManagerListenerRole(t *testing.T) {
helm.UnmarshalK8SYaml(t, output, &managerListenerRole)

assert.Equal(t, namespaceName, managerListenerRole.Namespace, "Role should have a namespace")
assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-listener-role", managerListenerRole.Name)
assert.Equal(t, "test-arc-gha-rs-controller-listener", managerListenerRole.Name)
assert.Equal(t, 4, len(managerListenerRole.Rules))
assert.Equal(t, "pods", managerListenerRole.Rules[0].Resources[0])
assert.Equal(t, "pods/status", managerListenerRole.Rules[1].Resources[0])
@@ -253,6 +263,7 @@ func TestTemplate_ManagerListenerRoleBinding(t *testing.T) {
namespaceName := "test-" + strings.ToLower(random.UniqueId())

options := &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"serviceAccount.create": "true",
},
@@ -265,9 +276,9 @@ func TestTemplate_ManagerListenerRoleBinding(t *testing.T) {
helm.UnmarshalK8SYaml(t, output, &managerListenerRoleBinding)

assert.Equal(t, namespaceName, managerListenerRoleBinding.Namespace, "RoleBinding should have a namespace")
assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-listener-rolebinding", managerListenerRoleBinding.Name)
assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-listener-role", managerListenerRoleBinding.RoleRef.Name)
assert.Equal(t, "test-arc-gha-runner-scale-set-controller", managerListenerRoleBinding.Subjects[0].Name)
assert.Equal(t, "test-arc-gha-rs-controller-listener", managerListenerRoleBinding.Name)
assert.Equal(t, "test-arc-gha-rs-controller-listener", managerListenerRoleBinding.RoleRef.Name)
assert.Equal(t, "test-arc-gha-rs-controller", managerListenerRoleBinding.Subjects[0].Name)
assert.Equal(t, namespaceName, managerListenerRoleBinding.Subjects[0].Namespace)
}

@@ -289,6 +300,7 @@ func TestTemplate_ControllerDeployment_Defaults(t *testing.T) {
namespaceName := "test-" + strings.ToLower(random.UniqueId())

options := &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"image.tag": "dev",
},
@@ -301,29 +313,29 @@ func TestTemplate_ControllerDeployment_Defaults(t *testing.T) {
helm.UnmarshalK8SYaml(t, output, &deployment)

assert.Equal(t, namespaceName, deployment.Namespace)
assert.Equal(t, "test-arc-gha-runner-scale-set-controller", deployment.Name)
assert.Equal(t, "gha-runner-scale-set-controller-"+chart.Version, deployment.Labels["helm.sh/chart"])
assert.Equal(t, "gha-runner-scale-set-controller", deployment.Labels["app.kubernetes.io/name"])
assert.Equal(t, "test-arc-gha-rs-controller", deployment.Name)
assert.Equal(t, "gha-rs-controller-"+chart.Version, deployment.Labels["helm.sh/chart"])
assert.Equal(t, "gha-rs-controller", deployment.Labels["app.kubernetes.io/name"])
assert.Equal(t, "test-arc", deployment.Labels["app.kubernetes.io/instance"])
assert.Equal(t, chart.AppVersion, deployment.Labels["app.kubernetes.io/version"])
assert.Equal(t, "Helm", deployment.Labels["app.kubernetes.io/managed-by"])
assert.Equal(t, namespaceName, deployment.Labels["actions.github.com/controller-service-account-namespace"])
assert.Equal(t, "test-arc-gha-runner-scale-set-controller", deployment.Labels["actions.github.com/controller-service-account-name"])
assert.Equal(t, "test-arc-gha-rs-controller", deployment.Labels["actions.github.com/controller-service-account-name"])
assert.NotContains(t, deployment.Labels, "actions.github.com/controller-watch-single-namespace")
assert.Equal(t, "gha-runner-scale-set-controller", deployment.Labels["app.kubernetes.io/part-of"])
assert.Equal(t, "gha-rs-controller", deployment.Labels["app.kubernetes.io/part-of"])

assert.Equal(t, int32(1), *deployment.Spec.Replicas)

assert.Equal(t, "gha-runner-scale-set-controller", deployment.Spec.Selector.MatchLabels["app.kubernetes.io/name"])
assert.Equal(t, "gha-rs-controller", deployment.Spec.Selector.MatchLabels["app.kubernetes.io/name"])
assert.Equal(t, "test-arc", deployment.Spec.Selector.MatchLabels["app.kubernetes.io/instance"])

assert.Equal(t, "gha-runner-scale-set-controller", deployment.Spec.Template.Labels["app.kubernetes.io/name"])
assert.Equal(t, "gha-rs-controller", deployment.Spec.Template.Labels["app.kubernetes.io/name"])
assert.Equal(t, "test-arc", deployment.Spec.Template.Labels["app.kubernetes.io/instance"])

assert.Equal(t, "manager", deployment.Spec.Template.Annotations["kubectl.kubernetes.io/default-container"])

assert.Len(t, deployment.Spec.Template.Spec.ImagePullSecrets, 0)
assert.Equal(t, "test-arc-gha-runner-scale-set-controller", deployment.Spec.Template.Spec.ServiceAccountName)
assert.Equal(t, "test-arc-gha-rs-controller", deployment.Spec.Template.Spec.ServiceAccountName)
assert.Nil(t, deployment.Spec.Template.Spec.SecurityContext)
assert.Empty(t, deployment.Spec.Template.Spec.PriorityClassName)
assert.Equal(t, int64(10), *deployment.Spec.Template.Spec.TerminationGracePeriodSeconds)
@@ -345,17 +357,27 @@ func TestTemplate_ControllerDeployment_Defaults(t *testing.T) {
assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Command, 1)
assert.Equal(t, "/manager", deployment.Spec.Template.Spec.Containers[0].Command[0])

assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Args, 2)
assert.Equal(t, "--auto-scaling-runner-set-only", deployment.Spec.Template.Spec.Containers[0].Args[0])
assert.Equal(t, "--log-level=debug", deployment.Spec.Template.Spec.Containers[0].Args[1])
expectedArgs := []string{
"--auto-scaling-runner-set-only",
"--log-level=debug",
"--log-format=text",
"--update-strategy=immediate",
"--metrics-addr=0",
"--listener-metrics-addr=0",
"--listener-metrics-endpoint=",
}
assert.ElementsMatch(t, expectedArgs, deployment.Spec.Template.Spec.Containers[0].Args)

assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Env, 2)
assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Env, 3)
assert.Equal(t, "CONTROLLER_MANAGER_CONTAINER_IMAGE", deployment.Spec.Template.Spec.Containers[0].Env[0].Name)
assert.Equal(t, managerImage, deployment.Spec.Template.Spec.Containers[0].Env[0].Value)

assert.Equal(t, "CONTROLLER_MANAGER_POD_NAMESPACE", deployment.Spec.Template.Spec.Containers[0].Env[1].Name)
assert.Equal(t, "metadata.namespace", deployment.Spec.Template.Spec.Containers[0].Env[1].ValueFrom.FieldRef.FieldPath)

assert.Equal(t, "CONTROLLER_MANAGER_LISTENER_IMAGE_PULL_POLICY", deployment.Spec.Template.Spec.Containers[0].Env[2].Name)
assert.Equal(t, "IfNotPresent", deployment.Spec.Template.Spec.Containers[0].Env[2].Value) // default value. Needs to align with controllers/actions.github.com/resourcebuilder.go

assert.Empty(t, deployment.Spec.Template.Spec.Containers[0].Resources)
assert.Nil(t, deployment.Spec.Template.Spec.Containers[0].SecurityContext)
assert.Len(t, deployment.Spec.Template.Spec.Containers[0].VolumeMounts, 1)
@@ -381,6 +403,7 @@ func TestTemplate_ControllerDeployment_Customize(t *testing.T) {
namespaceName := "test-" + strings.ToLower(random.UniqueId())

options := &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"labels.foo": "bar",
"labels.github": "actions",
@@ -388,9 +411,11 @@ func TestTemplate_ControllerDeployment_Customize(t *testing.T) {
"image.pullPolicy": "Always",
"image.tag": "dev",
"imagePullSecrets[0].name": "dockerhub",
"nameOverride": "gha-runner-scale-set-controller-override",
"fullnameOverride": "gha-runner-scale-set-controller-fullname-override",
"serviceAccount.name": "gha-runner-scale-set-controller-sa",
"nameOverride": "gha-rs-controller-override",
"fullnameOverride": "gha-rs-controller-fullname-override",
"env[0].name": "ENV_VAR_NAME_1",
"env[0].value": "ENV_VAR_VALUE_1",
"serviceAccount.name": "gha-rs-controller-sa",
"podAnnotations.foo": "bar",
"podSecurityContext.fsGroup": "1000",
"securityContext.runAsUser": "1000",
@@ -400,7 +425,10 @@ func TestTemplate_ControllerDeployment_Customize(t *testing.T) {
"tolerations[0].key": "foo",
"affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].key": "foo",
"affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].operator": "bar",
"priorityClassName": "test-priority-class",
"priorityClassName": "test-priority-class",
"flags.updateStrategy": "eventual",
"flags.logLevel": "info",
"flags.logFormat": "json",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
@@ -411,30 +439,33 @@ func TestTemplate_ControllerDeployment_Customize(t *testing.T) {
helm.UnmarshalK8SYaml(t, output, &deployment)

assert.Equal(t, namespaceName, deployment.Namespace)
assert.Equal(t, "gha-runner-scale-set-controller-fullname-override", deployment.Name)
assert.Equal(t, "gha-runner-scale-set-controller-"+chart.Version, deployment.Labels["helm.sh/chart"])
assert.Equal(t, "gha-runner-scale-set-controller-override", deployment.Labels["app.kubernetes.io/name"])
assert.Equal(t, "gha-rs-controller-fullname-override", deployment.Name)
assert.Equal(t, "gha-rs-controller-"+chart.Version, deployment.Labels["helm.sh/chart"])
assert.Equal(t, "gha-rs-controller-override", deployment.Labels["app.kubernetes.io/name"])
assert.Equal(t, "test-arc", deployment.Labels["app.kubernetes.io/instance"])
assert.Equal(t, chart.AppVersion, deployment.Labels["app.kubernetes.io/version"])
assert.Equal(t, "Helm", deployment.Labels["app.kubernetes.io/managed-by"])
assert.Equal(t, "gha-runner-scale-set-controller", deployment.Labels["app.kubernetes.io/part-of"])
assert.Equal(t, "gha-rs-controller", deployment.Labels["app.kubernetes.io/part-of"])
assert.Equal(t, "bar", deployment.Labels["foo"])
assert.Equal(t, "actions", deployment.Labels["github"])

assert.Equal(t, int32(1), *deployment.Spec.Replicas)

assert.Equal(t, "gha-runner-scale-set-controller-override", deployment.Spec.Selector.MatchLabels["app.kubernetes.io/name"])
assert.Equal(t, "gha-rs-controller-override", deployment.Spec.Selector.MatchLabels["app.kubernetes.io/name"])
assert.Equal(t, "test-arc", deployment.Spec.Selector.MatchLabels["app.kubernetes.io/instance"])

assert.Equal(t, "gha-runner-scale-set-controller-override", deployment.Spec.Template.Labels["app.kubernetes.io/name"])
assert.Equal(t, "gha-rs-controller-override", deployment.Spec.Template.Labels["app.kubernetes.io/name"])
assert.Equal(t, "test-arc", deployment.Spec.Template.Labels["app.kubernetes.io/instance"])

assert.Equal(t, "bar", deployment.Spec.Template.Annotations["foo"])
assert.Equal(t, "manager", deployment.Spec.Template.Annotations["kubectl.kubernetes.io/default-container"])

assert.Equal(t, "ENV_VAR_NAME_1", deployment.Spec.Template.Spec.Containers[0].Env[3].Name)
assert.Equal(t, "ENV_VAR_VALUE_1", deployment.Spec.Template.Spec.Containers[0].Env[3].Value)

assert.Len(t, deployment.Spec.Template.Spec.ImagePullSecrets, 1)
assert.Equal(t, "dockerhub", deployment.Spec.Template.Spec.ImagePullSecrets[0].Name)
assert.Equal(t, "gha-runner-scale-set-controller-sa", deployment.Spec.Template.Spec.ServiceAccountName)
assert.Equal(t, "gha-rs-controller-sa", deployment.Spec.Template.Spec.ServiceAccountName)
assert.Equal(t, int64(1000), *deployment.Spec.Template.Spec.SecurityContext.FSGroup)
assert.Equal(t, "test-priority-class", deployment.Spec.Template.Spec.PriorityClassName)
assert.Equal(t, int64(10), *deployment.Spec.Template.Spec.TerminationGracePeriodSeconds)
@@ -462,15 +493,29 @@ func TestTemplate_ControllerDeployment_Customize(t *testing.T) {
assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Command, 1)
assert.Equal(t, "/manager", deployment.Spec.Template.Spec.Containers[0].Command[0])

assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Args, 3)
assert.Equal(t, "--auto-scaling-runner-set-only", deployment.Spec.Template.Spec.Containers[0].Args[0])
assert.Equal(t, "--auto-scaler-image-pull-secrets=dockerhub", deployment.Spec.Template.Spec.Containers[0].Args[1])
assert.Equal(t, "--log-level=debug", deployment.Spec.Template.Spec.Containers[0].Args[2])
expectArgs := []string{
"--auto-scaling-runner-set-only",
"--auto-scaler-image-pull-secrets=dockerhub",
"--log-level=info",
"--log-format=json",
"--update-strategy=eventual",
"--listener-metrics-addr=0",
"--listener-metrics-endpoint=",
"--metrics-addr=0",
}

assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Env, 2)
assert.ElementsMatch(t, expectArgs, deployment.Spec.Template.Spec.Containers[0].Args)

assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Env, 4)
assert.Equal(t, "CONTROLLER_MANAGER_CONTAINER_IMAGE", deployment.Spec.Template.Spec.Containers[0].Env[0].Name)
assert.Equal(t, managerImage, deployment.Spec.Template.Spec.Containers[0].Env[0].Value)

assert.Equal(t, "CONTROLLER_MANAGER_LISTENER_IMAGE_PULL_POLICY", deployment.Spec.Template.Spec.Containers[0].Env[2].Name)
assert.Equal(t, "Always", deployment.Spec.Template.Spec.Containers[0].Env[2].Value) // default value. Needs to align with controllers/actions.github.com/resourcebuilder.go

assert.Equal(t, "ENV_VAR_NAME_1", deployment.Spec.Template.Spec.Containers[0].Env[3].Name)
assert.Equal(t, "ENV_VAR_VALUE_1", deployment.Spec.Template.Spec.Containers[0].Env[3].Value)

assert.Equal(t, "CONTROLLER_MANAGER_POD_NAMESPACE", deployment.Spec.Template.Spec.Containers[0].Env[1].Name)
assert.Equal(t, "metadata.namespace", deployment.Spec.Template.Spec.Containers[0].Env[1].ValueFrom.FieldRef.FieldPath)
@@ -494,6 +539,7 @@ func TestTemplate_EnableLeaderElectionRole(t *testing.T) {
namespaceName := "test-" + strings.ToLower(random.UniqueId())

options := &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"replicaCount": "2",
},
@@ -505,7 +551,7 @@ func TestTemplate_EnableLeaderElectionRole(t *testing.T) {
var leaderRole rbacv1.Role
helm.UnmarshalK8SYaml(t, output, &leaderRole)

assert.Equal(t, "test-arc-gha-runner-scale-set-controller-leader-election-role", leaderRole.Name)
assert.Equal(t, "test-arc-gha-rs-controller-leader-election", leaderRole.Name)
assert.Equal(t, namespaceName, leaderRole.Namespace)
}

@@ -520,6 +566,7 @@ func TestTemplate_EnableLeaderElectionRoleBinding(t *testing.T) {
namespaceName := "test-" + strings.ToLower(random.UniqueId())

options := &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"replicaCount": "2",
},
@@ -531,10 +578,10 @@ func TestTemplate_EnableLeaderElectionRoleBinding(t *testing.T) {
var leaderRoleBinding rbacv1.RoleBinding
helm.UnmarshalK8SYaml(t, output, &leaderRoleBinding)

assert.Equal(t, "test-arc-gha-runner-scale-set-controller-leader-election-rolebinding", leaderRoleBinding.Name)
assert.Equal(t, "test-arc-gha-rs-controller-leader-election", leaderRoleBinding.Name)
assert.Equal(t, namespaceName, leaderRoleBinding.Namespace)
assert.Equal(t, "test-arc-gha-runner-scale-set-controller-leader-election-role", leaderRoleBinding.RoleRef.Name)
assert.Equal(t, "test-arc-gha-runner-scale-set-controller", leaderRoleBinding.Subjects[0].Name)
assert.Equal(t, "test-arc-gha-rs-controller-leader-election", leaderRoleBinding.RoleRef.Name)
assert.Equal(t, "test-arc-gha-rs-controller", leaderRoleBinding.Subjects[0].Name)
}

func TestTemplate_EnableLeaderElection(t *testing.T) {
@@ -548,6 +595,7 @@ func TestTemplate_EnableLeaderElection(t *testing.T) {
namespaceName := "test-" + strings.ToLower(random.UniqueId())

options := &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"replicaCount": "2",
"image.tag": "dev",
@@ -561,7 +609,7 @@ func TestTemplate_EnableLeaderElection(t *testing.T) {
helm.UnmarshalK8SYaml(t, output, &deployment)

assert.Equal(t, namespaceName, deployment.Namespace)
assert.Equal(t, "test-arc-gha-runner-scale-set-controller", deployment.Name)
assert.Equal(t, "test-arc-gha-rs-controller", deployment.Name)

assert.Equal(t, int32(2), *deployment.Spec.Replicas)

@@ -573,11 +621,19 @@ func TestTemplate_EnableLeaderElection(t *testing.T) {
assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Command, 1)
assert.Equal(t, "/manager", deployment.Spec.Template.Spec.Containers[0].Command[0])

assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Args, 4)
assert.Equal(t, "--auto-scaling-runner-set-only", deployment.Spec.Template.Spec.Containers[0].Args[0])
assert.Equal(t, "--enable-leader-election", deployment.Spec.Template.Spec.Containers[0].Args[1])
assert.Equal(t, "--leader-election-id=test-arc-gha-runner-scale-set-controller", deployment.Spec.Template.Spec.Containers[0].Args[2])
assert.Equal(t, "--log-level=debug", deployment.Spec.Template.Spec.Containers[0].Args[3])
expectedArgs := []string{
"--auto-scaling-runner-set-only",
"--enable-leader-election",
"--leader-election-id=test-arc-gha-rs-controller",
"--log-level=debug",
"--log-format=text",
"--update-strategy=immediate",
"--listener-metrics-addr=0",
"--listener-metrics-endpoint=",
"--metrics-addr=0",
}

assert.ElementsMatch(t, expectedArgs, deployment.Spec.Template.Spec.Containers[0].Args)
}

func TestTemplate_ControllerDeployment_ForwardImagePullSecrets(t *testing.T) {
@@ -591,6 +647,7 @@ func TestTemplate_ControllerDeployment_ForwardImagePullSecrets(t *testing.T) {
namespaceName := "test-" + strings.ToLower(random.UniqueId())

options := &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"imagePullSecrets[0].name": "dockerhub",
"imagePullSecrets[1].name": "ghcr",
@@ -605,10 +662,18 @@ func TestTemplate_ControllerDeployment_ForwardImagePullSecrets(t *testing.T) {

assert.Equal(t, namespaceName, deployment.Namespace)

assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Args, 3)
assert.Equal(t, "--auto-scaling-runner-set-only", deployment.Spec.Template.Spec.Containers[0].Args[0])
assert.Equal(t, "--auto-scaler-image-pull-secrets=dockerhub,ghcr", deployment.Spec.Template.Spec.Containers[0].Args[1])
assert.Equal(t, "--log-level=debug", deployment.Spec.Template.Spec.Containers[0].Args[2])
expectedArgs := []string{
"--auto-scaling-runner-set-only",
"--auto-scaler-image-pull-secrets=dockerhub,ghcr",
"--log-level=debug",
"--log-format=text",
"--update-strategy=immediate",
"--listener-metrics-addr=0",
"--listener-metrics-endpoint=",
"--metrics-addr=0",
}

assert.ElementsMatch(t, expectedArgs, deployment.Spec.Template.Spec.Containers[0].Args)
}

func TestTemplate_ControllerDeployment_WatchSingleNamespace(t *testing.T) {
@@ -629,6 +694,7 @@ func TestTemplate_ControllerDeployment_WatchSingleNamespace(t *testing.T) {
namespaceName := "test-" + strings.ToLower(random.UniqueId())

options := &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"image.tag": "dev",
"flags.watchSingleNamespace": "demo",
@@ -642,28 +708,28 @@ func TestTemplate_ControllerDeployment_WatchSingleNamespace(t *testing.T) {
helm.UnmarshalK8SYaml(t, output, &deployment)

assert.Equal(t, namespaceName, deployment.Namespace)
assert.Equal(t, "test-arc-gha-runner-scale-set-controller", deployment.Name)
assert.Equal(t, "gha-runner-scale-set-controller-"+chart.Version, deployment.Labels["helm.sh/chart"])
assert.Equal(t, "gha-runner-scale-set-controller", deployment.Labels["app.kubernetes.io/name"])
assert.Equal(t, "test-arc-gha-rs-controller", deployment.Name)
assert.Equal(t, "gha-rs-controller-"+chart.Version, deployment.Labels["helm.sh/chart"])
assert.Equal(t, "gha-rs-controller", deployment.Labels["app.kubernetes.io/name"])
assert.Equal(t, "test-arc", deployment.Labels["app.kubernetes.io/instance"])
assert.Equal(t, chart.AppVersion, deployment.Labels["app.kubernetes.io/version"])
assert.Equal(t, "Helm", deployment.Labels["app.kubernetes.io/managed-by"])
assert.Equal(t, namespaceName, deployment.Labels["actions.github.com/controller-service-account-namespace"])
assert.Equal(t, "test-arc-gha-runner-scale-set-controller", deployment.Labels["actions.github.com/controller-service-account-name"])
assert.Equal(t, "test-arc-gha-rs-controller", deployment.Labels["actions.github.com/controller-service-account-name"])
assert.Equal(t, "demo", deployment.Labels["actions.github.com/controller-watch-single-namespace"])

assert.Equal(t, int32(1), *deployment.Spec.Replicas)

assert.Equal(t, "gha-runner-scale-set-controller", deployment.Spec.Selector.MatchLabels["app.kubernetes.io/name"])
assert.Equal(t, "gha-rs-controller", deployment.Spec.Selector.MatchLabels["app.kubernetes.io/name"])
assert.Equal(t, "test-arc", deployment.Spec.Selector.MatchLabels["app.kubernetes.io/instance"])

assert.Equal(t, "gha-runner-scale-set-controller", deployment.Spec.Template.Labels["app.kubernetes.io/name"])
assert.Equal(t, "gha-rs-controller", deployment.Spec.Template.Labels["app.kubernetes.io/name"])
assert.Equal(t, "test-arc", deployment.Spec.Template.Labels["app.kubernetes.io/instance"])

assert.Equal(t, "manager", deployment.Spec.Template.Annotations["kubectl.kubernetes.io/default-container"])

assert.Len(t, deployment.Spec.Template.Spec.ImagePullSecrets, 0)
assert.Equal(t, "test-arc-gha-runner-scale-set-controller", deployment.Spec.Template.Spec.ServiceAccountName)
assert.Equal(t, "test-arc-gha-rs-controller", deployment.Spec.Template.Spec.ServiceAccountName)
assert.Nil(t, deployment.Spec.Template.Spec.SecurityContext)
assert.Empty(t, deployment.Spec.Template.Spec.PriorityClassName)
|
||||
assert.Equal(t, int64(10), *deployment.Spec.Template.Spec.TerminationGracePeriodSeconds)
|
||||
@@ -685,18 +751,29 @@ func TestTemplate_ControllerDeployment_WatchSingleNamespace(t *testing.T) {
|
||||
assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Command, 1)
|
||||
assert.Equal(t, "/manager", deployment.Spec.Template.Spec.Containers[0].Command[0])
|
||||
|
||||
assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Args, 3)
|
||||
assert.Equal(t, "--auto-scaling-runner-set-only", deployment.Spec.Template.Spec.Containers[0].Args[0])
|
||||
assert.Equal(t, "--log-level=debug", deployment.Spec.Template.Spec.Containers[0].Args[1])
|
||||
assert.Equal(t, "--watch-single-namespace=demo", deployment.Spec.Template.Spec.Containers[0].Args[2])
|
||||
expectedArgs := []string{
|
||||
"--auto-scaling-runner-set-only",
|
||||
"--log-level=debug",
|
||||
"--log-format=text",
|
||||
"--watch-single-namespace=demo",
|
||||
"--update-strategy=immediate",
|
||||
"--listener-metrics-addr=0",
|
||||
"--listener-metrics-endpoint=",
|
||||
"--metrics-addr=0",
|
||||
}
|
||||
|
||||
assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Env, 2)
|
||||
assert.ElementsMatch(t, expectedArgs, deployment.Spec.Template.Spec.Containers[0].Args)
|
||||
|
||||
assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Env, 3)
|
||||
assert.Equal(t, "CONTROLLER_MANAGER_CONTAINER_IMAGE", deployment.Spec.Template.Spec.Containers[0].Env[0].Name)
|
||||
assert.Equal(t, managerImage, deployment.Spec.Template.Spec.Containers[0].Env[0].Value)
|
||||
|
||||
assert.Equal(t, "CONTROLLER_MANAGER_POD_NAMESPACE", deployment.Spec.Template.Spec.Containers[0].Env[1].Name)
|
||||
assert.Equal(t, "metadata.namespace", deployment.Spec.Template.Spec.Containers[0].Env[1].ValueFrom.FieldRef.FieldPath)
|
||||
|
||||
assert.Equal(t, "CONTROLLER_MANAGER_LISTENER_IMAGE_PULL_POLICY", deployment.Spec.Template.Spec.Containers[0].Env[2].Name)
|
||||
assert.Equal(t, "IfNotPresent", deployment.Spec.Template.Spec.Containers[0].Env[2].Value) // default value. Needs to align with controllers/actions.github.com/resourcebuilder.go
|
||||
|
||||
assert.Empty(t, deployment.Spec.Template.Spec.Containers[0].Resources)
|
||||
assert.Nil(t, deployment.Spec.Template.Spec.Containers[0].SecurityContext)
|
||||
assert.Len(t, deployment.Spec.Template.Spec.Containers[0].VolumeMounts, 1)
|
||||
@@ -704,6 +781,53 @@ func TestTemplate_ControllerDeployment_WatchSingleNamespace(t *testing.T) {
|
||||
assert.Equal(t, "/tmp", deployment.Spec.Template.Spec.Containers[0].VolumeMounts[0].MountPath)
|
||||
}
|
||||
|
||||
func TestTemplate_ControllerContainerEnvironmentVariables(t *testing.T) {
t.Parallel()

// Path to the helm chart we will test
helmChartPath, err := filepath.Abs("../../gha-runner-scale-set-controller")
require.NoError(t, err)

releaseName := "test-arc"
namespaceName := "test-" + strings.ToLower(random.UniqueId())

options := &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"env[0].Name": "ENV_VAR_NAME_1",
"env[0].Value": "ENV_VAR_VALUE_1",
"env[1].Name": "ENV_VAR_NAME_2",
"env[1].ValueFrom.SecretKeyRef.Key": "ENV_VAR_NAME_2",
"env[1].ValueFrom.SecretKeyRef.Name": "secret-name",
"env[1].ValueFrom.SecretKeyRef.Optional": "true",
"env[2].Name": "ENV_VAR_NAME_3",
"env[2].Value": "",
"env[3].Name": "ENV_VAR_NAME_4",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}

output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/deployment.yaml"})

var deployment appsv1.Deployment
helm.UnmarshalK8SYaml(t, output, &deployment)

assert.Equal(t, namespaceName, deployment.Namespace)
assert.Equal(t, "test-arc-gha-rs-controller", deployment.Name)

assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Env, 7)
assert.Equal(t, "ENV_VAR_NAME_1", deployment.Spec.Template.Spec.Containers[0].Env[3].Name)
assert.Equal(t, "ENV_VAR_VALUE_1", deployment.Spec.Template.Spec.Containers[0].Env[3].Value)
assert.Equal(t, "ENV_VAR_NAME_2", deployment.Spec.Template.Spec.Containers[0].Env[4].Name)
assert.Equal(t, "secret-name", deployment.Spec.Template.Spec.Containers[0].Env[4].ValueFrom.SecretKeyRef.Name)
assert.Equal(t, "ENV_VAR_NAME_2", deployment.Spec.Template.Spec.Containers[0].Env[4].ValueFrom.SecretKeyRef.Key)
assert.True(t, *deployment.Spec.Template.Spec.Containers[0].Env[4].ValueFrom.SecretKeyRef.Optional)
assert.Equal(t, "ENV_VAR_NAME_3", deployment.Spec.Template.Spec.Containers[0].Env[5].Name)
assert.Empty(t, deployment.Spec.Template.Spec.Containers[0].Env[5].Value)
assert.Equal(t, "ENV_VAR_NAME_4", deployment.Spec.Template.Spec.Containers[0].Env[6].Name)
assert.Empty(t, deployment.Spec.Template.Spec.Containers[0].Env[6].ValueFrom)
}

func TestTemplate_WatchSingleNamespace_NotCreateManagerClusterRole(t *testing.T) {
t.Parallel()

@@ -715,6 +839,7 @@ func TestTemplate_WatchSingleNamespace_NotCreateManagerClusterRole(t *testing.T)
namespaceName := "test-" + strings.ToLower(random.UniqueId())

options := &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"flags.watchSingleNamespace": "demo",
},
@@ -736,6 +861,7 @@ func TestTemplate_WatchSingleNamespace_NotManagerClusterRoleBinding(t *testing.T
namespaceName := "test-" + strings.ToLower(random.UniqueId())

options := &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"serviceAccount.create": "true",
"flags.watchSingleNamespace": "demo",
@@ -758,6 +884,7 @@ func TestTemplate_CreateManagerSingleNamespaceRole(t *testing.T) {
namespaceName := "test-" + strings.ToLower(random.UniqueId())

options := &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"flags.watchSingleNamespace": "demo",
},
@@ -769,7 +896,7 @@ func TestTemplate_CreateManagerSingleNamespaceRole(t *testing.T) {
var managerSingleNamespaceControllerRole rbacv1.Role
helm.UnmarshalK8SYaml(t, output, &managerSingleNamespaceControllerRole)

assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-single-namespace-role", managerSingleNamespaceControllerRole.Name)
assert.Equal(t, "test-arc-gha-rs-controller-single-namespace", managerSingleNamespaceControllerRole.Name)
assert.Equal(t, namespaceName, managerSingleNamespaceControllerRole.Namespace)
assert.Equal(t, 10, len(managerSingleNamespaceControllerRole.Rules))

@@ -778,9 +905,9 @@ func TestTemplate_CreateManagerSingleNamespaceRole(t *testing.T) {
var managerSingleNamespaceWatchRole rbacv1.Role
helm.UnmarshalK8SYaml(t, output, &managerSingleNamespaceWatchRole)

assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-single-namespace-role", managerSingleNamespaceWatchRole.Name)
assert.Equal(t, "test-arc-gha-rs-controller-single-namespace-watch", managerSingleNamespaceWatchRole.Name)
assert.Equal(t, "demo", managerSingleNamespaceWatchRole.Namespace)
assert.Equal(t, 13, len(managerSingleNamespaceWatchRole.Rules))
assert.Equal(t, 14, len(managerSingleNamespaceWatchRole.Rules))
}

func TestTemplate_ManagerSingleNamespaceRoleBinding(t *testing.T) {
@@ -794,6 +921,7 @@ func TestTemplate_ManagerSingleNamespaceRoleBinding(t *testing.T) {
namespaceName := "test-" + strings.ToLower(random.UniqueId())

options := &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"flags.watchSingleNamespace": "demo",
},
@@ -805,10 +933,10 @@ func TestTemplate_ManagerSingleNamespaceRoleBinding(t *testing.T) {
var managerSingleNamespaceControllerRoleBinding rbacv1.RoleBinding
helm.UnmarshalK8SYaml(t, output, &managerSingleNamespaceControllerRoleBinding)

assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-single-namespace-rolebinding", managerSingleNamespaceControllerRoleBinding.Name)
assert.Equal(t, "test-arc-gha-rs-controller-single-namespace", managerSingleNamespaceControllerRoleBinding.Name)
assert.Equal(t, namespaceName, managerSingleNamespaceControllerRoleBinding.Namespace)
assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-single-namespace-role", managerSingleNamespaceControllerRoleBinding.RoleRef.Name)
assert.Equal(t, "test-arc-gha-runner-scale-set-controller", managerSingleNamespaceControllerRoleBinding.Subjects[0].Name)
assert.Equal(t, "test-arc-gha-rs-controller-single-namespace", managerSingleNamespaceControllerRoleBinding.RoleRef.Name)
assert.Equal(t, "test-arc-gha-rs-controller", managerSingleNamespaceControllerRoleBinding.Subjects[0].Name)
assert.Equal(t, namespaceName, managerSingleNamespaceControllerRoleBinding.Subjects[0].Namespace)

output = helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/manager_single_namespace_watch_role_binding.yaml"})
@@ -816,9 +944,81 @@ func TestTemplate_ManagerSingleNamespaceRoleBinding(t *testing.T) {
var managerSingleNamespaceWatchRoleBinding rbacv1.RoleBinding
helm.UnmarshalK8SYaml(t, output, &managerSingleNamespaceWatchRoleBinding)

assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-single-namespace-rolebinding", managerSingleNamespaceWatchRoleBinding.Name)
assert.Equal(t, "test-arc-gha-rs-controller-single-namespace-watch", managerSingleNamespaceWatchRoleBinding.Name)
assert.Equal(t, "demo", managerSingleNamespaceWatchRoleBinding.Namespace)
assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-single-namespace-role", managerSingleNamespaceWatchRoleBinding.RoleRef.Name)
assert.Equal(t, "test-arc-gha-runner-scale-set-controller", managerSingleNamespaceWatchRoleBinding.Subjects[0].Name)
assert.Equal(t, "test-arc-gha-rs-controller-single-namespace-watch", managerSingleNamespaceWatchRoleBinding.RoleRef.Name)
assert.Equal(t, "test-arc-gha-rs-controller", managerSingleNamespaceWatchRoleBinding.Subjects[0].Name)
assert.Equal(t, namespaceName, managerSingleNamespaceWatchRoleBinding.Subjects[0].Namespace)
}

func TestControllerDeployment_MetricsPorts(t *testing.T) {
t.Parallel()

// Path to the helm chart we will test
helmChartPath, err := filepath.Abs("../../gha-runner-scale-set-controller")
require.NoError(t, err)

chartContent, err := os.ReadFile(filepath.Join(helmChartPath, "Chart.yaml"))
require.NoError(t, err)

chart := new(Chart)
err = yaml.Unmarshal(chartContent, chart)
require.NoError(t, err)

releaseName := "test-arc"
namespaceName := "test-" + strings.ToLower(random.UniqueId())

options := &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"image.tag": "dev",
"metrics.controllerManagerAddr": ":8080",
"metrics.listenerAddr": ":8081",
"metrics.listenerEndpoint": "/metrics",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}

output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/deployment.yaml"})

var deployment appsv1.Deployment
helm.UnmarshalK8SYaml(t, output, &deployment)

require.Len(t, deployment.Spec.Template.Spec.Containers, 1, "Expected one container")
container := deployment.Spec.Template.Spec.Containers[0]
assert.Len(t, container.Ports, 1)
port := container.Ports[0]
assert.Equal(t, corev1.Protocol("TCP"), port.Protocol)
assert.Equal(t, int32(8080), port.ContainerPort)

metricsFlags := map[string]*struct {
expect string
frequency int
}{
"--listener-metrics-addr": {
expect: ":8081",
},
"--listener-metrics-endpoint": {
expect: "/metrics",
},
"--metrics-addr": {
expect: ":8080",
},
}
for _, cmd := range container.Args {
s := strings.Split(cmd, "=")
if len(s) != 2 {
continue
}
flag, ok := metricsFlags[s[0]]
if !ok {
continue
}
flag.frequency++
assert.Equal(t, flag.expect, s[1])
}

for key, value := range metricsFlags {
assert.Equal(t, value.frequency, 1, fmt.Sprintf("frequency of %q is not 1", key))
}
}

@@ -18,6 +18,17 @@ imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""

env:
## Define environment variables for the controller pod
# - name: "ENV_VAR_NAME_1"
# value: "ENV_VAR_VALUE_1"
# - name: "ENV_VAR_NAME_2"
# valueFrom:
# secretKeyRef:
# key: ENV_VAR_NAME_2
# name: secret-name
# optional: true

serviceAccount:
# Specifies whether a service account should be created for running the controller pod
create: true
@@ -31,27 +42,27 @@ serviceAccount:
podAnnotations: {}

podSecurityContext: {}
# fsGroup: 2000
# fsGroup: 2000

securityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000

resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
## We usually recommend not to specify default resources and to leave this as a conscious
## choice for the user. This also increases chances charts run on environments with little
## resources, such as Minikube. If you do want to specify resources, uncomment the following
## lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi

nodeSelector: {}

@@ -65,10 +76,40 @@ affinity: {}
priorityClassName: ""

flags:
# Log level can be set here with one of the following values: "debug", "info", "warn", "error".
# Defaults to "debug".
## Log level can be set here with one of the following values: "debug", "info", "warn", "error".
## Defaults to "debug".
logLevel: "debug"
## Log format can be set with one of the following values: "text", "json"
## Defaults to "text"
logFormat: "text"

# Restricts the controller to only watch resources in the desired namespace.
# Defaults to watch all namespaces when unset.
# watchSingleNamespace: ""
## Restricts the controller to only watch resources in the desired namespace.
## Defaults to watch all namespaces when unset.
# watchSingleNamespace: ""

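For instance, a minimal values override that pins the controller to a single namespace (reusing the "demo" namespace that the chart tests above exercise) would be a sketch like:

    flags:
      watchSingleNamespace: "demo"
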
## Defines how the controller should handle upgrades while having running jobs.
##
## The strategies available are:
## - "immediate": (default) The controller will immediately apply the change causing the
## recreation of the listener and ephemeral runner set. This can lead to an
## overprovisioning of runners, if there are pending / running jobs. This should not
## be a problem at a small scale, but it could lead to a significant increase of
## resources if you have a lot of jobs running concurrently.
##
## - "eventual": The controller will remove the listener and ephemeral runner set
## immediately, but will not recreate them (to apply changes) until all
## pending / running jobs have completed.
## This can lead to a longer time to apply the change but it will ensure
## that you don't have any overprovisioning of runners.
updateStrategy: "immediate"
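As a sketch, opting into the other strategy is a one-line change to the same block (all other values assumed to keep their chart defaults):

    flags:
      updateStrategy: "eventual"
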

## If `metrics:` object is not provided, or commented out, the following flags
## will be applied to the controller-manager and listener pods with empty values:
## `--metrics-addr`, `--listener-metrics-addr`, `--listener-metrics-endpoint`.
## This will disable metrics.
##
## To enable metrics, uncomment the following lines.
# metrics:
# controllerManagerAddr: ":8080"
# listenerAddr: ":8080"
# listenerEndpoint: "/metrics"
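Uncommented, the metrics block from the comments above would read:

    metrics:
      controllerManagerAddr: ":8080"
      listenerAddr: ":8080"
      listenerEndpoint: "/metrics"
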

@@ -15,13 +15,13 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.3.0
version: 0.5.0

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "0.3.0"
appVersion: "0.5.0"

home: https://github.com/actions/dev-arc


@@ -1,8 +1,13 @@
{{/*
Expand the name of the chart.
*/}}

{{- define "gha-base-name" -}}
gha-rs
{{- end }}

{{- define "gha-runner-scale-set.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- default (include "gha-base-name" .) .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/*
@@ -11,23 +16,15 @@ We truncate at 63 chars because some Kubernetes name fields are limited to this
If release name contains chart name it will be used as a full name.
*/}}
{{- define "gha-runner-scale-set.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default (include "gha-base-name" .) }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}

{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "gha-runner-scale-set.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- printf "%s-%s" (include "gha-base-name" .) .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
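To illustrate the renamed helpers, a hypothetical release called "test-runners" with no nameOverride or fullnameOverride would now yield names like the following (reconstructed from the helpers above and the test expectations elsewhere in this diff):

    # gha-runner-scale-set.fullname  => test-runners-gha-rs
    # kubeModeServiceAccountName     => test-runners-gha-rs-kube-mode
    # noPermissionServiceAccountName => test-runners-gha-rs-no-permission
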

{{/*
@@ -40,7 +37,9 @@ helm.sh/chart: {{ include "gha-runner-scale-set.chart" . }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/part-of: gha-runner-scale-set
app.kubernetes.io/part-of: gha-rs
actions.github.com/scale-set-name: {{ .Release.Name }}
actions.github.com/scale-set-namespace: {{ .Release.Namespace }}
{{- end }}

{{/*
@@ -64,15 +63,19 @@ app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}

{{- define "gha-runner-scale-set.noPermissionServiceAccountName" -}}
{{- include "gha-runner-scale-set.fullname" . }}-no-permission-service-account
{{- include "gha-runner-scale-set.fullname" . }}-no-permission
{{- end }}

{{- define "gha-runner-scale-set.kubeModeRoleName" -}}
{{- include "gha-runner-scale-set.fullname" . }}-kube-mode-role
{{- include "gha-runner-scale-set.fullname" . }}-kube-mode
{{- end }}

{{- define "gha-runner-scale-set.kubeModeRoleBindingName" -}}
{{- include "gha-runner-scale-set.fullname" . }}-kube-mode
{{- end }}

{{- define "gha-runner-scale-set.kubeModeServiceAccountName" -}}
{{- include "gha-runner-scale-set.fullname" . }}-kube-mode-service-account
{{- include "gha-runner-scale-set.fullname" . }}-kube-mode
{{- end }}

{{- define "gha-runner-scale-set.dind-init-container" -}}
@@ -430,11 +433,11 @@ volumeMounts:
{{- end }}

{{- define "gha-runner-scale-set.managerRoleName" -}}
{{- include "gha-runner-scale-set.fullname" . }}-manager-role
{{- include "gha-runner-scale-set.fullname" . }}-manager
{{- end }}

{{- define "gha-runner-scale-set.managerRoleBinding" -}}
{{- include "gha-runner-scale-set.fullname" . }}-manager-role-binding
{{- define "gha-runner-scale-set.managerRoleBindingName" -}}
{{- include "gha-runner-scale-set.fullname" . }}-manager
{{- end }}

{{- define "gha-runner-scale-set.managerServiceAccountName" -}}
@@ -453,7 +456,7 @@ volumeMounts:
{{- $managerServiceAccountName := "" }}
{{- range $index, $deployment := (lookup "apps/v1" "Deployment" "" "").items }}
{{- if kindIs "map" $deployment.metadata.labels }}
{{- if eq (get $deployment.metadata.labels "app.kubernetes.io/part-of") "gha-runner-scale-set-controller" }}
{{- if eq (get $deployment.metadata.labels "app.kubernetes.io/part-of") "gha-rs-controller" }}
{{- if hasKey $deployment.metadata.labels "actions.github.com/controller-watch-single-namespace" }}
{{- $singleNamespaceCounter = add $singleNamespaceCounter 1 }}
{{- $_ := set $singleNamespaceControllerDeployments (get $deployment.metadata.labels "actions.github.com/controller-watch-single-namespace") $deployment}}
@@ -465,13 +468,13 @@ volumeMounts:
{{- end }}
{{- end }}
{{- if and (eq $multiNamespacesCounter 0) (eq $singleNamespaceCounter 0) }}
{{- fail "No gha-runner-scale-set-controller deployment found using label (app.kubernetes.io/part-of=gha-runner-scale-set-controller). Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
{{- fail "No gha-rs-controller deployment found using label (app.kubernetes.io/part-of=gha-rs-controller). Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
{{- end }}
{{- if and (gt $multiNamespacesCounter 0) (gt $singleNamespaceCounter 0) }}
{{- fail "Found both gha-runner-scale-set-controller installed with flags.watchSingleNamespace set and unset in cluster, this is not supported. Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
{{- fail "Found both gha-rs-controller installed with flags.watchSingleNamespace set and unset in cluster, this is not supported. Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
{{- end }}
{{- if gt $multiNamespacesCounter 1 }}
{{- fail "More than one gha-runner-scale-set-controller deployment found using label (app.kubernetes.io/part-of=gha-runner-scale-set-controller). Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
{{- fail "More than one gha-rs-controller deployment found using label (app.kubernetes.io/part-of=gha-rs-controller). Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
{{- end }}
{{- if eq $multiNamespacesCounter 1 }}
{{- with $controllerDeployment.metadata }}
@@ -484,11 +487,11 @@ volumeMounts:
{{- $managerServiceAccountName = (get $controllerDeployment.metadata.labels "actions.github.com/controller-service-account-name") }}
{{- end }}
{{- else }}
{{- fail "No gha-runner-scale-set-controller deployment that watch this namespace found using label (actions.github.com/controller-watch-single-namespace). Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
{{- fail "No gha-rs-controller deployment that watch this namespace found using label (actions.github.com/controller-watch-single-namespace). Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
{{- end }}
{{- end }}
{{- if eq $managerServiceAccountName "" }}
{{- fail "No service account name found for gha-runner-scale-set-controller deployment using label (actions.github.com/controller-service-account-name), consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
{{- fail "No service account name found for gha-rs-controller deployment using label (actions.github.com/controller-service-account-name), consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
{{- end }}
{{- $managerServiceAccountName }}
{{- end }}
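When this lookup-based discovery is unwanted, or fails as the messages above describe, the controller service account can be pinned explicitly instead; a sketch, using the same values the ExtraVolumes tests below pass via --set:

    controllerServiceAccount:
      name: arc
      namespace: arc-system
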
@@ -510,7 +513,7 @@ volumeMounts:
{{- $managerServiceAccountNamespace := "" }}
{{- range $index, $deployment := (lookup "apps/v1" "Deployment" "" "").items }}
{{- if kindIs "map" $deployment.metadata.labels }}
{{- if eq (get $deployment.metadata.labels "app.kubernetes.io/part-of") "gha-runner-scale-set-controller" }}
{{- if eq (get $deployment.metadata.labels "app.kubernetes.io/part-of") "gha-rs-controller" }}
{{- if hasKey $deployment.metadata.labels "actions.github.com/controller-watch-single-namespace" }}
{{- $singleNamespaceCounter = add $singleNamespaceCounter 1 }}
{{- $_ := set $singleNamespaceControllerDeployments (get $deployment.metadata.labels "actions.github.com/controller-watch-single-namespace") $deployment}}
@@ -522,13 +525,13 @@ volumeMounts:
{{- end }}
{{- end }}
{{- if and (eq $multiNamespacesCounter 0) (eq $singleNamespaceCounter 0) }}
{{- fail "No gha-runner-scale-set-controller deployment found using label (app.kubernetes.io/part-of=gha-runner-scale-set-controller). Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
{{- fail "No gha-rs-controller deployment found using label (app.kubernetes.io/part-of=gha-rs-controller). Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
{{- end }}
{{- if and (gt $multiNamespacesCounter 0) (gt $singleNamespaceCounter 0) }}
{{- fail "Found both gha-runner-scale-set-controller installed with flags.watchSingleNamespace set and unset in cluster, this is not supported. Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
{{- fail "Found both gha-rs-controller installed with flags.watchSingleNamespace set and unset in cluster, this is not supported. Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
{{- end }}
{{- if gt $multiNamespacesCounter 1 }}
{{- fail "More than one gha-runner-scale-set-controller deployment found using label (app.kubernetes.io/part-of=gha-runner-scale-set-controller). Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
{{- fail "More than one gha-rs-controller deployment found using label (app.kubernetes.io/part-of=gha-rs-controller). Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
{{- end }}
{{- if eq $multiNamespacesCounter 1 }}
{{- with $controllerDeployment.metadata }}
@@ -541,11 +544,11 @@ volumeMounts:
{{- $managerServiceAccountNamespace = (get $controllerDeployment.metadata.labels "actions.github.com/controller-service-account-namespace") }}
{{- end }}
{{- else }}
{{- fail "No gha-runner-scale-set-controller deployment that watch this namespace found using label (actions.github.com/controller-watch-single-namespace). Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
{{- fail "No gha-rs-controller deployment that watch this namespace found using label (actions.github.com/controller-watch-single-namespace). Consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
{{- end }}
{{- end }}
{{- if eq $managerServiceAccountNamespace "" }}
{{- fail "No service account namespace found for gha-runner-scale-set-controller deployment using label (actions.github.com/controller-service-account-namespace), consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
{{- fail "No service account namespace found for gha-rs-controller deployment using label (actions.github.com/controller-service-account-namespace), consider setting controllerServiceAccount.name in values.yaml to be explicit if you think the discovery is wrong." }}
{{- end }}
{{- $managerServiceAccountNamespace }}
{{- end }}

@@ -12,6 +12,21 @@ metadata:
labels:
app.kubernetes.io/component: "autoscaling-runner-set"
{{- include "gha-runner-scale-set.labels" . | nindent 4 }}
annotations:
{{- $containerMode := .Values.containerMode }}
{{- if not (kindIs "string" .Values.githubConfigSecret) }}
actions.github.com/cleanup-github-secret-name: {{ include "gha-runner-scale-set.githubsecret" . }}
{{- end }}
actions.github.com/cleanup-manager-role-binding: {{ include "gha-runner-scale-set.managerRoleBindingName" . }}
actions.github.com/cleanup-manager-role-name: {{ include "gha-runner-scale-set.managerRoleName" . }}
{{- if and $containerMode (eq $containerMode.type "kubernetes") (not .Values.template.spec.serviceAccountName) }}
actions.github.com/cleanup-kubernetes-mode-role-binding-name: {{ include "gha-runner-scale-set.kubeModeRoleBindingName" . }}
actions.github.com/cleanup-kubernetes-mode-role-name: {{ include "gha-runner-scale-set.kubeModeRoleName" . }}
actions.github.com/cleanup-kubernetes-mode-service-account-name: {{ include "gha-runner-scale-set.kubeModeServiceAccountName" . }}
{{- end }}
{{- if and (ne $containerMode.type "kubernetes") (not .Values.template.spec.serviceAccountName) }}
actions.github.com/cleanup-no-permission-service-account-name: {{ include "gha-runner-scale-set.noPermissionServiceAccountName" . }}
{{- end }}
spec:
githubConfigUrl: {{ required ".Values.githubConfigUrl is required" (trimSuffix "/" .Values.githubConfigUrl) }}
githubConfigSecret: {{ include "gha-runner-scale-set.githubsecret" . }}
@@ -104,7 +119,7 @@ spec:
{{- include "gha-runner-scale-set.dind-init-container" . | nindent 8 }}
{{- end }}
{{- with .Values.template.spec.initContainers }}
{{- toYaml . | nindent 8 }}
{{- toYaml . | nindent 6 }}
{{- end }}
{{- end }}
containers:

@@ -7,7 +7,7 @@ metadata:
labels:
{{- include "gha-runner-scale-set.labels" . | nindent 4 }}
finalizers:
- actions.github.com/secret-protection
- actions.github.com/cleanup-protection
data:
{{- $hasToken := false }}
{{- $hasAppId := false }}
@@ -36,4 +36,4 @@ data:
{{- if and $hasAppId (or (not $hasInstallationId) (not $hasPrivateKey)) }}
{{- fail "A valid .Values.githubConfigSecret is required for setting auth with GitHub server, provide .Values.githubConfigSecret.github_app_installation_id and .Values.githubConfigSecret.github_app_private_key." }}
{{- end }}
{{- end}}
{{- end}}
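The validation above accepts either a plain token or a complete GitHub App triple; a sketch of the latter, with the key names taken from the fail message and placeholder values:

    githubConfigSecret:
      github_app_id: "10"
      github_app_installation_id: "100"
      github_app_private_key: "<GitHub App PEM private key>"
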

@@ -6,6 +6,8 @@ kind: Role
metadata:
name: {{ include "gha-runner-scale-set.kubeModeRoleName" . }}
namespace: {{ .Release.Namespace }}
finalizers:
- actions.github.com/cleanup-protection
rules:
- apiGroups: [""]
resources: ["pods"]

@@ -3,8 +3,10 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: {{ include "gha-runner-scale-set.kubeModeRoleName" . }}
name: {{ include "gha-runner-scale-set.kubeModeRoleBindingName" . }}
namespace: {{ .Release.Namespace }}
finalizers:
- actions.github.com/cleanup-protection
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role

@@ -5,6 +5,8 @@ kind: ServiceAccount
metadata:
name: {{ include "gha-runner-scale-set.kubeModeServiceAccountName" . }}
namespace: {{ .Release.Namespace }}
finalizers:
- actions.github.com/cleanup-protection
labels:
{{- include "gha-runner-scale-set.labels" . | nindent 4 }}
{{- end }}

@@ -3,6 +3,11 @@ kind: Role
metadata:
name: {{ include "gha-runner-scale-set.managerRoleName" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "gha-runner-scale-set.labels" . | nindent 4 }}
app.kubernetes.io/component: manager-role
finalizers:
- actions.github.com/cleanup-protection
rules:
- apiGroups:
- ""
@@ -29,6 +34,17 @@ rules:
- list
- patch
- update
- apiGroups:
- ""
resources:
- serviceaccounts
verbs:
- create
- delete
- get
- list
- patch
- update
- apiGroups:
- rbac.authorization.k8s.io
resources:
@@ -56,4 +72,4 @@ rules:
- configmaps
verbs:
- get
{{- end }}
{{- end }}

@@ -1,8 +1,13 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: {{ include "gha-runner-scale-set.managerRoleBinding" . }}
name: {{ include "gha-runner-scale-set.managerRoleBindingName" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "gha-runner-scale-set.labels" . | nindent 4 }}
app.kubernetes.io/component: manager-role-binding
finalizers:
- actions.github.com/cleanup-protection
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
@@ -10,4 +15,4 @@ roleRef:
subjects:
- kind: ServiceAccount
name: {{ include "gha-runner-scale-set.managerServiceAccountName" . | nindent 4 }}
namespace: {{ include "gha-runner-scale-set.managerServiceAccountNamespace" . | nindent 4 }}
namespace: {{ include "gha-runner-scale-set.managerServiceAccountNamespace" . | nindent 4 }}

@@ -7,4 +7,6 @@ metadata:
namespace: {{ .Release.Namespace }}
labels:
{{- include "gha-runner-scale-set.labels" . | nindent 4 }}
finalizers:
- actions.github.com/cleanup-protection
{{- end }}

@@ -1,13 +1,16 @@
package tests

import (
"fmt"
"path/filepath"
"strings"
"testing"

v1alpha1 "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
actionsgithubcom "github.com/actions/actions-runner-controller/controllers/actions.github.com"
"github.com/gruntwork-io/terratest/modules/helm"
"github.com/gruntwork-io/terratest/modules/k8s"
"github.com/gruntwork-io/terratest/modules/logger"
"github.com/gruntwork-io/terratest/modules/random"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -26,6 +29,7 @@ func TestTemplateRenderedGitHubSecretWithGitHubToken(t *testing.T) {
namespaceName := "test-" + strings.ToLower(random.UniqueId())

options := &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345",
@@ -41,9 +45,9 @@ func TestTemplateRenderedGitHubSecretWithGitHubToken(t *testing.T) {
helm.UnmarshalK8SYaml(t, output, &githubSecret)

assert.Equal(t, namespaceName, githubSecret.Namespace)
assert.Equal(t, "test-runners-gha-runner-scale-set-github-secret", githubSecret.Name)
assert.Equal(t, "test-runners-gha-rs-github-secret", githubSecret.Name)
assert.Equal(t, "gh_token12345", string(githubSecret.Data["github_token"]))
assert.Equal(t, "actions.github.com/secret-protection", githubSecret.Finalizers[0])
assert.Equal(t, "actions.github.com/cleanup-protection", githubSecret.Finalizers[0])
}

func TestTemplateRenderedGitHubSecretWithGitHubApp(t *testing.T) {
@@ -57,6 +61,7 @@ func TestTemplateRenderedGitHubSecretWithGitHubApp(t *testing.T) {
namespaceName := "test-" + strings.ToLower(random.UniqueId())

options := &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_app_id": "10",
@@ -90,6 +95,7 @@ func TestTemplateRenderedGitHubSecretErrorWithMissingAuthInput(t *testing.T) {
namespaceName := "test-" + strings.ToLower(random.UniqueId())

options := &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_app_id": "",
@@ -117,6 +123,7 @@ func TestTemplateRenderedGitHubSecretErrorWithMissingAppInput(t *testing.T) {
namespaceName := "test-" + strings.ToLower(random.UniqueId())

options := &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_app_id": "10",
@@ -143,6 +150,7 @@ func TestTemplateNotRenderedGitHubSecretWithPredefinedSecret(t *testing.T) {
namespaceName := "test-" + strings.ToLower(random.UniqueId())

options := &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret": "pre-defined-secret",
@@ -167,6 +175,7 @@ func TestTemplateRenderedSetServiceAccountToNoPermission(t *testing.T) {
namespaceName := "test-" + strings.ToLower(random.UniqueId())

options := &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345",
@@ -181,13 +190,14 @@ func TestTemplateRenderedSetServiceAccountToNoPermission(t *testing.T) {
helm.UnmarshalK8SYaml(t, output, &serviceAccount)

assert.Equal(t, namespaceName, serviceAccount.Namespace)
assert.Equal(t, "test-runners-gha-runner-scale-set-no-permission-service-account", serviceAccount.Name)
assert.Equal(t, "test-runners-gha-rs-no-permission", serviceAccount.Name)

output = helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"})
var ars v1alpha1.AutoscalingRunnerSet
helm.UnmarshalK8SYaml(t, output, &ars)

assert.Equal(t, "test-runners-gha-runner-scale-set-no-permission-service-account", ars.Spec.Template.Spec.ServiceAccountName)
assert.Equal(t, "test-runners-gha-rs-no-permission", ars.Spec.Template.Spec.ServiceAccountName)
assert.Empty(t, ars.Annotations[actionsgithubcom.AnnotationKeyKubernetesModeServiceAccountName]) // no finalizer protections in place
}

func TestTemplateRenderedSetServiceAccountToKubeMode(t *testing.T) {
@@ -201,6 +211,7 @@ func TestTemplateRenderedSetServiceAccountToKubeMode(t *testing.T) {
namespaceName := "test-" + strings.ToLower(random.UniqueId())

options := &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345",
@@ -216,14 +227,18 @@ func TestTemplateRenderedSetServiceAccountToKubeMode(t *testing.T) {
helm.UnmarshalK8SYaml(t, output, &serviceAccount)

assert.Equal(t, namespaceName, serviceAccount.Namespace)
assert.Equal(t, "test-runners-gha-runner-scale-set-kube-mode-service-account", serviceAccount.Name)
assert.Equal(t, "test-runners-gha-rs-kube-mode", serviceAccount.Name)
assert.Equal(t, "actions.github.com/cleanup-protection", serviceAccount.Finalizers[0])

output = helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/kube_mode_role.yaml"})
var role rbacv1.Role
helm.UnmarshalK8SYaml(t, output, &role)

assert.Equal(t, namespaceName, role.Namespace)
assert.Equal(t, "test-runners-gha-runner-scale-set-kube-mode-role", role.Name)
assert.Equal(t, "test-runners-gha-rs-kube-mode", role.Name)

assert.Equal(t, "actions.github.com/cleanup-protection", role.Finalizers[0])

assert.Len(t, role.Rules, 5, "kube mode role should have 5 rules")
assert.Equal(t, "pods", role.Rules[0].Resources[0])
assert.Equal(t, "pods/exec", role.Rules[1].Resources[0])
@@ -236,18 +251,21 @@ func TestTemplateRenderedSetServiceAccountToKubeMode(t *testing.T) {
helm.UnmarshalK8SYaml(t, output, &roleBinding)

assert.Equal(t, namespaceName, roleBinding.Namespace)
assert.Equal(t, "test-runners-gha-runner-scale-set-kube-mode-role", roleBinding.Name)
assert.Equal(t, "test-runners-gha-rs-kube-mode", roleBinding.Name)
assert.Len(t, roleBinding.Subjects, 1)
assert.Equal(t, "test-runners-gha-runner-scale-set-kube-mode-service-account", roleBinding.Subjects[0].Name)
assert.Equal(t, "test-runners-gha-rs-kube-mode", roleBinding.Subjects[0].Name)
assert.Equal(t, namespaceName, roleBinding.Subjects[0].Namespace)
assert.Equal(t, "test-runners-gha-runner-scale-set-kube-mode-role", roleBinding.RoleRef.Name)
assert.Equal(t, "test-runners-gha-rs-kube-mode", roleBinding.RoleRef.Name)
assert.Equal(t, "Role", roleBinding.RoleRef.Kind)
assert.Equal(t, "actions.github.com/cleanup-protection", serviceAccount.Finalizers[0])

output = helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"})
var ars v1alpha1.AutoscalingRunnerSet
helm.UnmarshalK8SYaml(t, output, &ars)

assert.Equal(t, "test-runners-gha-runner-scale-set-kube-mode-service-account", ars.Spec.Template.Spec.ServiceAccountName)
expectedServiceAccountName := "test-runners-gha-rs-kube-mode"
assert.Equal(t, expectedServiceAccountName, ars.Spec.Template.Spec.ServiceAccountName)
assert.Equal(t, expectedServiceAccountName, ars.Annotations[actionsgithubcom.AnnotationKeyKubernetesModeServiceAccountName])
}

func TestTemplateRenderedUserProvideSetServiceAccount(t *testing.T) {
@@ -261,6 +279,7 @@ func TestTemplateRenderedUserProvideSetServiceAccount(t *testing.T) {
namespaceName := "test-" + strings.ToLower(random.UniqueId())

options := &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345",
@@ -279,6 +298,7 @@ func TestTemplateRenderedUserProvideSetServiceAccount(t *testing.T) {
helm.UnmarshalK8SYaml(t, output, &ars)

assert.Equal(t, "test-service-account", ars.Spec.Template.Spec.ServiceAccountName)
assert.Empty(t, ars.Annotations[actionsgithubcom.AnnotationKeyKubernetesModeServiceAccountName])
}

func TestTemplateRenderedAutoScalingRunnerSet(t *testing.T) {
@@ -292,6 +312,7 @@ func TestTemplateRenderedAutoScalingRunnerSet(t *testing.T) {
namespaceName := "test-" + strings.ToLower(random.UniqueId())

options := &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345",
@@ -309,14 +330,14 @@ func TestTemplateRenderedAutoScalingRunnerSet(t *testing.T) {
assert.Equal(t, namespaceName, ars.Namespace)
assert.Equal(t, "test-runners", ars.Name)

assert.Equal(t, "gha-runner-scale-set", ars.Labels["app.kubernetes.io/name"])
assert.Equal(t, "gha-rs", ars.Labels["app.kubernetes.io/name"])
assert.Equal(t, "test-runners", ars.Labels["app.kubernetes.io/instance"])
assert.Equal(t, "gha-runner-scale-set", ars.Labels["app.kubernetes.io/part-of"])
assert.Equal(t, "gha-rs", ars.Labels["app.kubernetes.io/part-of"])
assert.Equal(t, "autoscaling-runner-set", ars.Labels["app.kubernetes.io/component"])
assert.NotEmpty(t, ars.Labels["app.kubernetes.io/version"])

assert.Equal(t, "https://github.com/actions", ars.Spec.GitHubConfigUrl)
assert.Equal(t, "test-runners-gha-runner-scale-set-github-secret", ars.Spec.GitHubConfigSecret)
assert.Equal(t, "test-runners-gha-rs-github-secret", ars.Spec.GitHubConfigSecret)

assert.Empty(t, ars.Spec.RunnerGroup, "RunnerGroup should be empty")

@@ -343,6 +364,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_RunnerScaleSetName(t *testing.T) {
namespaceName := "test-" + strings.ToLower(random.UniqueId())

options := &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345",
@@ -361,10 +383,10 @@ func TestTemplateRenderedAutoScalingRunnerSet_RunnerScaleSetName(t *testing.T) {
assert.Equal(t, namespaceName, ars.Namespace)
assert.Equal(t, "test-runners", ars.Name)

assert.Equal(t, "gha-runner-scale-set", ars.Labels["app.kubernetes.io/name"])
assert.Equal(t, "gha-rs", ars.Labels["app.kubernetes.io/name"])
assert.Equal(t, "test-runners", ars.Labels["app.kubernetes.io/instance"])
assert.Equal(t, "https://github.com/actions", ars.Spec.GitHubConfigUrl)
assert.Equal(t, "test-runners-gha-runner-scale-set-github-secret", ars.Spec.GitHubConfigSecret)
assert.Equal(t, "test-runners-gha-rs-github-secret", ars.Spec.GitHubConfigSecret)
assert.Equal(t, "test-runner-scale-set-name", ars.Spec.RunnerScaleSetName)

assert.Empty(t, ars.Spec.RunnerGroup, "RunnerGroup should be empty")
@@ -392,6 +414,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_ProvideMetadata(t *testing.T) {
namespaceName := "test-" + strings.ToLower(random.UniqueId())

options := &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345",
@@ -439,6 +462,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_MaxRunnersValidationError(t *testi
namespaceName := "test-" + strings.ToLower(random.UniqueId())

options := &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345",
@@ -466,6 +490,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_MinRunnersValidationError(t *testi
namespaceName := "test-" + strings.ToLower(random.UniqueId())

options := &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345",
@@ -494,6 +519,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_MinMaxRunnersValidationError(t *te
namespaceName := "test-" + strings.ToLower(random.UniqueId())

options := &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345",
@@ -522,6 +548,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_MinMaxRunnersValidationSameValue(t
namespaceName := "test-" + strings.ToLower(random.UniqueId())

options := &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345",
@@ -553,6 +580,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_MinMaxRunnersValidation_OnlyMin(t
namespaceName := "test-" + strings.ToLower(random.UniqueId())

options := &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345",
@@ -583,6 +611,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_MinMaxRunnersValidation_OnlyMax(t
namespaceName := "test-" + strings.ToLower(random.UniqueId())

options := &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345",
@@ -616,6 +645,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_MinMaxRunners_FromValuesFile(t *te
namespaceName := "test-" + strings.ToLower(random.UniqueId())

options := &helm.Options{
Logger: logger.Discard,
ValuesFiles: []string{testValuesPath},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
@@ -643,6 +673,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_ExtraVolumes(t *testing.T) {
namespaceName := "test-" + strings.ToLower(random.UniqueId())

options := &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
@@ -663,6 +694,50 @@ func TestTemplateRenderedAutoScalingRunnerSet_ExtraVolumes(t *testing.T) {
assert.Equal(t, "/data", ars.Spec.Template.Spec.Volumes[2].HostPath.Path, "Volume host path should be /data")
}

func TestTemplateRenderedAutoScalingRunnerSet_DinD_ExtraInitContainers(t *testing.T) {
t.Parallel()

// Path to the helm chart we will test
helmChartPath, err := filepath.Abs("../../gha-runner-scale-set")
require.NoError(t, err)

testValuesPath, err := filepath.Abs("../tests/values_dind_extra_init_containers.yaml")
require.NoError(t, err)

releaseName := "test-runners"
namespaceName := "test-" + strings.ToLower(random.UniqueId())

options := &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
},
ValuesFiles: []string{testValuesPath},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}

output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"})

var ars v1alpha1.AutoscalingRunnerSet
helm.UnmarshalK8SYaml(t, output, &ars)

assert.Len(t, ars.Spec.Template.Spec.InitContainers, 3, "InitContainers should be 3")
assert.Equal(t, "kube-init", ars.Spec.Template.Spec.InitContainers[1].Name, "InitContainers[1] Name should be kube-init")
assert.Equal(t, "runner-image:latest", ars.Spec.Template.Spec.InitContainers[1].Image, "InitContainers[1] Image should be runner-image:latest")
assert.Equal(t, "sudo", ars.Spec.Template.Spec.InitContainers[1].Command[0], "InitContainers[1] Command[0] should be sudo")
assert.Equal(t, "chown", ars.Spec.Template.Spec.InitContainers[1].Command[1], "InitContainers[1] Command[1] should be chown")
assert.Equal(t, "-R", ars.Spec.Template.Spec.InitContainers[1].Command[2], "InitContainers[1] Command[2] should be -R")
assert.Equal(t, "1001:123", ars.Spec.Template.Spec.InitContainers[1].Command[3], "InitContainers[1] Command[3] should be 1001:123")
assert.Equal(t, "/home/runner/_work", ars.Spec.Template.Spec.InitContainers[1].Command[4], "InitContainers[1] Command[4] should be /home/runner/_work")
assert.Equal(t, "work", ars.Spec.Template.Spec.InitContainers[1].VolumeMounts[0].Name, "InitContainers[1] VolumeMounts[0] Name should be work")
assert.Equal(t, "/home/runner/_work", ars.Spec.Template.Spec.InitContainers[1].VolumeMounts[0].MountPath, "InitContainers[1] VolumeMounts[0] MountPath should be /home/runner/_work")

assert.Equal(t, "ls", ars.Spec.Template.Spec.InitContainers[2].Name, "InitContainers[2] Name should be ls")
assert.Equal(t, "ubuntu:latest", ars.Spec.Template.Spec.InitContainers[2].Image, "InitContainers[2] Image should be ubuntu:latest")
assert.Equal(t, "ls", ars.Spec.Template.Spec.InitContainers[2].Command[0], "InitContainers[2] Command[0] should be ls")
}
|
||||
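The test above pins the exact order of Spec.Template.Spec.InitContainers: the chart's own dind init container sits at index 0 and the two entries from the values file land at indexes 1 and 2. A minimal sketch, under that same ordering assumption, of a helper that would collapse the per-field assertions:

// assertInitContainer is a sketch of a helper one could extract from the
// assertions above. The index convention (chart-provided init container first,
// values-file entries appended) is inferred from this test, not from documented
// chart behavior.
func assertInitContainer(t *testing.T, ars v1alpha1.AutoscalingRunnerSet, idx int, name, image string, command ...string) {
	t.Helper()
	c := ars.Spec.Template.Spec.InitContainers[idx]
	assert.Equal(t, name, c.Name)
	assert.Equal(t, image, c.Image)
	assert.Equal(t, command, c.Command)
}

With it, assertInitContainer(t, ars, 1, "kube-init", "runner-image:latest", "sudo", "chown", "-R", "1001:123", "/home/runner/_work") replaces five of the assertions above.
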
func TestTemplateRenderedAutoScalingRunnerSet_DinD_ExtraVolumes(t *testing.T) {
t.Parallel()

@@ -677,6 +752,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_DinD_ExtraVolumes(t *testing.T) {
namespaceName := "test-" + strings.ToLower(random.UniqueId())

options := &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
@@ -713,6 +789,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_K8S_ExtraVolumes(t *testing.T) {
namespaceName := "test-" + strings.ToLower(random.UniqueId())

options := &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
@@ -744,6 +821,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_EnableDinD(t *testing.T) {
namespaceName := "test-" + strings.ToLower(random.UniqueId())

options := &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345",
@@ -762,10 +840,10 @@ func TestTemplateRenderedAutoScalingRunnerSet_EnableDinD(t *testing.T) {
assert.Equal(t, namespaceName, ars.Namespace)
assert.Equal(t, "test-runners", ars.Name)

assert.Equal(t, "gha-runner-scale-set", ars.Labels["app.kubernetes.io/name"])
assert.Equal(t, "gha-rs", ars.Labels["app.kubernetes.io/name"])
assert.Equal(t, "test-runners", ars.Labels["app.kubernetes.io/instance"])
assert.Equal(t, "https://github.com/actions", ars.Spec.GitHubConfigUrl)
assert.Equal(t, "test-runners-gha-runner-scale-set-github-secret", ars.Spec.GitHubConfigSecret)
assert.Equal(t, "test-runners-gha-rs-github-secret", ars.Spec.GitHubConfigSecret)

assert.Empty(t, ars.Spec.RunnerGroup, "RunnerGroup should be empty")

@@ -835,6 +913,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_EnableKubernetesMode(t *testing.T)
namespaceName := "test-" + strings.ToLower(random.UniqueId())

options := &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345",
@@ -853,10 +932,10 @@ func TestTemplateRenderedAutoScalingRunnerSet_EnableKubernetesMode(t *testing.T)
assert.Equal(t, namespaceName, ars.Namespace)
assert.Equal(t, "test-runners", ars.Name)

assert.Equal(t, "gha-runner-scale-set", ars.Labels["app.kubernetes.io/name"])
assert.Equal(t, "gha-rs", ars.Labels["app.kubernetes.io/name"])
assert.Equal(t, "test-runners", ars.Labels["app.kubernetes.io/instance"])
assert.Equal(t, "https://github.com/actions", ars.Spec.GitHubConfigUrl)
assert.Equal(t, "test-runners-gha-runner-scale-set-github-secret", ars.Spec.GitHubConfigSecret)
assert.Equal(t, "test-runners-gha-rs-github-secret", ars.Spec.GitHubConfigSecret)

assert.Empty(t, ars.Spec.RunnerGroup, "RunnerGroup should be empty")
assert.Nil(t, ars.Spec.MinRunners, "MinRunners should be nil")
@@ -892,6 +971,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_UsePredefinedSecret(t *testing.T)
namespaceName := "test-" + strings.ToLower(random.UniqueId())

options := &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret": "pre-defined-secrets",
@@ -909,7 +989,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_UsePredefinedSecret(t *testing.T)
assert.Equal(t, namespaceName, ars.Namespace)
assert.Equal(t, "test-runners", ars.Name)

assert.Equal(t, "gha-runner-scale-set", ars.Labels["app.kubernetes.io/name"])
assert.Equal(t, "gha-rs", ars.Labels["app.kubernetes.io/name"])
assert.Equal(t, "test-runners", ars.Labels["app.kubernetes.io/instance"])
assert.Equal(t, "https://github.com/actions", ars.Spec.GitHubConfigUrl)
assert.Equal(t, "pre-defined-secrets", ars.Spec.GitHubConfigSecret)
@@ -926,6 +1006,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_ErrorOnEmptyPredefinedSecret(t *te
namespaceName := "test-" + strings.ToLower(random.UniqueId())

options := &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret": "",
@@ -952,6 +1033,7 @@ func TestTemplateRenderedWithProxy(t *testing.T) {
namespaceName := "test-" + strings.ToLower(random.UniqueId())

options := &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret": "pre-defined-secrets",
@@ -1015,6 +1097,7 @@ func TestTemplateRenderedWithTLS(t *testing.T) {
t.Run("providing githubServerTLS.runnerMountPath", func(t *testing.T) {
t.Run("mode: default", func(t *testing.T) {
options := &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret": "pre-defined-secrets",
@@ -1073,6 +1156,7 @@ func TestTemplateRenderedWithTLS(t *testing.T) {

t.Run("mode: dind", func(t *testing.T) {
options := &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret": "pre-defined-secrets",
@@ -1132,6 +1216,7 @@ func TestTemplateRenderedWithTLS(t *testing.T) {

t.Run("mode: kubernetes", func(t *testing.T) {
options := &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret": "pre-defined-secrets",
@@ -1193,6 +1278,7 @@ func TestTemplateRenderedWithTLS(t *testing.T) {
t.Run("without providing githubServerTLS.runnerMountPath", func(t *testing.T) {
t.Run("mode: default", func(t *testing.T) {
options := &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret": "pre-defined-secrets",
@@ -1247,6 +1333,7 @@ func TestTemplateRenderedWithTLS(t *testing.T) {

t.Run("mode: dind", func(t *testing.T) {
options := &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret": "pre-defined-secrets",
@@ -1302,6 +1389,7 @@ func TestTemplateRenderedWithTLS(t *testing.T) {

t.Run("mode: kubernetes", func(t *testing.T) {
options := &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret": "pre-defined-secrets",
@@ -1391,6 +1479,7 @@ func TestTemplateNamingConstraints(t *testing.T) {
for name, tc := range tt {
t.Run(name, func(t *testing.T) {
options := &helm.Options{
Logger: logger.Discard,
SetValues: setValues,
KubectlOptions: k8s.NewKubectlOptions("", "", tc.namespaceName),
}
@@ -1412,6 +1501,7 @@ func TestTemplateRenderedGitHubConfigUrlEndsWIthSlash(t *testing.T) {
namespaceName := "test-" + strings.ToLower(random.UniqueId())

options := &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions/",
"githubConfigSecret.github_token": "gh_token12345",
@@ -1442,6 +1532,7 @@ func TestTemplate_CreateManagerRole(t *testing.T) {
namespaceName := "test-" + strings.ToLower(random.UniqueId())

options := &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345",
@@ -1457,8 +1548,12 @@ func TestTemplate_CreateManagerRole(t *testing.T) {
helm.UnmarshalK8SYaml(t, output, &managerRole)

assert.Equal(t, namespaceName, managerRole.Namespace, "namespace should match the namespace of the Helm release")
assert.Equal(t, "test-runners-gha-runner-scale-set-manager-role", managerRole.Name)
assert.Equal(t, 5, len(managerRole.Rules))
assert.Equal(t, "test-runners-gha-rs-manager", managerRole.Name)
assert.Equal(t, "actions.github.com/cleanup-protection", managerRole.Finalizers[0])
assert.Equal(t, 6, len(managerRole.Rules))

var ars v1alpha1.AutoscalingRunnerSet
helm.UnmarshalK8SYaml(t, output, &ars)
}

func TestTemplate_CreateManagerRole_UseConfigMaps(t *testing.T) {
@@ -1472,6 +1567,7 @@ func TestTemplate_CreateManagerRole_UseConfigMaps(t *testing.T) {
namespaceName := "test-" + strings.ToLower(random.UniqueId())

options := &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345",
@@ -1488,9 +1584,10 @@ func TestTemplate_CreateManagerRole_UseConfigMaps(t *testing.T) {
helm.UnmarshalK8SYaml(t, output, &managerRole)

assert.Equal(t, namespaceName, managerRole.Namespace, "namespace should match the namespace of the Helm release")
assert.Equal(t, "test-runners-gha-runner-scale-set-manager-role", managerRole.Name)
assert.Equal(t, 6, len(managerRole.Rules))
assert.Equal(t, "configmaps", managerRole.Rules[5].Resources[0])
assert.Equal(t, "test-runners-gha-rs-manager", managerRole.Name)
assert.Equal(t, "actions.github.com/cleanup-protection", managerRole.Finalizers[0])
assert.Equal(t, 7, len(managerRole.Rules))
assert.Equal(t, "configmaps", managerRole.Rules[6].Resources[0])
}

func TestTemplate_CreateManagerRoleBinding(t *testing.T) {
@@ -1504,6 +1601,7 @@ func TestTemplate_CreateManagerRoleBinding(t *testing.T) {
namespaceName := "test-" + strings.ToLower(random.UniqueId())

options := &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345",
@@ -1519,8 +1617,9 @@ func TestTemplate_CreateManagerRoleBinding(t *testing.T) {
helm.UnmarshalK8SYaml(t, output, &managerRoleBinding)

assert.Equal(t, namespaceName, managerRoleBinding.Namespace, "namespace should match the namespace of the Helm release")
assert.Equal(t, "test-runners-gha-runner-scale-set-manager-role-binding", managerRoleBinding.Name)
assert.Equal(t, "test-runners-gha-runner-scale-set-manager-role", managerRoleBinding.RoleRef.Name)
assert.Equal(t, "test-runners-gha-rs-manager", managerRoleBinding.Name)
assert.Equal(t, "test-runners-gha-rs-manager", managerRoleBinding.RoleRef.Name)
assert.Equal(t, "actions.github.com/cleanup-protection", managerRoleBinding.Finalizers[0])
assert.Equal(t, "arc", managerRoleBinding.Subjects[0].Name)
assert.Equal(t, "arc-system", managerRoleBinding.Subjects[0].Namespace)
}
@@ -1539,6 +1638,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_ExtraContainers(t *testing.T) {
namespaceName := "test-" + strings.ToLower(random.UniqueId())

options := &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
@@ -1586,6 +1686,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_ExtraPodSpec(t *testing.T) {
namespaceName := "test-" + strings.ToLower(random.UniqueId())

options := &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
@@ -1619,6 +1720,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_DinDMergePodSpec(t *testing.T) {
namespaceName := "test-" + strings.ToLower(random.UniqueId())

options := &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
@@ -1664,6 +1766,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_KubeModeMergePodSpec(t *testing.T)
namespaceName := "test-" + strings.ToLower(random.UniqueId())

options := &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
@@ -1692,3 +1795,107 @@ func TestTemplateRenderedAutoScalingRunnerSet_KubeModeMergePodSpec(t *testing.T)
assert.Equal(t, "others", ars.Spec.Template.Spec.Containers[0].VolumeMounts[1].Name, "VolumeMount name should be others")
assert.Equal(t, "/others", ars.Spec.Template.Spec.Containers[0].VolumeMounts[1].MountPath, "VolumeMount mountPath should be /others")
}

func TestTemplateRenderedAutoscalingRunnerSetAnnotation_GitHubSecret(t *testing.T) {
t.Parallel()

// Path to the helm chart we will test
helmChartPath, err := filepath.Abs("../../gha-runner-scale-set")
require.NoError(t, err)

releaseName := "test-runners"
namespaceName := "test-" + strings.ToLower(random.UniqueId())

annotationExpectedTests := map[string]*helm.Options{
"GitHub token": {
Logger: logger.Discard,
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
},
"GitHub app": {
Logger: logger.Discard,
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_app_id": "10",
"githubConfigSecret.github_app_installation_id": "100",
"githubConfigSecret.github_app_private_key": "private_key",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
},
}

for name, options := range annotationExpectedTests {
t.Run("Annotation set: "+name, func(t *testing.T) {
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"})
var autoscalingRunnerSet v1alpha1.AutoscalingRunnerSet
helm.UnmarshalK8SYaml(t, output, &autoscalingRunnerSet)

assert.NotEmpty(t, autoscalingRunnerSet.Annotations[actionsgithubcom.AnnotationKeyGitHubSecretName])
})
}

t.Run("Annotation should not be set", func(t *testing.T) {
options := &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret": "pre-defined-secret",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"})
var autoscalingRunnerSet v1alpha1.AutoscalingRunnerSet
helm.UnmarshalK8SYaml(t, output, &autoscalingRunnerSet)

assert.Empty(t, autoscalingRunnerSet.Annotations[actionsgithubcom.AnnotationKeyGitHubSecretName])
})
}

func TestTemplateRenderedAutoscalingRunnerSetAnnotation_KubernetesModeCleanup(t *testing.T) {
t.Parallel()

// Path to the helm chart we will test
helmChartPath, err := filepath.Abs("../../gha-runner-scale-set")
require.NoError(t, err)

releaseName := "test-runners"
namespaceName := "test-" + strings.ToLower(random.UniqueId())

options := &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
"containerMode.type": "kubernetes",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}

output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"})
var autoscalingRunnerSet v1alpha1.AutoscalingRunnerSet
helm.UnmarshalK8SYaml(t, output, &autoscalingRunnerSet)

annotationValues := map[string]string{
actionsgithubcom.AnnotationKeyGitHubSecretName: "test-runners-gha-rs-github-secret",
actionsgithubcom.AnnotationKeyManagerRoleName: "test-runners-gha-rs-manager",
actionsgithubcom.AnnotationKeyManagerRoleBindingName: "test-runners-gha-rs-manager",
actionsgithubcom.AnnotationKeyKubernetesModeServiceAccountName: "test-runners-gha-rs-kube-mode",
actionsgithubcom.AnnotationKeyKubernetesModeRoleName: "test-runners-gha-rs-kube-mode",
actionsgithubcom.AnnotationKeyKubernetesModeRoleBindingName: "test-runners-gha-rs-kube-mode",
}

for annotation, value := range annotationValues {
assert.Equal(t, value, autoscalingRunnerSet.Annotations[annotation], fmt.Sprintf("Annotation %q does not match the expected value", annotation))
}
}

charts/gha-runner-scale-set/tests/values_dind_extra_init_containers.yaml (new file)
@@ -0,0 +1,17 @@
githubConfigUrl: https://github.com/actions/actions-runner-controller
githubConfigSecret:
  github_token: test
template:
  spec:
    initContainers:
      - name: kube-init
        image: runner-image:latest
        command: ["sudo", "chown", "-R", "1001:123", "/home/runner/_work"]
        volumeMounts:
          - name: work
            mountPath: /home/runner/_work
      - name: ls
        image: ubuntu:latest
        command: ["ls"]
containerMode:
  type: dind
@@ -65,9 +65,15 @@ githubConfigSecret:
# certificateFrom:
# configMapKeyRef:
# name: config-map-name
# key: ca.pem
# key: ca.crt
# runnerMountPath: /usr/local/share/ca-certificates/

## Container mode is an object that provides out-of-box configuration
## for dind and kubernetes mode. Template will be modified as documented under the
## template object.
##
## If any customization is required for dind or kubernetes mode, containerMode should remain
## empty, and configuration should be applied to the template.
# containerMode:
# type: "dind" ## type can be set to dind or kubernetes
# ## the following is required when containerMode.type=kubernetes

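As the comments above spell out, containerMode is an all-or-nothing convenience: setting containerMode.type applies the documented template changes, and any further customization means leaving containerMode unset and editing the template directly. A minimal sketch, assuming the same terratest helpers and imports as the chart tests earlier in this diff, of enabling dind purely through values:

// Sketch only: renders the chart with containerMode.type=dind set via values,
// mirroring TestTemplateRenderedAutoScalingRunnerSet_EnableDinD above.
func TestEnableDinDViaValuesSketch(t *testing.T) {
	t.Parallel()

	helmChartPath, err := filepath.Abs("../../gha-runner-scale-set")
	require.NoError(t, err)

	options := &helm.Options{
		Logger: logger.Discard,
		SetValues: map[string]string{
			"githubConfigUrl":                    "https://github.com/actions",
			"githubConfigSecret.github_token":    "gh_token12345",
			"controllerServiceAccount.name":      "arc",
			"controllerServiceAccount.namespace": "arc-system",
			"containerMode.type":                 "dind", // the out-of-box dind setup
		},
		KubectlOptions: k8s.NewKubectlOptions("", "", "test-ns"),
	}

	output := helm.RenderTemplate(t, options, helmChartPath, "test-runners", []string{"templates/autoscalingrunnerset.yaml"})

	var ars v1alpha1.AutoscalingRunnerSet
	helm.UnmarshalK8SYaml(t, output, &ars)
	// Dind-specific assertions (init containers, volumes, and so on) would
	// follow here, as in the EnableDinD test in this diff.
}
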
@@ -114,7 +114,14 @@ func createSession(ctx context.Context, logger *logr.Logger, client actions.Acti
return runnerScaleSetSession, initialMessage, nil
}

return runnerScaleSetSession, nil, nil
initialMessage := &actions.RunnerScaleSetMessage{
MessageId: 0,
MessageType: "RunnerScaleSetJobMessages",
Statistics: runnerScaleSetSession.Statistics,
Body: "",
}

return runnerScaleSetSession, initialMessage, nil
}

func (m *AutoScalerClient) Close() error {

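With this change createSession always returns a synthetic initial message alongside the session: MessageId 0, an empty body, and the session statistics. A freshly started listener can therefore make a scale decision before the first real message arrives. A minimal sketch of how a consumer recognizes that message (the same check appears in processMessage later in this diff):

// isInitialStatisticsMessage reports whether msg is the synthetic message
// createSession now emits: no real message id, no body, statistics only.
func isInitialStatisticsMessage(msg *actions.RunnerScaleSetMessage) bool {
	return msg != nil && msg.MessageId == 0 && msg.Body == ""
}
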
@@ -37,7 +37,7 @@ func TestCreateSession(t *testing.T) {

require.NoError(t, err, "Error creating autoscaler client")
assert.Equal(t, session, session, "Session is not correct")
assert.Nil(t, asClient.initialMessage, "Initial message should be nil")
assert.NotNil(t, asClient.initialMessage, "Initial message should not be nil")
assert.Equal(t, int64(0), asClient.lastMessageId, "Last message id should be 0")
assert.True(t, mockActionsClient.AssertExpectations(t), "All expectations should be met")
}
@@ -188,7 +188,7 @@ func TestCreateSession_RetrySessionConflict(t *testing.T) {

require.NoError(t, err, "Error creating autoscaler client")
assert.Equal(t, session, session, "Session is not correct")
assert.Nil(t, asClient.initialMessage, "Initial message should be nil")
assert.NotNil(t, asClient.initialMessage, "Initial message should not be nil")
assert.Equal(t, int64(0), asClient.lastMessageId, "Last message id should be 0")
assert.True(t, mockActionsClient.AssertExpectations(t), "All expectations should be met")
}
@@ -334,6 +334,14 @@ func TestGetRunnerScaleSetMessage(t *testing.T) {
return nil
})

assert.NoError(t, err, "Error getting message")
assert.Equal(t, int64(0), asClient.lastMessageId, "Initial message")

err = asClient.GetRunnerScaleSetMessage(ctx, func(msg *actions.RunnerScaleSetMessage) error {
logger.Info("Message received", "messageId", msg.MessageId, "messageType", msg.MessageType, "body", msg.Body)
return nil
})

assert.NoError(t, err, "Error getting message")
assert.Equal(t, int64(1), asClient.lastMessageId, "Last message id should be updated")
assert.True(t, mockActionsClient.AssertExpectations(t), "All expectations should be met")
@@ -371,13 +379,21 @@ func TestGetRunnerScaleSetMessage_HandleFailed(t *testing.T) {
})
require.NoError(t, err, "Error creating autoscaler client")

// read initial message
err = asClient.GetRunnerScaleSetMessage(ctx, func(msg *actions.RunnerScaleSetMessage) error {
logger.Info("Message received", "messageId", msg.MessageId, "messageType", msg.MessageType, "body", msg.Body)
return nil
})

assert.NoError(t, err, "Error getting message")

err = asClient.GetRunnerScaleSetMessage(ctx, func(msg *actions.RunnerScaleSetMessage) error {
logger.Info("Message received", "messageId", msg.MessageId, "messageType", msg.MessageType, "body", msg.Body)
return fmt.Errorf("error")
})

assert.ErrorContains(t, err, "handle message failed. error", "Error getting message")
assert.Equal(t, int64(0), asClient.lastMessageId, "Last message id should be updated")
assert.Equal(t, int64(0), asClient.lastMessageId, "Last message id should not be updated")
assert.True(t, mockActionsClient.AssertExpectations(t), "All expectations should be met")
assert.True(t, mockSessionClient.AssertExpectations(t), "All expectations should be met")
}
@@ -513,6 +529,12 @@ func TestGetRunnerScaleSetMessage_RetryUntilGetMessage(t *testing.T) {
})
require.NoError(t, err, "Error creating autoscaler client")

err = asClient.GetRunnerScaleSetMessage(ctx, func(msg *actions.RunnerScaleSetMessage) error {
logger.Info("Message received", "messageId", msg.MessageId, "messageType", msg.MessageType, "body", msg.Body)
return nil
})
assert.NoError(t, err, "Error getting initial message")

err = asClient.GetRunnerScaleSetMessage(ctx, func(msg *actions.RunnerScaleSetMessage) error {
logger.Info("Message received", "messageId", msg.MessageId, "messageType", msg.MessageType, "body", msg.Body)
return nil
@@ -550,6 +572,12 @@ func TestGetRunnerScaleSetMessage_ErrorOnGetMessage(t *testing.T) {
})
require.NoError(t, err, "Error creating autoscaler client")

// process initial message
err = asClient.GetRunnerScaleSetMessage(ctx, func(msg *actions.RunnerScaleSetMessage) error {
return nil
})
assert.NoError(t, err, "Error getting initial message")

err = asClient.GetRunnerScaleSetMessage(ctx, func(msg *actions.RunnerScaleSetMessage) error {
return fmt.Errorf("Should not be called")
})
@@ -592,6 +620,12 @@ func TestDeleteRunnerScaleSetMessage_Error(t *testing.T) {
})
require.NoError(t, err, "Error creating autoscaler client")

err = asClient.GetRunnerScaleSetMessage(ctx, func(msg *actions.RunnerScaleSetMessage) error {
logger.Info("Message received", "messageId", msg.MessageId, "messageType", msg.MessageType, "body", msg.Body)
return nil
})
assert.NoError(t, err, "Error getting initial message")

err = asClient.GetRunnerScaleSetMessage(ctx, func(msg *actions.RunnerScaleSetMessage) error {
logger.Info("Message received", "messageId", msg.MessageId, "messageType", msg.MessageType, "body", msg.Body)
return nil

@@ -3,6 +3,7 @@ package main
import (
"context"
"encoding/json"
"errors"
"fmt"
"math"
"strings"
@@ -25,6 +26,31 @@ type Service struct {
kubeManager KubernetesManager
settings *ScaleSettings
currentRunnerCount int
metricsExporter metricsExporter
errs []error
}

func WithPrometheusMetrics(conf RunnerScaleSetListenerConfig) func(*Service) {
return func(svc *Service) {
parsedURL, err := actions.ParseGitHubConfigFromURL(conf.ConfigureUrl)
if err != nil {
svc.errs = append(svc.errs, err)
}

svc.metricsExporter.withBaseLabels(baseLabels{
scaleSetName: conf.EphemeralRunnerSetName,
scaleSetNamespace: conf.EphemeralRunnerSetNamespace,
enterprise: parsedURL.Enterprise,
organization: parsedURL.Organization,
repository: parsedURL.Repository,
})
}
}

func WithLogger(logger logr.Logger) func(*Service) {
return func(s *Service) {
s.logger = logger.WithName("service")
}
}

func NewService(
@@ -33,13 +59,13 @@ func NewService(
manager KubernetesManager,
settings *ScaleSettings,
options ...func(*Service),
) *Service {
) (*Service, error) {
s := &Service{
ctx: ctx,
rsClient: rsClient,
kubeManager: manager,
settings: settings,
currentRunnerCount: 0,
currentRunnerCount: -1, // force patch on startup
logger: logr.FromContextOrDiscard(ctx),
}

@@ -47,18 +73,14 @@ func NewService(
option(s)
}

return s
if len(s.errs) > 0 {
return nil, errors.Join(s.errs...)
}

return s, nil
}

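NewService now validates eagerly: options such as WithPrometheusMetrics append failures to s.errs instead of returning them, and the constructor joins whatever accumulated with errors.Join so a bad option cannot be silently swallowed. A self-contained sketch of the same error-collecting functional-options pattern (the widget names are made up):

package main

import "errors"

type widget struct {
	name string
	errs []error
}

type widgetOption func(*widget)

func withName(name string) widgetOption {
	return func(w *widget) {
		if name == "" {
			// record the problem; the constructor reports it later
			w.errs = append(w.errs, errors.New("name must not be empty"))
			return
		}
		w.name = name
	}
}

func newWidget(opts ...widgetOption) (*widget, error) {
	w := &widget{}
	for _, opt := range opts {
		opt(w)
	}
	if len(w.errs) > 0 {
		return nil, errors.Join(w.errs...) // errors.Join requires Go 1.20+
	}
	return w, nil
}
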
func (s *Service) Start() error {
if s.settings.MinRunners > 0 {
s.logger.Info("scale to match minimal runners.")
err := s.scaleForAssignedJobCount(0)
if err != nil {
return fmt.Errorf("could not scale to match minimal runners. %w", err)
}
}

for {
s.logger.Info("waiting for message...")
select {
@@ -89,11 +111,17 @@ func (s *Service) processMessage(message *actions.RunnerScaleSetMessage) error {
"busy runners", message.Statistics.TotalBusyRunners,
"idle runners", message.Statistics.TotalIdleRunners)

s.metricsExporter.publishStatistics(message.Statistics)

if message.MessageType != "RunnerScaleSetJobMessages" {
s.logger.Info("skip message with unknown message type.", "messageType", message.MessageType)
return nil
}

if message.MessageId == 0 && message.Body == "" { // initial message with statistics only
return s.scaleForAssignedJobCount(message.Statistics.TotalAssignedJobs)
}

var batchedMessages []json.RawMessage
if err := json.NewDecoder(strings.NewReader(message.Body)).Decode(&batchedMessages); err != nil {
return fmt.Errorf("could not decode job messages. %w", err)
@@ -114,27 +142,54 @@ func (s *Service) processMessage(message *actions.RunnerScaleSetMessage) error {
if err := json.Unmarshal(message, &jobAvailable); err != nil {
return fmt.Errorf("could not decode job available message. %w", err)
}
s.logger.Info("job available message received.", "RequestId", jobAvailable.RunnerRequestId)
s.logger.Info(
"job available message received.",
"RequestId",
jobAvailable.RunnerRequestId,
)
availableJobs = append(availableJobs, jobAvailable.RunnerRequestId)
case "JobAssigned":
var jobAssigned actions.JobAssigned
if err := json.Unmarshal(message, &jobAssigned); err != nil {
return fmt.Errorf("could not decode job assigned message. %w", err)
}
s.logger.Info("job assigned message received.", "RequestId", jobAssigned.RunnerRequestId)
s.logger.Info(
"job assigned message received.",
"RequestId",
jobAssigned.RunnerRequestId,
)
// s.metricsExporter.publishJobAssigned(&jobAssigned)
case "JobStarted":
var jobStarted actions.JobStarted
if err := json.Unmarshal(message, &jobStarted); err != nil {
return fmt.Errorf("could not decode job started message. %w", err)
}
s.logger.Info("job started message received.", "RequestId", jobStarted.RunnerRequestId, "RunnerId", jobStarted.RunnerId)
s.logger.Info(
"job started message received.",
"RequestId",
jobStarted.RunnerRequestId,
"RunnerId",
jobStarted.RunnerId,
)
s.metricsExporter.publishJobStarted(&jobStarted)
s.updateJobInfoForRunner(jobStarted)
case "JobCompleted":
var jobCompleted actions.JobCompleted
if err := json.Unmarshal(message, &jobCompleted); err != nil {
return fmt.Errorf("could not decode job completed message. %w", err)
}
s.logger.Info("job completed message received.", "RequestId", jobCompleted.RunnerRequestId, "Result", jobCompleted.Result, "RunnerId", jobCompleted.RunnerId, "RunnerName", jobCompleted.RunnerName)
s.logger.Info(
"job completed message received.",
"RequestId",
jobCompleted.RunnerRequestId,
"Result",
jobCompleted.Result,
"RunnerId",
jobCompleted.RunnerId,
"RunnerName",
jobCompleted.RunnerName,
)
s.metricsExporter.publishJobCompleted(&jobCompleted)
default:
s.logger.Info("unknown job message type.", "messageType", messageType.MessageType)
}
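The batch handling above is a two-pass decode: the body is first split into []json.RawMessage, then each element is peeked at for its message type before being unmarshalled into the concrete struct. A compact, self-contained sketch; the envelope field tag is an assumption inferred from the switch above:

package main

import (
	"encoding/json"
	"fmt"
)

// envelope captures only the discriminator field needed for dispatch.
type envelope struct {
	MessageType string `json:"messageType"` // assumed tag, mirroring the switch above
}

func dispatch(body string) error {
	var batched []json.RawMessage
	if err := json.Unmarshal([]byte(body), &batched); err != nil {
		return fmt.Errorf("could not decode job messages. %w", err)
	}
	for _, raw := range batched {
		var env envelope
		if err := json.Unmarshal(raw, &env); err != nil {
			return err
		}
		switch env.MessageType {
		case "JobAvailable":
			// second pass: json.Unmarshal(raw, &jobAvailable) into the concrete type
		default:
			// unknown types are logged and skipped, never fatal
		}
	}
	return nil
}
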
@@ -150,13 +205,15 @@ func (s *Service) processMessage(message *actions.RunnerScaleSetMessage) error {

func (s *Service) scaleForAssignedJobCount(count int) error {
targetRunnerCount := int(math.Max(math.Min(float64(s.settings.MaxRunners), float64(count)), float64(s.settings.MinRunners)))
s.metricsExporter.publishDesiredRunners(targetRunnerCount)
if targetRunnerCount != s.currentRunnerCount {
s.logger.Info("try scale runner request up/down base on assigned job count",
"assigned job", count,
"decision", targetRunnerCount,
"min", s.settings.MinRunners,
"max", s.settings.MaxRunners,
"currentRunnerCount", s.currentRunnerCount)
"currentRunnerCount", s.currentRunnerCount,
)
err := s.kubeManager.ScaleEphemeralRunnerSet(s.ctx, s.settings.Namespace, s.settings.ResourceName, targetRunnerCount)
if err != nil {
return fmt.Errorf("could not scale ephemeral runner set (%s/%s). %w", s.settings.Namespace, s.settings.ResourceName, err)
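The target computed above is simply count clamped to the inclusive range [MinRunners, MaxRunners]; the float64 round trip exists only because math.Min and math.Max operate on floats. An equivalent integer version:

// clamp returns count limited to [lo, hi], matching targetRunnerCount above
// without the float conversions.
func clamp(count, lo, hi int) int {
	if count < lo {
		return lo
	}
	if count > hi {
		return hi
	}
	return count
}

With min 1 and max 5, the bounds exercised by TestScaleForAssignedJobCount_ScaleWithinMinMax below: clamp(0, 1, 5) = 1, clamp(3, 1, 5) = 3, clamp(10, 1, 5) = 5.
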
@@ -177,7 +234,8 @@ func (s *Service) updateJobInfoForRunner(jobInfo actions.JobStarted) {
"workflowRef", jobInfo.JobWorkflowRef,
"workflowRunId", jobInfo.WorkflowRunId,
"jobDisplayName", jobInfo.JobDisplayName,
"requestId", jobInfo.RunnerRequestId)
"requestId", jobInfo.RunnerRequestId,
)
err := s.kubeManager.UpdateEphemeralRunnerWithJobInfo(s.ctx, s.settings.Namespace, jobInfo.RunnerName, jobInfo.OwnerName, jobInfo.RepositoryName, jobInfo.JobWorkflowRef, jobInfo.JobDisplayName, jobInfo.WorkflowRunId, jobInfo.RunnerRequestId)
if err != nil {
s.logger.Error(err, "could not update ephemeral runner with job info", "runnerName", jobInfo.RunnerName, "requestId", jobInfo.RunnerRequestId)

@@ -21,7 +21,7 @@ func TestNewService(t *testing.T) {

ctx, cancel := context.WithCancel(context.Background())
defer cancel()
service := NewService(
service, err := NewService(
ctx,
mockRsClient,
mockKubeManager,
@@ -36,6 +36,7 @@ func TestNewService(t *testing.T) {
},
)

require.NoError(t, err)
assert.Equal(t, logger, service.logger)
}

@@ -47,7 +48,7 @@ func TestStart(t *testing.T) {
require.NoError(t, log_err, "Error creating logger")

ctx, cancel := context.WithCancel(context.Background())
service := NewService(
service, err := NewService(
ctx,
mockRsClient,
mockKubeManager,
@@ -61,9 +62,11 @@ func TestStart(t *testing.T) {
s.logger = logger
},
)
require.NoError(t, err)

mockRsClient.On("GetRunnerScaleSetMessage", service.ctx, mock.Anything).Run(func(args mock.Arguments) { cancel() }).Return(nil).Once()

err := service.Start()
err = service.Start()

assert.NoError(t, err, "Unexpected error")
assert.True(t, mockRsClient.AssertExpectations(t), "All expectations should be met")
@@ -72,13 +75,14 @@ func TestStart(t *testing.T) {

func TestStart_ScaleToMinRunners(t *testing.T) {
mockRsClient := &MockRunnerScaleSetClient{}

mockKubeManager := &MockKubernetesManager{}
logger, log_err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText)
logger = logger.WithName(t.Name())
require.NoError(t, log_err, "Error creating logger")

ctx, cancel := context.WithCancel(context.Background())
service := NewService(
service, err := NewService(
ctx,
mockRsClient,
mockKubeManager,
@@ -92,11 +96,17 @@ func TestStart_ScaleToMinRunners(t *testing.T) {
s.logger = logger
},
)
require.NoError(t, err)

mockRsClient.On("GetRunnerScaleSetMessage", ctx, mock.Anything).Run(func(args mock.Arguments) {
_ = service.scaleForAssignedJobCount(5)
}).Return(nil)

mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 5).Run(func(args mock.Arguments) { cancel() }).Return(nil).Once()

err := service.Start()

err = service.Start()
assert.NoError(t, err, "Unexpected error")

assert.True(t, mockRsClient.AssertExpectations(t), "All expectations should be met")
assert.True(t, mockKubeManager.AssertExpectations(t), "All expectations should be met")
}
@@ -110,7 +120,7 @@ func TestStart_ScaleToMinRunnersFailed(t *testing.T) {

ctx, cancel := context.WithCancel(context.Background())
defer cancel()
service := NewService(
service, err := NewService(
ctx,
mockRsClient,
mockKubeManager,
@@ -124,11 +134,16 @@ func TestStart_ScaleToMinRunnersFailed(t *testing.T) {
s.logger = logger
},
)
mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 5).Return(fmt.Errorf("error")).Once()
require.NoError(t, err)

err := service.Start()
c := mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 5).Return(fmt.Errorf("error")).Once()
mockRsClient.On("GetRunnerScaleSetMessage", ctx, mock.Anything).Run(func(args mock.Arguments) {
_ = service.scaleForAssignedJobCount(5)
}).Return(c.ReturnArguments.Get(0))

assert.ErrorContains(t, err, "could not scale to match minimal runners", "Unexpected error")
err = service.Start()

assert.ErrorContains(t, err, "could not get and process message", "Unexpected error")
assert.True(t, mockRsClient.AssertExpectations(t), "All expectations should be met")
assert.True(t, mockKubeManager.AssertExpectations(t), "All expectations should be met")
}
@@ -141,7 +156,7 @@ func TestStart_GetMultipleMessages(t *testing.T) {
require.NoError(t, log_err, "Error creating logger")

ctx, cancel := context.WithCancel(context.Background())
service := NewService(
service, err := NewService(
ctx,
mockRsClient,
mockKubeManager,
@@ -155,10 +170,12 @@ func TestStart_GetMultipleMessages(t *testing.T) {
s.logger = logger
},
)
require.NoError(t, err)

mockRsClient.On("GetRunnerScaleSetMessage", service.ctx, mock.Anything).Return(nil).Times(5)
mockRsClient.On("GetRunnerScaleSetMessage", service.ctx, mock.Anything).Run(func(args mock.Arguments) { cancel() }).Return(nil).Once()

err := service.Start()
err = service.Start()

assert.NoError(t, err, "Unexpected error")
assert.True(t, mockRsClient.AssertExpectations(t), "All expectations should be met")
@@ -174,7 +191,7 @@ func TestStart_ErrorOnMessage(t *testing.T) {

ctx, cancel := context.WithCancel(context.Background())
defer cancel()
service := NewService(
service, err := NewService(
ctx,
mockRsClient,
mockKubeManager,
@@ -188,10 +205,12 @@ func TestStart_ErrorOnMessage(t *testing.T) {
s.logger = logger
},
)
require.NoError(t, err)

mockRsClient.On("GetRunnerScaleSetMessage", service.ctx, mock.Anything).Return(nil).Times(2)
mockRsClient.On("GetRunnerScaleSetMessage", service.ctx, mock.Anything).Return(fmt.Errorf("error")).Once()

err := service.Start()
err = service.Start()

assert.ErrorContains(t, err, "could not get and process message. error", "Unexpected error")
assert.True(t, mockRsClient.AssertExpectations(t), "All expectations should be met")
@@ -207,7 +226,7 @@ func TestProcessMessage_NoStatistic(t *testing.T) {

ctx, cancel := context.WithCancel(context.Background())
defer cancel()
service := NewService(
service, err := NewService(
ctx,
mockRsClient,
mockKubeManager,
@@ -221,8 +240,9 @@ func TestProcessMessage_NoStatistic(t *testing.T) {
s.logger = logger
},
)
require.NoError(t, err)

err := service.processMessage(&actions.RunnerScaleSetMessage{
err = service.processMessage(&actions.RunnerScaleSetMessage{
MessageId: 1,
MessageType: "test",
Body: "test",
@@ -242,7 +262,7 @@ func TestProcessMessage_IgnoreUnknownMessageType(t *testing.T) {

ctx, cancel := context.WithCancel(context.Background())
defer cancel()
service := NewService(
service, err := NewService(
ctx,
mockRsClient,
mockKubeManager,
@@ -256,8 +276,9 @@ func TestProcessMessage_IgnoreUnknownMessageType(t *testing.T) {
s.logger = logger
},
)
require.NoError(t, err)

err := service.processMessage(&actions.RunnerScaleSetMessage{
err = service.processMessage(&actions.RunnerScaleSetMessage{
MessageId: 1,
MessageType: "unknown",
Statistics: &actions.RunnerScaleSetStatistic{
@@ -280,7 +301,7 @@ func TestProcessMessage_InvalidBatchMessageJson(t *testing.T) {

ctx, cancel := context.WithCancel(context.Background())
defer cancel()
service := NewService(
service, err := NewService(
ctx,
mockRsClient,
mockKubeManager,
@@ -295,7 +316,9 @@ func TestProcessMessage_InvalidBatchMessageJson(t *testing.T) {
},
)

err := service.processMessage(&actions.RunnerScaleSetMessage{
require.NoError(t, err)

err = service.processMessage(&actions.RunnerScaleSetMessage{
MessageId: 1,
MessageType: "RunnerScaleSetJobMessages",
Statistics: &actions.RunnerScaleSetStatistic{
@@ -318,7 +341,7 @@ func TestProcessMessage_InvalidJobMessageJson(t *testing.T) {

ctx, cancel := context.WithCancel(context.Background())
defer cancel()
service := NewService(
service, err := NewService(
ctx,
mockRsClient,
mockKubeManager,
@@ -332,8 +355,9 @@ func TestProcessMessage_InvalidJobMessageJson(t *testing.T) {
s.logger = logger
},
)
require.NoError(t, err)

err := service.processMessage(&actions.RunnerScaleSetMessage{
err = service.processMessage(&actions.RunnerScaleSetMessage{
MessageId: 1,
MessageType: "RunnerScaleSetJobMessages",
Statistics: &actions.RunnerScaleSetStatistic{
@@ -356,7 +380,7 @@ func TestProcessMessage_MultipleMessages(t *testing.T) {

ctx, cancel := context.WithCancel(context.Background())
defer cancel()
service := NewService(
service, err := NewService(
ctx,
mockRsClient,
mockKubeManager,
@@ -370,10 +394,12 @@ func TestProcessMessage_MultipleMessages(t *testing.T) {
s.logger = logger
},
)
require.NoError(t, err)

mockRsClient.On("AcquireJobsForRunnerScaleSet", ctx, mock.MatchedBy(func(ids []int64) bool { return ids[0] == 3 && ids[1] == 4 })).Return(nil).Once()
mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 2).Run(func(args mock.Arguments) { cancel() }).Return(nil).Once()

err := service.processMessage(&actions.RunnerScaleSetMessage{
err = service.processMessage(&actions.RunnerScaleSetMessage{
MessageId: 1,
MessageType: "RunnerScaleSetJobMessages",
Statistics: &actions.RunnerScaleSetStatistic{
@@ -397,7 +423,7 @@ func TestProcessMessage_AcquireJobsFailed(t *testing.T) {

ctx, cancel := context.WithCancel(context.Background())
defer cancel()
service := NewService(
service, err := NewService(
ctx,
mockRsClient,
mockKubeManager,
@@ -411,9 +437,11 @@ func TestProcessMessage_AcquireJobsFailed(t *testing.T) {
s.logger = logger
},
)
require.NoError(t, err)

mockRsClient.On("AcquireJobsForRunnerScaleSet", ctx, mock.MatchedBy(func(ids []int64) bool { return ids[0] == 1 })).Return(fmt.Errorf("error")).Once()

err := service.processMessage(&actions.RunnerScaleSetMessage{
err = service.processMessage(&actions.RunnerScaleSetMessage{
MessageId: 1,
MessageType: "RunnerScaleSetJobMessages",
Statistics: &actions.RunnerScaleSetStatistic{
@@ -437,7 +465,7 @@ func TestScaleForAssignedJobCount_DeDupScale(t *testing.T) {

ctx, cancel := context.WithCancel(context.Background())
defer cancel()
service := NewService(
service, err := NewService(
ctx,
mockRsClient,
mockKubeManager,
@@ -451,9 +479,11 @@ func TestScaleForAssignedJobCount_DeDupScale(t *testing.T) {
s.logger = logger
},
)
require.NoError(t, err)

mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 2).Return(nil).Once()

err := service.scaleForAssignedJobCount(2)
err = service.scaleForAssignedJobCount(2)
require.NoError(t, err, "Unexpected error")
err = service.scaleForAssignedJobCount(2)
require.NoError(t, err, "Unexpected error")
@@ -476,7 +506,7 @@ func TestScaleForAssignedJobCount_ScaleWithinMinMax(t *testing.T) {

ctx, cancel := context.WithCancel(context.Background())
defer cancel()
service := NewService(
service, err := NewService(
ctx,
mockRsClient,
mockKubeManager,
@@ -490,13 +520,15 @@ func TestScaleForAssignedJobCount_ScaleWithinMinMax(t *testing.T) {
s.logger = logger
},
)
require.NoError(t, err)

mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 1).Return(nil).Once()
mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 3).Return(nil).Once()
mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 5).Return(nil).Once()
mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 1).Return(nil).Once()
mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 5).Return(nil).Once()

err := service.scaleForAssignedJobCount(0)
err = service.scaleForAssignedJobCount(0)
require.NoError(t, err, "Unexpected error")
err = service.scaleForAssignedJobCount(3)
require.NoError(t, err, "Unexpected error")
@@ -521,7 +553,7 @@ func TestScaleForAssignedJobCount_ScaleFailed(t *testing.T) {

ctx, cancel := context.WithCancel(context.Background())
defer cancel()
service := NewService(
service, err := NewService(
ctx,
mockRsClient,
mockKubeManager,
@@ -535,9 +567,11 @@ func TestScaleForAssignedJobCount_ScaleFailed(t *testing.T) {
s.logger = logger
},
)
require.NoError(t, err)

mockKubeManager.On("ScaleEphemeralRunnerSet", ctx, service.settings.Namespace, service.settings.ResourceName, 2).Return(fmt.Errorf("error"))

err := service.scaleForAssignedJobCount(2)
err = service.scaleForAssignedJobCount(2)

assert.ErrorContains(t, err, "could not scale ephemeral runner set (namespace/resource). error", "Unexpected error")
assert.True(t, mockRsClient.AssertExpectations(t), "All expectations should be met")
@@ -553,7 +587,7 @@ func TestProcessMessage_JobStartedMessage(t *testing.T) {

ctx, cancel := context.WithCancel(context.Background())
defer cancel()
service := NewService(
service, err := NewService(
ctx,
mockRsClient,
mockKubeManager,
@@ -567,12 +601,14 @@ func TestProcessMessage_JobStartedMessage(t *testing.T) {
s.logger = logger
},
)
require.NoError(t, err)

service.currentRunnerCount = 1

mockKubeManager.On("UpdateEphemeralRunnerWithJobInfo", ctx, service.settings.Namespace, "runner1", "owner1", "repo1", ".github/workflows/ci.yaml", "job1", int64(100), int64(3)).Run(func(args mock.Arguments) { cancel() }).Return(nil).Once()
mockRsClient.On("AcquireJobsForRunnerScaleSet", ctx, mock.MatchedBy(func(ids []int64) bool { return len(ids) == 0 })).Return(nil).Once()

err := service.processMessage(&actions.RunnerScaleSetMessage{
err = service.processMessage(&actions.RunnerScaleSetMessage{
MessageId: 1,
MessageType: "RunnerScaleSetJobMessages",
Statistics: &actions.RunnerScaleSetStatistic{
@@ -596,7 +632,7 @@ func TestProcessMessage_JobStartedMessageIgnoreRunnerUpdateError(t *testing.T) {

ctx, cancel := context.WithCancel(context.Background())
defer cancel()
service := NewService(
service, err := NewService(
ctx,
mockRsClient,
mockKubeManager,
@@ -610,12 +646,14 @@ func TestProcessMessage_JobStartedMessageIgnoreRunnerUpdateError(t *testing.T) {
s.logger = logger
},
)
require.NoError(t, err)

service.currentRunnerCount = 1

mockKubeManager.On("UpdateEphemeralRunnerWithJobInfo", ctx, service.settings.Namespace, "runner1", "owner1", "repo1", ".github/workflows/ci.yaml", "job1", int64(100), int64(3)).Run(func(args mock.Arguments) { cancel() }).Return(fmt.Errorf("error")).Once()
mockRsClient.On("AcquireJobsForRunnerScaleSet", ctx, mock.MatchedBy(func(ids []int64) bool { return len(ids) == 0 })).Return(nil).Once()

err := service.processMessage(&actions.RunnerScaleSetMessage{
err = service.processMessage(&actions.RunnerScaleSetMessage{
MessageId: 1,
MessageType: "RunnerScaleSetJobMessages",
Statistics: &actions.RunnerScaleSetStatistic{

@@ -25,13 +25,17 @@ import (
"os"
"os/signal"
"syscall"
"time"

"github.com/actions/actions-runner-controller/build"
"github.com/actions/actions-runner-controller/github/actions"
"github.com/actions/actions-runner-controller/logging"
"github.com/go-logr/logr"
"github.com/kelseyhightower/envconfig"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"golang.org/x/net/http/httpproxy"
"golang.org/x/sync/errgroup"
)

type RunnerScaleSetListenerConfig struct {
@@ -45,19 +49,34 @@ type RunnerScaleSetListenerConfig struct {
MaxRunners int `split_words:"true"`
MinRunners int `split_words:"true"`
RunnerScaleSetId int `split_words:"true"`
RunnerScaleSetName string `split_words:"true"`
ServerRootCA string `split_words:"true"`
LogLevel string `split_words:"true"`
LogFormat string `split_words:"true"`
MetricsAddr string `split_words:"true"`
MetricsEndpoint string `split_words:"true"`
}

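envconfig.Process("github", &rc) combined with the split_words tag maps each field to an upper-snake-case variable under the GITHUB_ prefix, so the new fields are read from GITHUB_LOG_LEVEL, GITHUB_LOG_FORMAT, GITHUB_METRICS_ADDR, and GITHUB_METRICS_ENDPOINT. A small standalone sketch of that lookup:

package main

import (
	"fmt"
	"os"

	"github.com/kelseyhightower/envconfig"
)

type listenerEnvSketch struct {
	LogLevel    string `split_words:"true"` // GITHUB_LOG_LEVEL
	MetricsAddr string `split_words:"true"` // GITHUB_METRICS_ADDR
}

func main() {
	os.Setenv("GITHUB_LOG_LEVEL", "debug")
	os.Setenv("GITHUB_METRICS_ADDR", ":8080")

	var cfg listenerEnvSketch
	if err := envconfig.Process("github", &cfg); err != nil {
		fmt.Fprintf(os.Stderr, "config error: %v\n", err)
		os.Exit(1)
	}
	fmt.Println(cfg.LogLevel, cfg.MetricsAddr) // debug :8080
}
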
func main() {
logger, err := logging.NewLogger(logging.LogLevelDebug, logging.LogFormatText)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: creating logger: %v\n", err)
var rc RunnerScaleSetListenerConfig
if err := envconfig.Process("github", &rc); err != nil {
fmt.Fprintf(os.Stderr, "Error: processing environment variables for RunnerScaleSetListenerConfig: %v\n", err)
os.Exit(1)
}

var rc RunnerScaleSetListenerConfig
if err := envconfig.Process("github", &rc); err != nil {
logger.Error(err, "Error: processing environment variables for RunnerScaleSetListenerConfig")
logLevel := string(logging.LogLevelDebug)
if rc.LogLevel != "" {
logLevel = rc.LogLevel
}

logFormat := string(logging.LogFormatText)
if rc.LogFormat != "" {
logFormat = rc.LogFormat
}

logger, err := logging.NewLogger(logLevel, logFormat)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: creating logger: %v\n", err)
os.Exit(1)
}

@@ -67,17 +86,95 @@ func main() {
os.Exit(1)
}

if err := run(rc, logger); err != nil {
logger.Error(err, "Run error")
ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
defer stop()

g, ctx := errgroup.WithContext(ctx)

g.Go(func() error {
opts := runOptions{
serviceOptions: []func(*Service){
WithLogger(logger),
},
}
opts.serviceOptions = append(opts.serviceOptions, WithPrometheusMetrics(rc))

return run(ctx, rc, logger, opts)
})

if len(rc.MetricsAddr) != 0 {
g.Go(func() error {
metricsServer := metricsServer{
rc: rc,
logger: logger,
}
g.Go(func() error {
<-ctx.Done()
return metricsServer.shutdown()
})
return metricsServer.listenAndServe()
})
}

if err := g.Wait(); err != nil {
logger.Error(err, "Error encountered")
os.Exit(1)
}
}

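main is now structured around a signal-scoped context and an errgroup: the listener and the metrics server run as sibling goroutines, a third goroutine turns context cancellation into a graceful server shutdown, and g.Wait surfaces the first failure. The same pattern in isolation (the server address and timeout are arbitrary):

package main

import (
	"context"
	"net/http"
	"os/signal"
	"syscall"
	"time"

	"golang.org/x/sync/errgroup"
)

func main() {
	// ctx is cancelled automatically on SIGINT or SIGTERM.
	ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
	defer stop()

	g, ctx := errgroup.WithContext(ctx)
	srv := &http.Server{Addr: ":8080"}

	// Serve until shut down; ListenAndServe returns http.ErrServerClosed then.
	g.Go(srv.ListenAndServe)

	g.Go(func() error {
		<-ctx.Done() // a signal arrived or a sibling goroutine failed
		shutdownCtx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
		defer cancel()
		return srv.Shutdown(shutdownCtx)
	})

	if err := g.Wait(); err != nil && err != http.ErrServerClosed {
		panic(err) // sketch only; the real main logs the error and exits 1
	}
}
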
func run(rc RunnerScaleSetListenerConfig, logger logr.Logger) error {
|
||||
// Create root context and hook with sigint and sigterm
|
||||
ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
|
||||
defer stop()
|
||||
type metricsServer struct {
|
||||
rc RunnerScaleSetListenerConfig
|
||||
logger logr.Logger
|
||||
srv *http.Server
|
||||
}
|
||||
|
||||
func (s *metricsServer) shutdown() error {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
|
||||
defer cancel()
|
||||
return s.srv.Shutdown(ctx)
|
||||
}
|
||||
|
||||
func (s *metricsServer) listenAndServe() error {
|
||||
reg := prometheus.NewRegistry()
|
||||
reg.MustRegister(
|
||||
// availableJobs,
|
||||
// acquiredJobs,
|
||||
assignedJobs,
|
||||
runningJobs,
|
||||
registeredRunners,
|
||||
busyRunners,
|
||||
minRunners,
|
||||
maxRunners,
|
||||
desiredRunners,
|
||||
idleRunners,
|
||||
startedJobsTotal,
|
||||
completedJobsTotal,
|
||||
// jobQueueDurationSeconds,
|
||||
jobStartupDurationSeconds,
|
||||
jobExecutionDurationSeconds,
|
||||
)
|
||||
|
||||
mux := http.NewServeMux()
|
||||
mux.Handle(
|
||||
s.rc.MetricsEndpoint,
|
||||
promhttp.HandlerFor(reg, promhttp.HandlerOpts{Registry: reg}),
|
||||
)
|
||||
|
||||
s.srv = &http.Server{
|
||||
Addr: s.rc.MetricsAddr,
|
||||
Handler: mux,
|
||||
}
|
||||
|
||||
s.logger.Info("Starting metrics server", "address", s.srv.Addr)
|
||||
return s.srv.ListenAndServe()
|
||||
}
|
||||
|
||||
type runOptions struct {
|
||||
serviceOptions []func(*Service)
|
||||
}
|
||||
|
||||
func run(ctx context.Context, rc RunnerScaleSetListenerConfig, logger logr.Logger, opts runOptions) error {
|
||||
// Create root context and hook with sigint and sigterm
|
||||
creds := &actions.ActionsAuth{}
|
||||
if rc.Token != "" {
|
||||
creds.Token = rc.Token
|
||||
@@ -119,9 +216,10 @@ func run(rc RunnerScaleSetListenerConfig, logger logr.Logger) error {
|
||||
MinRunners: rc.MinRunners,
|
||||
}
|
||||
|
||||
service := NewService(ctx, autoScalerClient, kubeManager, scaleSettings, func(s *Service) {
|
||||
s.logger = logger.WithName("service")
|
||||
})
|
||||
service, err := NewService(ctx, autoScalerClient, kubeManager, scaleSettings, opts.serviceOptions...)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create new service: %v", err)
|
||||
}
|
||||
|
||||
// Start listening for messages
|
||||
if err = service.Start(); err != nil {
|
||||
|
||||
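The new main wiring couples the listener service and the metrics server through a single errgroup: cancelling the signal context shuts both down, and the nested g.Go that waits on ctx.Done() turns context cancellation into a graceful server shutdown. A minimal standalone sketch of the same pattern (names and the address here are illustrative, not from the diff):

```go
package main

import (
	"context"
	"net/http"
	"os/signal"
	"syscall"

	"golang.org/x/sync/errgroup"
)

func main() {
	ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
	defer stop()

	srv := &http.Server{Addr: ":8080"}
	g, ctx := errgroup.WithContext(ctx)

	// Serve until the listener fails or the group's context is cancelled.
	g.Go(srv.ListenAndServe)

	// Translate context cancellation (SIGINT/SIGTERM) into a graceful shutdown.
	g.Go(func() error {
		<-ctx.Done()
		return srv.Shutdown(context.Background())
	})

	if err := g.Wait(); err != nil && err != http.ErrServerClosed {
		panic(err)
	}
}
```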
cmd/githubrunnerscalesetlistener/metrics.go (new file, 330 lines)
@@ -0,0 +1,330 @@
package main

import (
	"strconv"

	"github.com/actions/actions-runner-controller/github/actions"
	"github.com/prometheus/client_golang/prometheus"
)

// label names
const (
	labelKeyRunnerScaleSetName      = "name"
	labelKeyRunnerScaleSetNamespace = "namespace"
	labelKeyEnterprise              = "enterprise"
	labelKeyOrganization            = "organization"
	labelKeyRepository              = "repository"
	labelKeyJobName                 = "job_name"
	labelKeyJobWorkflowRef          = "job_workflow_ref"
	labelKeyEventName               = "event_name"
	labelKeyJobResult               = "job_result"
	labelKeyRunnerID                = "runner_id"
	labelKeyRunnerName              = "runner_name"
)

const githubScaleSetSubsystem = "gha"

// labels
var (
	scaleSetLabels = []string{
		labelKeyRunnerScaleSetName,
		labelKeyRepository,
		labelKeyOrganization,
		labelKeyEnterprise,
		labelKeyRunnerScaleSetNamespace,
	}

	jobLabels = []string{
		labelKeyRepository,
		labelKeyOrganization,
		labelKeyEnterprise,
		labelKeyJobName,
		labelKeyJobWorkflowRef,
		labelKeyEventName,
	}

	completedJobsTotalLabels   = append(jobLabels, labelKeyJobResult, labelKeyRunnerID, labelKeyRunnerName)
	jobExecutionDurationLabels = append(jobLabels, labelKeyJobResult, labelKeyRunnerID, labelKeyRunnerName)
	startedJobsTotalLabels     = append(jobLabels, labelKeyRunnerID, labelKeyRunnerName)
	jobStartupDurationLabels   = append(jobLabels, labelKeyRunnerID, labelKeyRunnerName)
)

// metrics
var (
	// availableJobs = prometheus.NewGaugeVec(
	//	prometheus.GaugeOpts{
	//		Subsystem: githubScaleSetSubsystem,
	//		Name:      "available_jobs",
	//		Help:      "Number of jobs with `runs-on` matching the runner scale set name. Jobs are not yet assigned to the runner scale set.",
	//	},
	//	scaleSetLabels,
	// )
	//
	// acquiredJobs = prometheus.NewGaugeVec(
	//	prometheus.GaugeOpts{
	//		Subsystem: githubScaleSetSubsystem,
	//		Name:      "acquired_jobs",
	//		Help:      "Number of jobs acquired by the scale set.",
	//	},
	//	scaleSetLabels,
	// )

	assignedJobs = prometheus.NewGaugeVec(
		prometheus.GaugeOpts{
			Subsystem: githubScaleSetSubsystem,
			Name:      "assigned_jobs",
			Help:      "Number of jobs assigned to this scale set.",
		},
		scaleSetLabels,
	)

	runningJobs = prometheus.NewGaugeVec(
		prometheus.GaugeOpts{
			Subsystem: githubScaleSetSubsystem,
			Name:      "running_jobs",
			Help:      "Number of jobs running (or about to be run).",
		},
		scaleSetLabels,
	)

	registeredRunners = prometheus.NewGaugeVec(
		prometheus.GaugeOpts{
			Subsystem: githubScaleSetSubsystem,
			Name:      "registered_runners",
			Help:      "Number of runners registered by the scale set.",
		},
		scaleSetLabels,
	)

	busyRunners = prometheus.NewGaugeVec(
		prometheus.GaugeOpts{
			Subsystem: githubScaleSetSubsystem,
			Name:      "busy_runners",
			Help:      "Number of registered runners running a job.",
		},
		scaleSetLabels,
	)

	minRunners = prometheus.NewGaugeVec(
		prometheus.GaugeOpts{
			Subsystem: githubScaleSetSubsystem,
			Name:      "min_runners",
			Help:      "Minimum number of runners.",
		},
		scaleSetLabels,
	)

	maxRunners = prometheus.NewGaugeVec(
		prometheus.GaugeOpts{
			Subsystem: githubScaleSetSubsystem,
			Name:      "max_runners",
			Help:      "Maximum number of runners.",
		},
		scaleSetLabels,
	)

	desiredRunners = prometheus.NewGaugeVec(
		prometheus.GaugeOpts{
			Subsystem: githubScaleSetSubsystem,
			Name:      "desired_runners",
			Help:      "Number of runners desired by the scale set.",
		},
		scaleSetLabels,
	)

	idleRunners = prometheus.NewGaugeVec(
		prometheus.GaugeOpts{
			Subsystem: githubScaleSetSubsystem,
			Name:      "idle_runners",
			Help:      "Number of registered runners not running a job.",
		},
		scaleSetLabels,
	)

	startedJobsTotal = prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Subsystem: githubScaleSetSubsystem,
			Name:      "started_jobs_total",
			Help:      "Total number of jobs started.",
		},
		startedJobsTotalLabels,
	)

	completedJobsTotal = prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name:      "completed_jobs_total",
			Help:      "Total number of jobs completed.",
			Subsystem: githubScaleSetSubsystem,
		},
		completedJobsTotalLabels,
	)

	// jobQueueDurationSeconds = prometheus.NewHistogramVec(
	//	prometheus.HistogramOpts{
	//		Subsystem: githubScaleSetSubsystem,
	//		Name:      "job_queue_duration_seconds",
	//		Help:      "Time spent waiting for workflow jobs to get assigned to the scale set after queueing (in seconds).",
	//		Buckets:   runtimeBuckets,
	//	},
	//	jobLabels,
	// )

	jobStartupDurationSeconds = prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Subsystem: githubScaleSetSubsystem,
			Name:      "job_startup_duration_seconds",
			Help:      "Time spent waiting for workflow job to get started on the runner owned by the scale set (in seconds).",
			Buckets:   runtimeBuckets,
		},
		jobStartupDurationLabels,
	)

	jobExecutionDurationSeconds = prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Subsystem: githubScaleSetSubsystem,
			Name:      "job_execution_duration_seconds",
			Help:      "Time spent executing workflow jobs by the scale set (in seconds).",
			Buckets:   runtimeBuckets,
		},
		jobExecutionDurationLabels,
	)
)

var runtimeBuckets []float64 = []float64{
	0.01, 0.05, 0.1, 0.5, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
	12, 15, 18, 20, 25, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120,
	150, 180, 210, 240, 300, 360, 420, 480, 540, 600,
	900, 1200, 1800, 2400, 3000, 3600,
}

type metricsExporter struct {
	// Initialized during creation.
	baseLabels
}

type baseLabels struct {
	scaleSetName      string
	scaleSetNamespace string
	enterprise        string
	organization      string
	repository        string
}

func (b *baseLabels) jobLabels(jobBase *actions.JobMessageBase) prometheus.Labels {
	return prometheus.Labels{
		labelKeyEnterprise:     b.enterprise,
		labelKeyOrganization:   b.organization,
		labelKeyRepository:     b.repository,
		labelKeyJobName:        jobBase.JobDisplayName,
		labelKeyJobWorkflowRef: jobBase.JobWorkflowRef,
		labelKeyEventName:      jobBase.EventName,
	}
}

func (b *baseLabels) scaleSetLabels() prometheus.Labels {
	return prometheus.Labels{
		labelKeyRunnerScaleSetName:      b.scaleSetName,
		labelKeyRunnerScaleSetNamespace: b.scaleSetNamespace,
		labelKeyEnterprise:              b.enterprise,
		labelKeyOrganization:            b.organization,
		labelKeyRepository:              b.repository,
	}
}

func (b *baseLabels) completedJobLabels(msg *actions.JobCompleted) prometheus.Labels {
	l := b.jobLabels(&msg.JobMessageBase)
	l[labelKeyRunnerID] = strconv.Itoa(msg.RunnerId)
	l[labelKeyJobResult] = msg.Result
	l[labelKeyRunnerName] = msg.RunnerName
	return l
}

func (b *baseLabels) startedJobLabels(msg *actions.JobStarted) prometheus.Labels {
	l := b.jobLabels(&msg.JobMessageBase)
	l[labelKeyRunnerID] = strconv.Itoa(msg.RunnerId)
	l[labelKeyRunnerName] = msg.RunnerName
	return l
}

func (m *metricsExporter) withBaseLabels(base baseLabels) {
	m.baseLabels = base
}

func (m *metricsExporter) publishStatistics(stats *actions.RunnerScaleSetStatistic) {
	l := m.scaleSetLabels()

	// availableJobs.With(l).Set(float64(stats.TotalAvailableJobs))
	// acquiredJobs.With(l).Set(float64(stats.TotalAcquiredJobs))
	assignedJobs.With(l).Set(float64(stats.TotalAssignedJobs))
	runningJobs.With(l).Set(float64(stats.TotalRunningJobs))
	registeredRunners.With(l).Set(float64(stats.TotalRegisteredRunners))
	busyRunners.With(l).Set(float64(stats.TotalBusyRunners))
	idleRunners.With(l).Set(float64(stats.TotalIdleRunners))
}

func (m *metricsExporter) publishJobStarted(msg *actions.JobStarted) {
	l := m.startedJobLabels(msg)
	startedJobsTotal.With(l).Inc()

	startupDuration := msg.JobMessageBase.RunnerAssignTime.Unix() - msg.JobMessageBase.ScaleSetAssignTime.Unix()
	jobStartupDurationSeconds.With(l).Observe(float64(startupDuration))
}

// func (m *metricsExporter) publishJobAssigned(msg *actions.JobAssigned) {
//	l := m.jobLabels(&msg.JobMessageBase)
//	queueDuration := msg.JobMessageBase.ScaleSetAssignTime.Unix() - msg.JobMessageBase.QueueTime.Unix()
//	jobQueueDurationSeconds.With(l).Observe(float64(queueDuration))
// }

func (m *metricsExporter) publishJobCompleted(msg *actions.JobCompleted) {
	l := m.completedJobLabels(msg)
	completedJobsTotal.With(l).Inc()

	executionDuration := msg.JobMessageBase.FinishTime.Unix() - msg.JobMessageBase.RunnerAssignTime.Unix()
	jobExecutionDurationSeconds.With(l).Observe(float64(executionDuration))
}

func (m *metricsExporter) publishDesiredRunners(count int) {
	desiredRunners.With(m.scaleSetLabels()).Set(float64(count))
}
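For orientation, here is how the exporter above might be exercised. This is illustrative only and assumes the package above; the label values are hypothetical, while the RunnerScaleSetStatistic field names are taken from the calls in publishStatistics:

```go
// Hypothetical caller, same package as metrics.go above.
func exampleMetricsPublish() {
	var m metricsExporter
	m.withBaseLabels(baseLabels{
		scaleSetName:      "arc-runner-set", // hypothetical values
		scaleSetNamespace: "arc-runners",
		organization:      "my-org",
	})
	m.publishStatistics(&actions.RunnerScaleSetStatistic{
		TotalAssignedJobs:      3,
		TotalRunningJobs:       2,
		TotalRegisteredRunners: 5,
		TotalBusyRunners:       2,
		TotalIdleRunners:       3,
	})
	m.publishDesiredRunners(4) // sets gha_desired_runners for this scale set
}
```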
@@ -124,7 +124,7 @@ func main() {
	if watchNamespace == "" {
		logger.Info("-watch-namespace is empty. HorizontalRunnerAutoscalers in all the namespaces are watched, cached, and considered as scale targets.")
	} else {
-		logger.Info("-watch-namespace is %q. Only HorizontalRunnerAutoscalers in %q are watched, cached, and considered as scale targets.", watchNamespace, watchNamespace)
+		logger.Info(fmt.Sprintf("-watch-namespace is %q. Only HorizontalRunnerAutoscalers in %q are watched, cached, and considered as scale targets.", watchNamespace, watchNamespace))
	}

	ctrl.SetLogger(logger)
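The fix above is worth calling out: logr's Info method does not interpret printf verbs, and arguments after the message are treated as key/value pairs, so the original call would log the literal %q placeholders. Wrapping the message in fmt.Sprintf restores the intended formatting. A minimal illustration (hypothetical logger and value):

```go
// With a logr.Logger named logger and ns == "default":
logger.Info("-watch-namespace is %q.", ns)              // logs the literal "%q" plus an odd key/value list
logger.Info(fmt.Sprintf("-watch-namespace is %q.", ns)) // logs: -watch-namespace is "default".
```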
@@ -80,6 +80,9 @@ spec:
      image:
        description: Required
        type: string
+     imagePullPolicy:
+       description: Required
+       type: string
      imagePullSecrets:
        description: Required
        items:
@@ -113,7 +113,7 @@ spec:
        description: ScaleDownDelaySecondsAfterScaleUp is the approximate delay for a scale down followed by a scale up Used to prevent flapping (down->up->down->... loop)
        type: integer
      scaleTargetRef:
-       description: ScaleTargetRef sis the reference to scaled resource like RunnerDeployment
+       description: ScaleTargetRef is the reference to scaled resource like RunnerDeployment
        properties:
          kind:
            description: Kind is the type of resource being referenced
@@ -1497,6 +1497,12 @@ spec:
        type: integer
      dockerRegistryMirror:
        type: string
+     dockerVarRunVolumeSizeLimit:
+       anyOf:
+       - type: integer
+       - type: string
+       pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+       x-kubernetes-int-or-string: true
      dockerVolumeMounts:
        items:
          description: VolumeMount describes a mounting of a Volume within a container.
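The dockerVarRunVolumeSizeLimit field added here (and in the sibling CRDs below) is a Kubernetes int-or-string quantity: note the x-kubernetes-int-or-string marker and the quantity regex. On the Go side such fields are modelled with resource.Quantity; a small sketch, not taken from the diff:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// "1Gi", "500Mi", or a bare integer such as "1048576" all satisfy the
	// quantity pattern in the CRD above and parse into the same type.
	limit := resource.MustParse("1Gi")
	fmt.Println(limit.Value()) // bytes as int64: 1073741824
}
```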
@@ -1479,6 +1479,12 @@ spec:
        type: integer
      dockerRegistryMirror:
        type: string
+     dockerVarRunVolumeSizeLimit:
+       anyOf:
+       - type: integer
+       - type: string
+       pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+       x-kubernetes-int-or-string: true
      dockerVolumeMounts:
        items:
          description: VolumeMount describes a mounting of a Volume within a container.
@@ -1432,6 +1432,12 @@ spec:
        type: integer
      dockerRegistryMirror:
        type: string
+     dockerVarRunVolumeSizeLimit:
+       anyOf:
+       - type: integer
+       - type: string
+       pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+       x-kubernetes-int-or-string: true
      dockerVolumeMounts:
        items:
          description: VolumeMount describes a mounting of a Volume within a container.
@@ -55,6 +55,12 @@ spec:
        type: integer
      dockerRegistryMirror:
        type: string
+     dockerVarRunVolumeSizeLimit:
+       anyOf:
+       - type: integer
+       - type: string
+       pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+       x-kubernetes-int-or-string: true
      dockerdWithinRunnerContainer:
        type: boolean
      effectiveTime:
@@ -5,7 +5,7 @@ kind: Kustomization
images:
- name: controller
  newName: summerwind/actions-runner-controller
-  newTag: dev
+  newTag: latest

replacements:
- path: env-replacement.yaml
@@ -56,6 +56,8 @@ spec:
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
+       - name: CONTROLLER_MANAGER_LISTENER_IMAGE_PULL_POLICY
+         value: IfNotPresent
        volumeMounts:
        - name: controller-manager
          mountPath: "/etc/actions-runner-controller"
@@ -102,6 +102,13 @@ rules:
  - patch
  - update
  - watch
+- apiGroups:
+  - actions.github.com
+  resources:
+  - ephemeralrunnersets/finalizers
+  verbs:
+  - patch
+  - update
- apiGroups:
  - actions.github.com
  resources:
@@ -31,6 +31,6 @@ All additional docs are kept in the `docs/` folder, this README is solely for do
| `autoscaler.enabled` | Enable the HorizontalRunnerAutoscaler, if its enabled then replica count will not be used | true |
| `autoscaler.minReplicas` | Minimum no of replicas | 1 |
| `autoscaler.maxReplicas` | Maximum no of replicas | 5 |
-| `autoscaler.scaleDownDelaySecondsAfterScaleOut` | [Anti-Flapping Configuration](https://github.com/actions/actions-runner-controller#anti-flapping-configuration) | 120 |
-| `autoscaler.metrics` | [Pull driven scaling](https://github.com/actions/actions-runner-controller#pull-driven-scaling) | default |
-| `autoscaler.scaleUpTriggers` | [Webhook driven scaling](https://github.com/actions/actions-runner-controller#webhook-driven-scaling) | |
+| `autoscaler.scaleDownDelaySecondsAfterScaleOut` | [Anti-Flapping Configuration](https://github.com/actions/actions-runner-controller/blob/master/docs/automatically-scaling-runners.md#anti-flapping-configuration) | 120 |
+| `autoscaler.metrics` | [Pull driven scaling](https://github.com/actions/actions-runner-controller/blob/master/docs/automatically-scaling-runners.md#pull-driven-scaling) | default |
+| `autoscaler.scaleUpTriggers` | [Webhook driven scaling](https://github.com/actions/actions-runner-controller/blob/master/docs/automatically-scaling-runners.md#webhook-driven-scaling) | |
@@ -17,7 +17,7 @@ runnerLabels:
replicaCount: 1

# The Runner Group that the runner(s) should be associated with.
-# See https://docs.github.com/en/github-ae@latest/actions/hosting-your-own-runners/managing-access-to-self-hosted-runners-using-groups.
+# See https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners/managing-access-to-self-hosted-runners-using-groups.
group: Default

autoscaler:
@@ -33,6 +33,8 @@ import (
	"sigs.k8s.io/controller-runtime/pkg/source"

	v1alpha1 "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
+	"github.com/actions/actions-runner-controller/controllers/actions.github.com/metrics"
	"github.com/actions/actions-runner-controller/github/actions"
	hash "github.com/actions/actions-runner-controller/hash"
	corev1 "k8s.io/api/core/v1"
	rbacv1 "k8s.io/api/rbac/v1"
@@ -41,7 +43,6 @@ import (

const (
	autoscalingListenerContainerName = "autoscaler"
-	autoscalingListenerOwnerKey      = ".metadata.controller"
	autoscalingListenerFinalizerName = "autoscalinglistener.actions.github.com/finalizer"
)

@@ -50,6 +51,10 @@ type AutoscalingListenerReconciler struct {
	client.Client
	Log    logr.Logger
	Scheme *runtime.Scheme
+	// ListenerMetricsAddr is the address that the metrics endpoint binds to.
+	// If it is set to "0", the metrics server is not started.
+	ListenerMetricsAddr     string
+	ListenerMetricsEndpoint string

	resourceBuilder resourceBuilder
}
@@ -228,6 +233,11 @@ func (r *AutoscalingListenerReconciler) Reconcile(ctx context.Context, req ctrl.
		return ctrl.Result{}, err
	}

+	if err := r.publishRunningListener(autoscalingListener, false); err != nil {
+		// If publish fails, URL is incorrect which means the listener pod would never be able to start
+		return ctrl.Result{}, nil
+	}
+
	// Create a listener pod in the controller namespace
	log.Info("Creating a listener pod")
	return r.createListenerPod(ctx, &autoscalingRunnerSet, autoscalingListener, serviceAccount, mirrorSecret, log)
@@ -243,68 +253,19 @@ func (r *AutoscalingListenerReconciler) Reconcile(ctx context.Context, req ctrl.
		}
	}

+	if listenerPod.Status.Phase == corev1.PodRunning {
+		if err := r.publishRunningListener(autoscalingListener, true); err != nil {
+			log.Error(err, "Unable to publish running listener", "namespace", listenerPod.Namespace, "name", listenerPod.Name)
+			// stop reconciling. We should never get to this point but if we do,
+			// listener won't be able to start up, and the crash from the pod should
+			// notify the reconciler again.
+			return ctrl.Result{}, nil
+		}
+	}
+
	return ctrl.Result{}, nil
}
// SetupWithManager sets up the controller with the Manager.
func (r *AutoscalingListenerReconciler) SetupWithManager(mgr ctrl.Manager) error {
	groupVersionIndexer := func(rawObj client.Object) []string {
		groupVersion := v1alpha1.GroupVersion.String()
		owner := metav1.GetControllerOf(rawObj)
		if owner == nil {
			return nil
		}

		// ...make sure it is owned by this controller
		if owner.APIVersion != groupVersion || owner.Kind != "AutoscalingListener" {
			return nil
		}

		// ...and if so, return it
		return []string{owner.Name}
	}

	if err := mgr.GetFieldIndexer().IndexField(context.Background(), &corev1.Pod{}, autoscalingListenerOwnerKey, groupVersionIndexer); err != nil {
		return err
	}

	if err := mgr.GetFieldIndexer().IndexField(context.Background(), &corev1.ServiceAccount{}, autoscalingListenerOwnerKey, groupVersionIndexer); err != nil {
		return err
	}

	labelBasedWatchFunc := func(obj client.Object) []reconcile.Request {
		var requests []reconcile.Request
		labels := obj.GetLabels()
		namespace, ok := labels["auto-scaling-listener-namespace"]
		if !ok {
			return nil
		}

		name, ok := labels["auto-scaling-listener-name"]
		if !ok {
			return nil
		}
		requests = append(requests,
			reconcile.Request{
				NamespacedName: types.NamespacedName{
					Name:      name,
					Namespace: namespace,
				},
			},
		)
		return requests
	}

	return ctrl.NewControllerManagedBy(mgr).
		For(&v1alpha1.AutoscalingListener{}).
		Owns(&corev1.Pod{}).
		Owns(&corev1.ServiceAccount{}).
		Watches(&source.Kind{Type: &rbacv1.Role{}}, handler.EnqueueRequestsFromMapFunc(labelBasedWatchFunc)).
		Watches(&source.Kind{Type: &rbacv1.RoleBinding{}}, handler.EnqueueRequestsFromMapFunc(labelBasedWatchFunc)).
		WithEventFilter(predicate.ResourceVersionChangedPredicate{}).
		Complete(r)
}

func (r *AutoscalingListenerReconciler) cleanupResources(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, logger logr.Logger) (done bool, err error) {
	logger.Info("Cleaning up the listener pod")
	listenerPod := new(corev1.Pod)
@@ -320,6 +281,9 @@ func (r *AutoscalingListenerReconciler) cleanupResources(ctx context.Context, au
		return false, nil
	case err != nil && !kerrors.IsNotFound(err):
		return false, fmt.Errorf("failed to get listener pods: %v", err)
+
+	default: // NOT FOUND
+		_ = r.publishRunningListener(autoscalingListener, false) // If error is returned, we never published metrics so it is safe to ignore
	}
	logger.Info("Listener pod is deleted")
@@ -431,9 +395,22 @@ func (r *AutoscalingListenerReconciler) createListenerPod(ctx context.Context, a
		envs = append(envs, env)
	}

-	newPod := r.resourceBuilder.newScaleSetListenerPod(autoscalingListener, serviceAccount, secret, envs...)
+	var metricsConfig *listenerMetricsServerConfig
+	if r.ListenerMetricsAddr != "0" {
+		metricsConfig = &listenerMetricsServerConfig{
+			addr:     r.ListenerMetricsAddr,
+			endpoint: r.ListenerMetricsEndpoint,
+		}
+	}
+
+	newPod, err := r.resourceBuilder.newScaleSetListenerPod(autoscalingListener, serviceAccount, secret, metricsConfig, envs...)
+	if err != nil {
+		logger.Error(err, "Failed to build listener pod")
+		return ctrl.Result{}, err
+	}

	if err := ctrl.SetControllerReference(autoscalingListener, newPod, r.Scheme); err != nil {
		logger.Error(err, "Failed to set controller reference")
		return ctrl.Result{}, err
	}
@@ -615,3 +592,86 @@ func (r *AutoscalingListenerReconciler) createRoleBindingForListener(ctx context
		"serviceAccount", serviceAccount.Name)
	return ctrl.Result{Requeue: true}, nil
}

func (r *AutoscalingListenerReconciler) publishRunningListener(autoscalingListener *v1alpha1.AutoscalingListener, isUp bool) error {
	githubConfigURL := autoscalingListener.Spec.GitHubConfigUrl
	parsedURL, err := actions.ParseGitHubConfigFromURL(githubConfigURL)
	if err != nil {
		return err
	}

	commonLabels := metrics.CommonLabels{
		Name:         autoscalingListener.Name,
		Namespace:    autoscalingListener.Namespace,
		Repository:   parsedURL.Repository,
		Organization: parsedURL.Organization,
		Enterprise:   parsedURL.Enterprise,
	}

	if isUp {
		metrics.AddRunningListener(commonLabels)
	} else {
		metrics.SubRunningListener(commonLabels)
	}

	return nil
}
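publishRunningListener derives its metric labels by splitting the configured GitHub URL into enterprise, organization, and repository scopes. A sketch of the expected behavior; the parsed field values shown are assumptions based on how the fields are used above:

```go
parsed, err := actions.ParseGitHubConfigFromURL("https://github.com/my-org/my-repo")
if err != nil {
	return err
}
// Assumed for a repository-scoped URL:
//   parsed.Organization == "my-org"
//   parsed.Repository   == "my-repo"
//   parsed.Enterprise   == ""
```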
// SetupWithManager sets up the controller with the Manager.
func (r *AutoscalingListenerReconciler) SetupWithManager(mgr ctrl.Manager) error {
	groupVersionIndexer := func(rawObj client.Object) []string {
		groupVersion := v1alpha1.GroupVersion.String()
		owner := metav1.GetControllerOf(rawObj)
		if owner == nil {
			return nil
		}

		// ...make sure it is owned by this controller
		if owner.APIVersion != groupVersion || owner.Kind != "AutoscalingListener" {
			return nil
		}

		// ...and if so, return it
		return []string{owner.Name}
	}

	if err := mgr.GetFieldIndexer().IndexField(context.Background(), &corev1.Pod{}, resourceOwnerKey, groupVersionIndexer); err != nil {
		return err
	}

	if err := mgr.GetFieldIndexer().IndexField(context.Background(), &corev1.ServiceAccount{}, resourceOwnerKey, groupVersionIndexer); err != nil {
		return err
	}

	labelBasedWatchFunc := func(obj client.Object) []reconcile.Request {
		var requests []reconcile.Request
		labels := obj.GetLabels()
		namespace, ok := labels["auto-scaling-listener-namespace"]
		if !ok {
			return nil
		}

		name, ok := labels["auto-scaling-listener-name"]
		if !ok {
			return nil
		}
		requests = append(requests,
			reconcile.Request{
				NamespacedName: types.NamespacedName{
					Name:      name,
					Namespace: namespace,
				},
			},
		)
		return requests
	}

	return ctrl.NewControllerManagedBy(mgr).
		For(&v1alpha1.AutoscalingListener{}).
		Owns(&corev1.Pod{}).
		Owns(&corev1.ServiceAccount{}).
		Watches(&source.Kind{Type: &rbacv1.Role{}}, handler.EnqueueRequestsFromMapFunc(labelBasedWatchFunc)).
		Watches(&source.Kind{Type: &rbacv1.RoleBinding{}}, handler.EnqueueRequestsFromMapFunc(labelBasedWatchFunc)).
		WithEventFilter(predicate.ResourceVersionChangedPredicate{}).
		Complete(r)
}
@@ -213,7 +213,7 @@ var _ = Describe("Test AutoScalingListener controller", func() {
	Eventually(
		func() error {
			podList := new(corev1.PodList)
-			err := k8sClient.List(ctx, podList, client.InNamespace(autoscalingListener.Namespace), client.MatchingFields{autoscalingRunnerSetOwnerKey: autoscalingListener.Name})
+			err := k8sClient.List(ctx, podList, client.InNamespace(autoscalingListener.Namespace), client.MatchingFields{resourceOwnerKey: autoscalingListener.Name})
			if err != nil {
				return err
			}
@@ -231,7 +231,7 @@ var _ = Describe("Test AutoScalingListener controller", func() {
	Eventually(
		func() error {
			serviceAccountList := new(corev1.ServiceAccountList)
-			err := k8sClient.List(ctx, serviceAccountList, client.InNamespace(autoscalingListener.Namespace), client.MatchingFields{autoscalingRunnerSetOwnerKey: autoscalingListener.Name})
+			err := k8sClient.List(ctx, serviceAccountList, client.InNamespace(autoscalingListener.Namespace), client.MatchingFields{resourceOwnerKey: autoscalingListener.Name})
			if err != nil {
				return err
			}
@@ -24,9 +24,11 @@ import (
	"strings"

	"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
+	"github.com/actions/actions-runner-controller/build"
	"github.com/actions/actions-runner-controller/github/actions"
	"github.com/go-logr/logr"
	corev1 "k8s.io/api/core/v1"
	rbacv1 "k8s.io/api/rbac/v1"
	kerrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
@@ -41,14 +43,30 @@ import (
)

const (
	// TODO: Replace with shared image.
-	autoscalingRunnerSetOwnerKey      = ".metadata.controller"
-	LabelKeyRunnerSpecHash            = "runner-spec-hash"
+	labelKeyRunnerSpecHash            = "runner-spec-hash"
	autoscalingRunnerSetFinalizerName = "autoscalingrunnerset.actions.github.com/finalizer"
	runnerScaleSetIdAnnotationKey     = "runner-scale-set-id"
	runnerScaleSetNameAnnotationKey   = "runner-scale-set-name"
)
type UpdateStrategy string

// Defines how the controller should handle upgrades while having running jobs.
const (
	// "immediate": (default) The controller will immediately apply the change causing the
	// recreation of the listener and ephemeral runner set. This can lead to an
	// overprovisioning of runners, if there are pending / running jobs. This should not
	// be a problem at a small scale, but it could lead to a significant increase of
	// resources if you have a lot of jobs running concurrently.
	UpdateStrategyImmediate = UpdateStrategy("immediate")
	// "eventual": The controller will remove the listener and ephemeral runner set
	// immediately, but will not recreate them (to apply changes) until all
	// pending / running jobs have completed.
	// This can lead to a longer time to apply the change but it will ensure
	// that you don't have any overprovisioning of runners.
	UpdateStrategyEventual = UpdateStrategy("eventual")
)
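How the strategy might be selected when the reconciler is constructed, mirroring the fields used elsewhere in this diff; the surrounding wiring here is hypothetical:

```go
// Hypothetical setup code; mgr and logf.Log as in the tests below.
r := &AutoscalingRunnerSetReconciler{
	Client:         mgr.GetClient(),
	Scheme:         mgr.GetScheme(),
	Log:            logf.Log,
	UpdateStrategy: UpdateStrategyEventual, // wait for jobs to drain before re-creating resources
}
```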
// AutoscalingRunnerSetReconciler reconciles a AutoscalingRunnerSet object
type AutoscalingRunnerSetReconciler struct {
	client.Client
@@ -57,6 +75,7 @@ type AutoscalingRunnerSetReconciler struct {
	ControllerNamespace                           string
	DefaultRunnerScaleSetListenerImage            string
	DefaultRunnerScaleSetListenerImagePullSecrets []string
+	UpdateStrategy                                UpdateStrategy
	ActionsClient                                 actions.MultiClient

	resourceBuilder resourceBuilder
@@ -113,6 +132,17 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl
			return ctrl.Result{}, err
		}

+		requeue, err := r.removeFinalizersFromDependentResources(ctx, autoscalingRunnerSet, log)
+		if err != nil {
+			log.Error(err, "Failed to remove finalizers on dependent resources")
+			return ctrl.Result{}, err
+		}
+
+		if requeue {
+			log.Info("Waiting for dependent resources to be deleted")
+			return ctrl.Result{Requeue: true}, nil
+		}
+
		log.Info("Removing finalizer")
		err = patch(ctx, r.Client, autoscalingRunnerSet, func(obj *v1alpha1.AutoscalingRunnerSet) {
			controllerutil.RemoveFinalizer(obj, autoscalingRunnerSetFinalizerName)
@@ -126,6 +156,22 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl
		return ctrl.Result{}, nil
	}

+	if autoscalingRunnerSet.Labels[LabelKeyKubernetesVersion] != build.Version {
+		if err := r.Delete(ctx, autoscalingRunnerSet); err != nil {
+			log.Error(err, "Failed to delete autoscaling runner set on version mismatch",
+				"targetVersion", build.Version,
+				"actualVersion", autoscalingRunnerSet.Labels[LabelKeyKubernetesVersion],
+			)
+			return ctrl.Result{}, nil
+		}
+
+		log.Info("Autoscaling runner set version doesn't match the build version. Deleting the resource.",
+			"targetVersion", build.Version,
+			"actualVersion", autoscalingRunnerSet.Labels[LabelKeyKubernetesVersion],
+		)
+		return ctrl.Result{}, nil
+	}
+
	if !controllerutil.ContainsFinalizer(autoscalingRunnerSet, autoscalingRunnerSetFinalizerName) {
		log.Info("Adding finalizer")
		if err := patch(ctx, r.Client, autoscalingRunnerSet, func(obj *v1alpha1.AutoscalingRunnerSet) {
@@ -188,10 +234,51 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl
	desiredSpecHash := autoscalingRunnerSet.RunnerSetSpecHash()
	for _, runnerSet := range existingRunnerSets.all() {
-		log.Info("Find existing ephemeral runner set", "name", runnerSet.Name, "specHash", runnerSet.Labels[LabelKeyRunnerSpecHash])
+		log.Info("Find existing ephemeral runner set", "name", runnerSet.Name, "specHash", runnerSet.Labels[labelKeyRunnerSpecHash])
	}

-	if desiredSpecHash != latestRunnerSet.Labels[LabelKeyRunnerSpecHash] {
+	// Make sure the AutoscalingListener is up and running in the controller namespace
+	listener := new(v1alpha1.AutoscalingListener)
+	listenerFound := true
+	if err := r.Get(ctx, client.ObjectKey{Namespace: r.ControllerNamespace, Name: scaleSetListenerName(autoscalingRunnerSet)}, listener); err != nil {
+		if !kerrors.IsNotFound(err) {
+			log.Error(err, "Failed to get AutoscalingListener resource")
+			return ctrl.Result{}, err
+		}
+
+		listenerFound = false
+		log.Info("AutoscalingListener does not exist.")
+	}
+
+	// Our listener pod is out of date, so we need to delete it to get a new recreate.
+	if listenerFound && (listener.Labels[labelKeyRunnerSpecHash] != autoscalingRunnerSet.ListenerSpecHash()) {
+		log.Info("RunnerScaleSetListener is out of date. Deleting it so that it is recreated", "name", listener.Name)
+		if err := r.Delete(ctx, listener); err != nil {
+			if kerrors.IsNotFound(err) {
+				return ctrl.Result{}, nil
+			}
+			log.Error(err, "Failed to delete AutoscalingListener resource")
+			return ctrl.Result{}, err
+		}
+
+		log.Info("Deleted RunnerScaleSetListener since existing one is out of date")
+		return ctrl.Result{}, nil
+	}
+
+	if desiredSpecHash != latestRunnerSet.Labels[labelKeyRunnerSpecHash] {
+		if r.drainingJobs(&latestRunnerSet.Status) {
+			log.Info("Latest runner set spec hash does not match the current autoscaling runner set. Waiting for the running and pending runners to finish:", "running", latestRunnerSet.Status.RunningEphemeralRunners, "pending", latestRunnerSet.Status.PendingEphemeralRunners)
+			log.Info("Scaling down the number of desired replicas to 0")
+			// We are in the process of draining the jobs. The listener has been deleted and the ephemeral runner set replicas
+			// need to scale down to 0
+			err := patch(ctx, r.Client, latestRunnerSet, func(obj *v1alpha1.EphemeralRunnerSet) {
+				obj.Spec.Replicas = 0
+			})
+			if err != nil {
+				log.Error(err, "Failed to patch runner set to set desired count to 0")
+			}
+			return ctrl.Result{}, err
+		}
		log.Info("Latest runner set spec hash does not match the current autoscaling runner set. Creating a new runner set")
		return r.createEphemeralRunnerSet(ctx, autoscalingRunnerSet, log)
	}
@@ -207,30 +294,13 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl
	}

-	// Make sure the AutoscalingListener is up and running in the controller namespace
-	listener := new(v1alpha1.AutoscalingListener)
-	if err := r.Get(ctx, client.ObjectKey{Namespace: r.ControllerNamespace, Name: scaleSetListenerName(autoscalingRunnerSet)}, listener); err != nil {
-		if kerrors.IsNotFound(err) {
-			// We don't have a listener
-			log.Info("Creating a new AutoscalingListener for the runner set", "ephemeralRunnerSetName", latestRunnerSet.Name)
-			return r.createAutoScalingListenerForRunnerSet(ctx, autoscalingRunnerSet, latestRunnerSet, log)
+	if !listenerFound {
+		if r.drainingJobs(&latestRunnerSet.Status) {
+			log.Info("Creating a new AutoscalingListener is waiting for the running and pending runners to finish. Waiting for the running and pending runners to finish:", "running", latestRunnerSet.Status.RunningEphemeralRunners, "pending", latestRunnerSet.Status.PendingEphemeralRunners)
+			return ctrl.Result{}, nil
		}
-		log.Error(err, "Failed to get AutoscalingListener resource")
-		return ctrl.Result{}, err
-	}
-
-	// Our listener pod is out of date, so we need to delete it to get a new recreate.
-	if listener.Labels[LabelKeyRunnerSpecHash] != autoscalingRunnerSet.ListenerSpecHash() {
-		log.Info("RunnerScaleSetListener is out of date. Deleting it so that it is recreated", "name", listener.Name)
-		if err := r.Delete(ctx, listener); err != nil {
-			if kerrors.IsNotFound(err) {
-				return ctrl.Result{}, nil
-			}
-			log.Error(err, "Failed to delete AutoscalingListener resource")
-			return ctrl.Result{}, err
-		}
-
-		log.Info("Deleted RunnerScaleSetListener since existing one is out of date")
-		return ctrl.Result{}, nil
+		log.Info("Creating a new AutoscalingListener for the runner set", "ephemeralRunnerSetName", latestRunnerSet.Name)
+		return r.createAutoScalingListenerForRunnerSet(ctx, autoscalingRunnerSet, latestRunnerSet, log)
	}

	// Update the status of autoscaling runner set.
@@ -249,6 +319,16 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl
		return ctrl.Result{}, nil
	}
// Prevents overprovisioning of runners.
// We reach this code path when the runner scale set has been patched with a new runner spec but there are still running ephemeral runners.
// The safest approach is to wait for the running ephemeral runners to finish before creating a new runner set.
func (r *AutoscalingRunnerSetReconciler) drainingJobs(latestRunnerSetStatus *v1alpha1.EphemeralRunnerSetStatus) bool {
	if r.UpdateStrategy == UpdateStrategyEventual && ((latestRunnerSetStatus.RunningEphemeralRunners + latestRunnerSetStatus.PendingEphemeralRunners) > 0) {
		return true
	}
	return false
}
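In other words, the drain check only ever fires for the eventual strategy, and only while work is still in flight. A quick illustration using the field names above:

```go
r := &AutoscalingRunnerSetReconciler{UpdateStrategy: UpdateStrategyEventual}
status := v1alpha1.EphemeralRunnerSetStatus{RunningEphemeralRunners: 1, PendingEphemeralRunners: 0}
_ = r.drainingJobs(&status) // true: one runner is still busy, so re-creation waits
```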
func (r *AutoscalingRunnerSetReconciler) cleanupListener(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) (done bool, err error) {
	logger.Info("Cleaning up the listener")
	var listener v1alpha1.AutoscalingListener
@@ -305,6 +385,29 @@ func (r *AutoscalingRunnerSetReconciler) deleteEphemeralRunnerSets(ctx context.C
	return nil
}

func (r *AutoscalingRunnerSetReconciler) removeFinalizersFromDependentResources(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) (requeue bool, err error) {
	c := autoscalingRunnerSetFinalizerDependencyCleaner{
		client:               r.Client,
		autoscalingRunnerSet: autoscalingRunnerSet,
		logger:               logger,
	}

	c.removeKubernetesModeRoleBindingFinalizer(ctx)
	c.removeKubernetesModeRoleFinalizer(ctx)
	c.removeKubernetesModeServiceAccountFinalizer(ctx)
	c.removeNoPermissionServiceAccountFinalizer(ctx)
	c.removeGitHubSecretFinalizer(ctx)
	c.removeManagerRoleBindingFinalizer(ctx)
	c.removeManagerRoleFinalizer(ctx)

	requeue, err = c.result()
	if err != nil {
		logger.Error(err, "Failed to cleanup finalizer from dependent resource")
		return true, err
	}
	return requeue, nil
}
func (r *AutoscalingRunnerSetReconciler) createRunnerScaleSet(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) (ctrl.Result, error) {
	logger.Info("Creating a new runner scale set")
	actionsClient, err := r.actionsClientFor(ctx, autoscalingRunnerSet)
@@ -467,12 +570,28 @@ func (r *AutoscalingRunnerSetReconciler) updateRunnerScaleSetName(ctx context.Co
}

func (r *AutoscalingRunnerSetReconciler) deleteRunnerScaleSet(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) error {
+	scaleSetId, ok := autoscalingRunnerSet.Annotations[runnerScaleSetIdAnnotationKey]
+	if !ok {
+		// Annotation not being present can occur in 3 scenarios
+		// 1. Scale set is never created.
+		//    In this case, we don't need to fetch the actions client to delete the scale set that does not exist.
+		//
+		// 2. The scale set has been deleted by the controller.
+		//    In that case, the controller will clean up the annotation because the scale set does not exist anymore.
+		//    Removal of the scale set id is also useful because permission cleanup will eventually lose the permission
+		//    assigned to it on the GitHub secret, causing the actions client built from the secret to result in permission denied.
+		//
+		// 3. Annotation is removed manually.
+		//    In this case, the controller will treat this as if the scale set is being removed from the actions service.
+		//    Then, manual deletion of the scale set is required.
+		return nil
+	}
	logger.Info("Deleting the runner scale set from Actions service")
-	runnerScaleSetId, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdAnnotationKey])
+	runnerScaleSetId, err := strconv.Atoi(scaleSetId)
	if err != nil {
-		// If the annotation is not set correctly, or if it does not exist, we are going to get stuck in a loop trying to parse the scale set id.
-		// If the configuration is invalid (secret does not exist for example), we never get to the point to create runner set. But then, manual cleanup
-		// would get stuck finalizing the resource trying to parse annotation indefinitely
+		// If the annotation is not set correctly, we are going to get stuck in a loop trying to parse the scale set id.
+		// If the configuration is invalid (secret does not exist for example), we never got to the point to create the runner set.
+		// But then, manual cleanup would get stuck finalizing the resource trying to parse the annotation indefinitely
		logger.Info("autoscaling runner set does not have annotation describing scale set id. Skip deletion", "err", err.Error())
		return nil
	}
@@ -489,6 +608,14 @@ func (r *AutoscalingRunnerSetReconciler) deleteRunnerScaleSet(ctx context.Contex
		return err
	}

+	err = patch(ctx, r.Client, autoscalingRunnerSet, func(obj *v1alpha1.AutoscalingRunnerSet) {
+		delete(obj.Annotations, runnerScaleSetIdAnnotationKey)
+	})
+	if err != nil {
+		logger.Error(err, "Failed to patch autoscaling runner set with annotation removed", "annotation", runnerScaleSetIdAnnotationKey)
+		return err
+	}
+
	logger.Info("Deleted the runner scale set from Actions service")
	return nil
}
@@ -541,7 +668,7 @@ func (r *AutoscalingRunnerSetReconciler) createAutoScalingListenerForRunnerSet(c

func (r *AutoscalingRunnerSetReconciler) listEphemeralRunnerSets(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet) (*EphemeralRunnerSets, error) {
	list := new(v1alpha1.EphemeralRunnerSetList)
-	if err := r.List(ctx, list, client.InNamespace(autoscalingRunnerSet.Namespace), client.MatchingFields{autoscalingRunnerSetOwnerKey: autoscalingRunnerSet.Name}); err != nil {
+	if err := r.List(ctx, list, client.InNamespace(autoscalingRunnerSet.Namespace), client.MatchingFields{resourceOwnerKey: autoscalingRunnerSet.Name}); err != nil {
		return nil, fmt.Errorf("failed to list ephemeral runner sets: %v", err)
	}

@@ -634,7 +761,7 @@ func (r *AutoscalingRunnerSetReconciler) SetupWithManager(mgr ctrl.Manager) erro
		return []string{owner.Name}
	}

-	if err := mgr.GetFieldIndexer().IndexField(context.Background(), &v1alpha1.EphemeralRunnerSet{}, autoscalingRunnerSetOwnerKey, groupVersionIndexer); err != nil {
+	if err := mgr.GetFieldIndexer().IndexField(context.Background(), &v1alpha1.EphemeralRunnerSet{}, resourceOwnerKey, groupVersionIndexer); err != nil {
		return err
	}

@@ -658,6 +785,328 @@ func (r *AutoscalingRunnerSetReconciler) SetupWithManager(mgr ctrl.Manager) erro
		Complete(r)
}
type autoscalingRunnerSetFinalizerDependencyCleaner struct {
	// configuration fields
	client               client.Client
	autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet
	logger               logr.Logger

	// fields to operate on
	requeue bool
	err     error
}

func (c *autoscalingRunnerSetFinalizerDependencyCleaner) result() (requeue bool, err error) {
	return c.requeue, c.err
}
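The cleaner's remove* methods below each begin with `if c.requeue || c.err != nil { return }`, a sticky-error pattern: once any step fails or requests a requeue, every later step becomes a no-op, so removeFinalizersFromDependentResources can call all of them unconditionally and inspect result() once at the end. A standalone illustration of the idea, not from the diff:

```go
// chain accumulates the first error; later steps short-circuit.
type chain struct{ err error }

func (c *chain) step(f func() error) {
	if c.err != nil {
		return // an earlier step failed: do nothing
	}
	c.err = f()
}
```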
func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeKubernetesModeRoleBindingFinalizer(ctx context.Context) {
|
||||
if c.requeue || c.err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
roleBindingName, ok := c.autoscalingRunnerSet.Annotations[AnnotationKeyKubernetesModeRoleBindingName]
|
||||
if !ok {
|
||||
c.logger.Info(
|
||||
"Skipping cleaning up kubernetes mode service account",
|
||||
"reason",
|
||||
fmt.Sprintf("annotation key %q not present", AnnotationKeyKubernetesModeRoleBindingName),
|
||||
)
|
||||
return
|
||||
}
|
||||
|
||||
c.logger.Info("Removing finalizer from container mode kubernetes role binding", "name", roleBindingName)
|
||||
|
||||
roleBinding := new(rbacv1.RoleBinding)
|
||||
err := c.client.Get(ctx, types.NamespacedName{Name: roleBindingName, Namespace: c.autoscalingRunnerSet.Namespace}, roleBinding)
|
||||
switch {
|
||||
case err == nil:
|
||||
if !controllerutil.ContainsFinalizer(roleBinding, AutoscalingRunnerSetCleanupFinalizerName) {
|
||||
c.logger.Info("Kubernetes mode role binding finalizer has already been removed", "name", roleBindingName)
|
||||
return
|
||||
}
|
||||
err = patch(ctx, c.client, roleBinding, func(obj *rbacv1.RoleBinding) {
|
||||
controllerutil.RemoveFinalizer(obj, AutoscalingRunnerSetCleanupFinalizerName)
|
||||
})
|
||||
if err != nil {
|
||||
c.err = fmt.Errorf("failed to patch kubernetes mode role binding without finalizer: %w", err)
|
||||
return
|
||||
}
|
||||
c.requeue = true
|
||||
c.logger.Info("Removed finalizer from container mode kubernetes role binding", "name", roleBindingName)
|
||||
return
|
||||
case err != nil && !kerrors.IsNotFound(err):
|
||||
c.err = fmt.Errorf("failed to fetch kubernetes mode role binding: %w", err)
|
||||
return
|
||||
default:
|
||||
c.logger.Info("Container mode kubernetes role binding has already been deleted", "name", roleBindingName)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeKubernetesModeRoleFinalizer(ctx context.Context) {
|
||||
if c.requeue || c.err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
roleName, ok := c.autoscalingRunnerSet.Annotations[AnnotationKeyKubernetesModeRoleName]
|
||||
if !ok {
|
||||
c.logger.Info(
|
||||
"Skipping cleaning up kubernetes mode role",
|
||||
"reason",
|
||||
fmt.Sprintf("annotation key %q not present", AnnotationKeyKubernetesModeRoleName),
|
||||
)
|
||||
return
|
||||
}
|
||||
|
||||
c.logger.Info("Removing finalizer from container mode kubernetes role", "name", roleName)
|
||||
role := new(rbacv1.Role)
|
||||
err := c.client.Get(ctx, types.NamespacedName{Name: roleName, Namespace: c.autoscalingRunnerSet.Namespace}, role)
|
||||
switch {
|
||||
case err == nil:
|
||||
if !controllerutil.ContainsFinalizer(role, AutoscalingRunnerSetCleanupFinalizerName) {
|
||||
c.logger.Info("Kubernetes mode role finalizer has already been removed", "name", roleName)
|
||||
return
|
||||
}
|
||||
err = patch(ctx, c.client, role, func(obj *rbacv1.Role) {
|
||||
controllerutil.RemoveFinalizer(obj, AutoscalingRunnerSetCleanupFinalizerName)
|
||||
})
|
||||
if err != nil {
|
||||
c.err = fmt.Errorf("failed to patch kubernetes mode role without finalizer: %w", err)
|
||||
return
|
||||
}
|
||||
c.requeue = true
|
||||
c.logger.Info("Removed finalizer from container mode kubernetes role")
|
||||
return
|
||||
case err != nil && !kerrors.IsNotFound(err):
|
||||
c.err = fmt.Errorf("failed to fetch kubernetes mode role: %w", err)
|
||||
return
|
||||
default:
|
||||
c.logger.Info("Container mode kubernetes role has already been deleted", "name", roleName)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeKubernetesModeServiceAccountFinalizer(ctx context.Context) {
|
||||
if c.requeue || c.err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
serviceAccountName, ok := c.autoscalingRunnerSet.Annotations[AnnotationKeyKubernetesModeServiceAccountName]
|
||||
if !ok {
|
||||
c.logger.Info(
|
||||
"Skipping cleaning up kubernetes mode role binding",
|
||||
"reason",
|
||||
fmt.Sprintf("annotation key %q not present", AnnotationKeyKubernetesModeServiceAccountName),
|
||||
)
|
||||
return
|
||||
}
|
||||
|
||||
c.logger.Info("Removing finalizer from container mode kubernetes service account", "name", serviceAccountName)
|
||||
|
||||
serviceAccount := new(corev1.ServiceAccount)
|
||||
err := c.client.Get(ctx, types.NamespacedName{Name: serviceAccountName, Namespace: c.autoscalingRunnerSet.Namespace}, serviceAccount)
|
||||
switch {
|
||||
case err == nil:
|
||||
if !controllerutil.ContainsFinalizer(serviceAccount, AutoscalingRunnerSetCleanupFinalizerName) {
|
||||
c.logger.Info("Kubernetes mode service account finalizer has already been removed", "name", serviceAccountName)
|
||||
return
|
||||
}
|
||||
err = patch(ctx, c.client, serviceAccount, func(obj *corev1.ServiceAccount) {
|
||||
controllerutil.RemoveFinalizer(obj, AutoscalingRunnerSetCleanupFinalizerName)
|
||||
})
|
||||
if err != nil {
|
||||
c.err = fmt.Errorf("failed to patch kubernetes mode service account without finalizer: %w", err)
|
||||
return
|
||||
}
|
||||
c.requeue = true
|
||||
c.logger.Info("Removed finalizer from container mode kubernetes service account")
|
||||
return
|
||||
case err != nil && !kerrors.IsNotFound(err):
|
||||
c.err = fmt.Errorf("failed to fetch kubernetes mode service account: %w", err)
|
||||
return
|
||||
default:
|
||||
c.logger.Info("Container mode kubernetes service account has already been deleted", "name", serviceAccountName)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeNoPermissionServiceAccountFinalizer(ctx context.Context) {
|
||||
if c.requeue || c.err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
serviceAccountName, ok := c.autoscalingRunnerSet.Annotations[AnnotationKeyNoPermissionServiceAccountName]
|
||||
if !ok {
|
||||
c.logger.Info(
|
||||
"Skipping cleaning up no permission service account",
|
||||
"reason",
|
||||
fmt.Sprintf("annotation key %q not present", AnnotationKeyNoPermissionServiceAccountName),
|
||||
)
|
||||
return
|
||||
}
|
||||
|
||||
c.logger.Info("Removing finalizer from no permission service account", "name", serviceAccountName)
|
||||
|
||||
serviceAccount := new(corev1.ServiceAccount)
|
||||
err := c.client.Get(ctx, types.NamespacedName{Name: serviceAccountName, Namespace: c.autoscalingRunnerSet.Namespace}, serviceAccount)
|
||||
switch {
|
||||
case err == nil:
|
||||
if !controllerutil.ContainsFinalizer(serviceAccount, AutoscalingRunnerSetCleanupFinalizerName) {
|
||||
c.logger.Info("No permission service account finalizer has already been removed", "name", serviceAccountName)
|
||||
return
|
||||
}
|
||||
err = patch(ctx, c.client, serviceAccount, func(obj *corev1.ServiceAccount) {
|
||||
controllerutil.RemoveFinalizer(obj, AutoscalingRunnerSetCleanupFinalizerName)
|
||||
})
|
||||
if err != nil {
|
||||
c.err = fmt.Errorf("failed to patch service account without finalizer: %w", err)
|
||||
return
|
||||
}
|
||||
c.requeue = true
|
||||
c.logger.Info("Removed finalizer from no permission service account", "name", serviceAccountName)
|
||||
return
|
||||
case err != nil && !kerrors.IsNotFound(err):
|
||||
c.err = fmt.Errorf("failed to fetch service account: %w", err)
|
||||
return
|
||||
default:
|
||||
c.logger.Info("No permission service account has already been deleted", "name", serviceAccountName)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeGitHubSecretFinalizer(ctx context.Context) {
|
||||
if c.requeue || c.err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
githubSecretName, ok := c.autoscalingRunnerSet.Annotations[AnnotationKeyGitHubSecretName]
|
||||
if !ok {
|
||||
c.logger.Info(
|
||||
"Skipping cleaning up no permission service account",
|
||||
"reason",
|
||||
fmt.Sprintf("annotation key %q not present", AnnotationKeyGitHubSecretName),
|
||||
)
|
||||
return
|
||||
}
|
||||
|
||||
c.logger.Info("Removing finalizer from GitHub secret", "name", githubSecretName)
|
||||
|
||||
githubSecret := new(corev1.Secret)
|
||||
err := c.client.Get(ctx, types.NamespacedName{Name: githubSecretName, Namespace: c.autoscalingRunnerSet.Namespace}, githubSecret)
|
||||
switch {
|
||||
case err == nil:
|
||||
if !controllerutil.ContainsFinalizer(githubSecret, AutoscalingRunnerSetCleanupFinalizerName) {
|
||||
c.logger.Info("GitHub secret finalizer has already been removed", "name", githubSecretName)
|
||||
return
|
||||
}
|
||||
err = patch(ctx, c.client, githubSecret, func(obj *corev1.Secret) {
|
||||
controllerutil.RemoveFinalizer(obj, AutoscalingRunnerSetCleanupFinalizerName)
|
||||
})
|
||||
if err != nil {
|
||||
c.err = fmt.Errorf("failed to patch GitHub secret without finalizer: %w", err)
|
||||
return
|
||||
}
|
||||
c.requeue = true
|
||||
c.logger.Info("Removed finalizer from GitHub secret", "name", githubSecretName)
|
||||
return
|
||||
  case err != nil && !kerrors.IsNotFound(err) && !kerrors.IsForbidden(err):
    c.err = fmt.Errorf("failed to fetch GitHub secret: %w", err)
    return
  default:
    c.logger.Info("GitHub secret has already been deleted", "name", githubSecretName)
    return
  }
}

func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeManagerRoleBindingFinalizer(ctx context.Context) {
  if c.requeue || c.err != nil {
    return
  }

  managerRoleBindingName, ok := c.autoscalingRunnerSet.Annotations[AnnotationKeyManagerRoleBindingName]
  if !ok {
    c.logger.Info(
      "Skipping cleaning up manager role binding",
      "reason",
      fmt.Sprintf("annotation key %q not present", AnnotationKeyManagerRoleBindingName),
    )
    return
  }

  c.logger.Info("Removing finalizer from manager role binding", "name", managerRoleBindingName)

  roleBinding := new(rbacv1.RoleBinding)
  err := c.client.Get(ctx, types.NamespacedName{Name: managerRoleBindingName, Namespace: c.autoscalingRunnerSet.Namespace}, roleBinding)
  switch {
  case err == nil:
    if !controllerutil.ContainsFinalizer(roleBinding, AutoscalingRunnerSetCleanupFinalizerName) {
      c.logger.Info("Manager role binding finalizer has already been removed", "name", managerRoleBindingName)
      return
    }
    err = patch(ctx, c.client, roleBinding, func(obj *rbacv1.RoleBinding) {
      controllerutil.RemoveFinalizer(obj, AutoscalingRunnerSetCleanupFinalizerName)
    })
    if err != nil {
      c.err = fmt.Errorf("failed to patch manager role binding without finalizer: %w", err)
      return
    }
    c.requeue = true
    c.logger.Info("Removed finalizer from manager role binding", "name", managerRoleBindingName)
    return
  case err != nil && !kerrors.IsNotFound(err):
    c.err = fmt.Errorf("failed to fetch manager role binding: %w", err)
    return
  default:
    c.logger.Info("Manager role binding has already been deleted", "name", managerRoleBindingName)
    return
  }
}

func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeManagerRoleFinalizer(ctx context.Context) {
  if c.requeue || c.err != nil {
    return
  }

  managerRoleName, ok := c.autoscalingRunnerSet.Annotations[AnnotationKeyManagerRoleName]
  if !ok {
    c.logger.Info(
      "Skipping cleaning up manager role",
      "reason",
      fmt.Sprintf("annotation key %q not present", AnnotationKeyManagerRoleName),
    )
    return
  }

  c.logger.Info("Removing finalizer from manager role", "name", managerRoleName)

  role := new(rbacv1.Role)
  err := c.client.Get(ctx, types.NamespacedName{Name: managerRoleName, Namespace: c.autoscalingRunnerSet.Namespace}, role)
  switch {
  case err == nil:
    if !controllerutil.ContainsFinalizer(role, AutoscalingRunnerSetCleanupFinalizerName) {
      c.logger.Info("Manager role finalizer has already been removed", "name", managerRoleName)
      return
    }
    err = patch(ctx, c.client, role, func(obj *rbacv1.Role) {
      controllerutil.RemoveFinalizer(obj, AutoscalingRunnerSetCleanupFinalizerName)
    })
    if err != nil {
      c.err = fmt.Errorf("failed to patch manager role without finalizer: %w", err)
      return
    }
    c.requeue = true
    c.logger.Info("Removed finalizer from manager role", "name", managerRoleName)
    return
  case err != nil && !kerrors.IsNotFound(err):
    c.err = fmt.Errorf("failed to fetch manager role: %w", err)
    return
  default:
    c.logger.Info("Manager role has already been deleted", "name", managerRoleName)
    return
  }
}
// NOTE: if this logic should be used for other resources,
// consider using generics
type EphemeralRunnerSets struct {
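The NOTE above hints at a generics-based refactor if this finalizer-removal pattern spreads to more resource kinds. A minimal sketch of what such a helper could look like (hypothetical, not part of this change; it uses client.Update for brevity where the cleaner above goes through its patch helper):

package actionsgithubcom

import (
  "context"
  "fmt"

  "sigs.k8s.io/controller-runtime/pkg/client"
  "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)

// removeCleanupFinalizer is a hypothetical generic helper; T can be any
// client.Object (Role, RoleBinding, ServiceAccount, Secret, ...). It reports
// whether an update was issued so the caller can decide to requeue.
func removeCleanupFinalizer[T client.Object](ctx context.Context, c client.Client, obj T) (bool, error) {
  if !controllerutil.ContainsFinalizer(obj, AutoscalingRunnerSetCleanupFinalizerName) {
    return false, nil
  }
  controllerutil.RemoveFinalizer(obj, AutoscalingRunnerSetCleanupFinalizerName)
  if err := c.Update(ctx, obj); err != nil {
    return false, fmt.Errorf("failed to remove cleanup finalizer: %w", err)
  }
  return true, nil
}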
@@ -13,6 +13,7 @@ import (
  "time"

  corev1 "k8s.io/api/core/v1"
  rbacv1 "k8s.io/api/rbac/v1"
  ctrl "sigs.k8s.io/controller-runtime"
  "sigs.k8s.io/controller-runtime/pkg/client"
  "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
@@ -23,8 +24,10 @@ import (
  . "github.com/onsi/gomega"
  "k8s.io/apimachinery/pkg/api/errors"
  metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
  "k8s.io/apimachinery/pkg/types"

  "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
  "github.com/actions/actions-runner-controller/build"
  "github.com/actions/actions-runner-controller/github/actions"
  "github.com/actions/actions-runner-controller/github/actions/fake"
  "github.com/actions/actions-runner-controller/github/actions/testserver"
@@ -36,19 +39,32 @@ const (
  autoscalingRunnerSetTestGitHubToken = "gh_token"
)

var _ = Describe("Test AutoScalingRunnerSet controller", func() {
var _ = Describe("Test AutoScalingRunnerSet controller", Ordered, func() {
  var ctx context.Context
  var mgr ctrl.Manager
  var controller *AutoscalingRunnerSetReconciler
  var autoscalingNS *corev1.Namespace
  var autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet
  var configSecret *corev1.Secret

  var originalBuildVersion string
  buildVersion := "0.1.0"

  BeforeAll(func() {
    originalBuildVersion = build.Version
    build.Version = buildVersion
  })

  AfterAll(func() {
    build.Version = originalBuildVersion
  })

  BeforeEach(func() {
    ctx = context.Background()
    autoscalingNS, mgr = createNamespace(GinkgoT(), k8sClient)
    configSecret = createDefaultSecret(GinkgoT(), k8sClient, autoscalingNS.Name)

    controller := &AutoscalingRunnerSetReconciler{
    controller = &AutoscalingRunnerSetReconciler{
      Client: mgr.GetClient(),
      Scheme: mgr.GetScheme(),
      Log: logf.Log,
@@ -65,6 +81,9 @@ var _ = Describe("Test AutoScalingRunnerSet controller", func() {
      ObjectMeta: metav1.ObjectMeta{
        Name: "test-asrs",
        Namespace: autoscalingNS.Name,
        Labels: map[string]string{
          LabelKeyKubernetesVersion: buildVersion,
        },
      },
      Spec: v1alpha1.AutoscalingRunnerSetSpec{
        GitHubConfigUrl: "https://github.com/owner/repo",
@@ -278,10 +297,10 @@ var _ = Describe("Test AutoScalingRunnerSet controller", func() {
          return "", fmt.Errorf("We should have only 1 EphemeralRunnerSet, but got %v", len(runnerSetList.Items))
        }

        return runnerSetList.Items[0].Labels[LabelKeyRunnerSpecHash], nil
        return runnerSetList.Items[0].Labels[labelKeyRunnerSpecHash], nil
      },
      autoscalingRunnerSetTestTimeout,
      autoscalingRunnerSetTestInterval).ShouldNot(BeEquivalentTo(runnerSet.Labels[LabelKeyRunnerSpecHash]), "New EphemeralRunnerSet should be created")
      autoscalingRunnerSetTestInterval).ShouldNot(BeEquivalentTo(runnerSet.Labels[labelKeyRunnerSpecHash]), "New EphemeralRunnerSet should be created")

    // We should create a new listener
    Eventually(
@@ -406,6 +425,110 @@ var _ = Describe("Test AutoScalingRunnerSet controller", func() {
    })
  })

  Context("When updating an AutoscalingRunnerSet with running or pending jobs", func() {
    It("It should wait for running and pending jobs to finish before applying the update. Update Strategy is set to eventual.", func() {
      // Switch update strategy to eventual (drain jobs)
      controller.UpdateStrategy = UpdateStrategyEventual
      // Wait till the listener is created
      listener := new(v1alpha1.AutoscalingListener)
      Eventually(
        func() error {
          return k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerName(autoscalingRunnerSet), Namespace: autoscalingRunnerSet.Namespace}, listener)
        },
        autoscalingRunnerSetTestTimeout,
        autoscalingRunnerSetTestInterval,
      ).Should(Succeed(), "Listener should be created")

      // Wait till the ephemeral runner set is created
      Eventually(
        func() (int, error) {
          runnerSetList := new(v1alpha1.EphemeralRunnerSetList)
          err := k8sClient.List(ctx, runnerSetList, client.InNamespace(autoscalingRunnerSet.Namespace))
          if err != nil {
            return 0, err
          }

          return len(runnerSetList.Items), nil
        },
        autoscalingRunnerSetTestTimeout,
        autoscalingRunnerSetTestInterval,
      ).Should(BeEquivalentTo(1), "Only one EphemeralRunnerSet should be created")

      runnerSetList := new(v1alpha1.EphemeralRunnerSetList)
      err := k8sClient.List(ctx, runnerSetList, client.InNamespace(autoscalingRunnerSet.Namespace))
      Expect(err).NotTo(HaveOccurred(), "failed to list EphemeralRunnerSet")

      // Emulate running and pending jobs
      runnerSet := runnerSetList.Items[0]
      activeRunnerSet := runnerSet.DeepCopy()
      activeRunnerSet.Status.CurrentReplicas = 6
      activeRunnerSet.Status.FailedEphemeralRunners = 1
      activeRunnerSet.Status.RunningEphemeralRunners = 2
      activeRunnerSet.Status.PendingEphemeralRunners = 3

      desiredStatus := v1alpha1.AutoscalingRunnerSetStatus{
        CurrentRunners: activeRunnerSet.Status.CurrentReplicas,
        State: "",
        PendingEphemeralRunners: activeRunnerSet.Status.PendingEphemeralRunners,
        RunningEphemeralRunners: activeRunnerSet.Status.RunningEphemeralRunners,
        FailedEphemeralRunners: activeRunnerSet.Status.FailedEphemeralRunners,
      }

      err = k8sClient.Status().Patch(ctx, activeRunnerSet, client.MergeFrom(&runnerSet))
      Expect(err).NotTo(HaveOccurred(), "Failed to patch runner set status")

      Eventually(
        func() (v1alpha1.AutoscalingRunnerSetStatus, error) {
          updated := new(v1alpha1.AutoscalingRunnerSet)
          err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingRunnerSet.Name, Namespace: autoscalingRunnerSet.Namespace}, updated)
          if err != nil {
            return v1alpha1.AutoscalingRunnerSetStatus{}, fmt.Errorf("failed to get AutoScalingRunnerSet: %w", err)
          }
          return updated.Status, nil
        },
        autoscalingRunnerSetTestTimeout,
        autoscalingRunnerSetTestInterval,
      ).Should(BeEquivalentTo(desiredStatus), "AutoScalingRunnerSet status should be updated")

      // Patch the AutoScalingRunnerSet image which should trigger
      // the recreation of the Listener and EphemeralRunnerSet
      patched := autoscalingRunnerSet.DeepCopy()
      patched.Spec.Template.Spec = corev1.PodSpec{
        Containers: []corev1.Container{
          {
            Name: "runner",
            Image: "ghcr.io/actions/abcd:1.1.1",
          },
        },
      }
      // patched.Spec.Template.Spec.PriorityClassName = "test-priority-class"
      err = k8sClient.Patch(ctx, patched, client.MergeFrom(autoscalingRunnerSet))
      Expect(err).NotTo(HaveOccurred(), "failed to patch AutoScalingRunnerSet")
      autoscalingRunnerSet = patched.DeepCopy()

      // The EphemeralRunnerSet should not be recreated
      Consistently(
        func() (string, error) {
          runnerSetList := new(v1alpha1.EphemeralRunnerSetList)
          err := k8sClient.List(ctx, runnerSetList, client.InNamespace(autoscalingRunnerSet.Namespace))
          Expect(err).NotTo(HaveOccurred(), "failed to fetch AutoScalingRunnerSet")
          return runnerSetList.Items[0].Name, nil
        },
        autoscalingRunnerSetTestTimeout,
        autoscalingRunnerSetTestInterval,
      ).Should(Equal(activeRunnerSet.Name), "The EphemeralRunnerSet should not be recreated")

      // The listener should not be recreated
      Consistently(
        func() error {
          return k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerName(autoscalingRunnerSet), Namespace: autoscalingRunnerSet.Namespace}, listener)
        },
        autoscalingRunnerSetTestTimeout,
        autoscalingRunnerSetTestInterval,
      ).ShouldNot(Succeed(), "Listener should not be recreated")
    })
  })

It("Should update Status on EphemeralRunnerSet status Update", func() {
|
||||
ars := new(v1alpha1.AutoscalingRunnerSet)
|
||||
Eventually(
|
||||
@@ -472,7 +595,19 @@ var _ = Describe("Test AutoScalingRunnerSet controller", func() {
|
||||
})
|
||||
})
|
||||
|
||||
var _ = Describe("Test AutoScalingController updates", func() {
|
||||
var _ = Describe("Test AutoScalingController updates", Ordered, func() {
|
||||
var originalBuildVersion string
|
||||
buildVersion := "0.1.0"
|
||||
|
||||
BeforeAll(func() {
|
||||
originalBuildVersion = build.Version
|
||||
build.Version = buildVersion
|
||||
})
|
||||
|
||||
AfterAll(func() {
|
||||
build.Version = originalBuildVersion
|
||||
})
|
||||
|
||||
Context("Creating autoscaling runner set with RunnerScaleSetName set", func() {
|
||||
var ctx context.Context
|
||||
var mgr ctrl.Manager
|
||||
@@ -481,6 +616,7 @@ var _ = Describe("Test AutoScalingController updates", func() {
|
||||
var configSecret *corev1.Secret
|
||||
|
||||
BeforeEach(func() {
|
||||
originalBuildVersion = build.Version
|
||||
ctx = context.Background()
|
||||
autoscalingNS, mgr = createNamespace(GinkgoT(), k8sClient)
|
||||
configSecret = createDefaultSecret(GinkgoT(), k8sClient, autoscalingNS.Name)
|
||||
@@ -526,6 +662,9 @@ var _ = Describe("Test AutoScalingController updates", func() {
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-asrs",
|
||||
Namespace: autoscalingNS.Name,
|
||||
Labels: map[string]string{
|
||||
LabelKeyKubernetesVersion: buildVersion,
|
||||
},
|
||||
},
|
||||
Spec: v1alpha1.AutoscalingRunnerSetSpec{
|
||||
GitHubConfigUrl: "https://github.com/owner/repo",
|
||||
@@ -571,6 +710,7 @@ var _ = Describe("Test AutoScalingController updates", func() {
|
||||
|
||||
update := autoscalingRunnerSet.DeepCopy()
|
||||
update.Spec.RunnerScaleSetName = "testset_update"
|
||||
|
||||
err = k8sClient.Patch(ctx, update, client.MergeFrom(autoscalingRunnerSet))
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to update AutoScalingRunnerSet")
|
||||
|
||||
@@ -595,7 +735,18 @@ var _ = Describe("Test AutoScalingController updates", func() {
|
||||
})
|
||||
})
|
||||
|
||||
var _ = Describe("Test AutoscalingController creation failures", func() {
|
||||
var _ = Describe("Test AutoscalingController creation failures", Ordered, func() {
|
||||
var originalBuildVersion string
|
||||
buildVersion := "0.1.0"
|
||||
|
||||
BeforeAll(func() {
|
||||
originalBuildVersion = build.Version
|
||||
build.Version = buildVersion
|
||||
})
|
||||
|
||||
AfterAll(func() {
|
||||
build.Version = originalBuildVersion
|
||||
})
|
||||
Context("When autoscaling runner set creation fails on the client", func() {
|
||||
var ctx context.Context
|
||||
var mgr ctrl.Manager
|
||||
@@ -626,6 +777,9 @@ var _ = Describe("Test AutoscalingController creation failures", func() {
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-asrs",
|
||||
Namespace: autoscalingNS.Name,
|
||||
Labels: map[string]string{
|
||||
LabelKeyKubernetesVersion: buildVersion,
|
||||
},
|
||||
},
|
||||
Spec: v1alpha1.AutoscalingRunnerSetSpec{
|
||||
GitHubConfigUrl: "https://github.com/owner/repo",
|
||||
@@ -704,7 +858,18 @@ var _ = Describe("Test AutoscalingController creation failures", func() {
|
||||
})
|
||||
})
|
||||
|
||||
var _ = Describe("Test Client optional configuration", func() {
|
||||
var _ = Describe("Test client optional configuration", Ordered, func() {
|
||||
var originalBuildVersion string
|
||||
buildVersion := "0.1.0"
|
||||
|
||||
BeforeAll(func() {
|
||||
originalBuildVersion = build.Version
|
||||
build.Version = buildVersion
|
||||
})
|
||||
|
||||
AfterAll(func() {
|
||||
build.Version = originalBuildVersion
|
||||
})
|
||||
Context("When specifying a proxy", func() {
|
||||
var ctx context.Context
|
||||
var mgr ctrl.Manager
|
||||
@@ -744,6 +909,9 @@ var _ = Describe("Test Client optional configuration", func() {
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-asrs",
|
||||
Namespace: autoscalingNS.Name,
|
||||
Labels: map[string]string{
|
||||
LabelKeyKubernetesVersion: buildVersion,
|
||||
},
|
||||
},
|
||||
Spec: v1alpha1.AutoscalingRunnerSetSpec{
|
||||
GitHubConfigUrl: "http://example.com/org/repo",
|
||||
@@ -820,6 +988,9 @@ var _ = Describe("Test Client optional configuration", func() {
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-asrs",
|
||||
Namespace: autoscalingNS.Name,
|
||||
Labels: map[string]string{
|
||||
LabelKeyKubernetesVersion: buildVersion,
|
||||
},
|
||||
},
|
||||
Spec: v1alpha1.AutoscalingRunnerSetSpec{
|
||||
GitHubConfigUrl: "http://example.com/org/repo",
|
||||
@@ -936,6 +1107,9 @@ var _ = Describe("Test Client optional configuration", func() {
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-asrs",
|
||||
Namespace: autoscalingNS.Name,
|
||||
Labels: map[string]string{
|
||||
LabelKeyKubernetesVersion: buildVersion,
|
||||
},
|
||||
},
|
||||
Spec: v1alpha1.AutoscalingRunnerSetSpec{
|
||||
GitHubConfigUrl: server.ConfigURLForOrg("my-org"),
|
||||
@@ -986,6 +1160,9 @@ var _ = Describe("Test Client optional configuration", func() {
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-asrs",
|
||||
Namespace: autoscalingNS.Name,
|
||||
Labels: map[string]string{
|
||||
LabelKeyKubernetesVersion: buildVersion,
|
||||
},
|
||||
},
|
||||
Spec: v1alpha1.AutoscalingRunnerSetSpec{
|
||||
GitHubConfigUrl: "https://github.com/owner/repo",
|
||||
@@ -1036,7 +1213,7 @@ var _ = Describe("Test Client optional configuration", func() {
|
||||
g.Expect(listener.Spec.GitHubServerTLS).To(BeEquivalentTo(autoscalingRunnerSet.Spec.GitHubServerTLS), "listener does not have TLS config")
|
||||
},
|
||||
autoscalingRunnerSetTestTimeout,
|
||||
autoscalingListenerTestInterval,
|
||||
autoscalingRunnerSetTestInterval,
|
||||
).Should(Succeed(), "tls config is incorrect")
|
||||
})
|
||||
|
||||
@@ -1047,6 +1224,9 @@ var _ = Describe("Test Client optional configuration", func() {
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-asrs",
|
||||
Namespace: autoscalingNS.Name,
|
||||
Labels: map[string]string{
|
||||
LabelKeyKubernetesVersion: buildVersion,
|
||||
},
|
||||
},
|
||||
Spec: v1alpha1.AutoscalingRunnerSetSpec{
|
||||
GitHubConfigUrl: "https://github.com/owner/repo",
|
||||
@@ -1093,8 +1273,463 @@ var _ = Describe("Test Client optional configuration", func() {
|
||||
g.Expect(runnerSet.Spec.EphemeralRunnerSpec.GitHubServerTLS).To(BeEquivalentTo(autoscalingRunnerSet.Spec.GitHubServerTLS), "EphemeralRunnerSpec does not have TLS config")
|
||||
},
|
||||
autoscalingRunnerSetTestTimeout,
|
||||
autoscalingListenerTestInterval,
|
||||
autoscalingRunnerSetTestInterval,
|
||||
).Should(Succeed())
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
var _ = Describe("Test external permissions cleanup", Ordered, func() {
|
||||
var originalBuildVersion string
|
||||
buildVersion := "0.1.0"
|
||||
|
||||
BeforeAll(func() {
|
||||
originalBuildVersion = build.Version
|
||||
build.Version = buildVersion
|
||||
})
|
||||
|
||||
AfterAll(func() {
|
||||
build.Version = originalBuildVersion
|
||||
})
|
||||
|
||||
It("Should clean up kubernetes mode permissions", func() {
|
||||
ctx := context.Background()
|
||||
autoscalingNS, mgr := createNamespace(GinkgoT(), k8sClient)
|
||||
|
||||
configSecret := createDefaultSecret(GinkgoT(), k8sClient, autoscalingNS.Name)
|
||||
|
||||
controller := &AutoscalingRunnerSetReconciler{
|
||||
Client: mgr.GetClient(),
|
||||
Scheme: mgr.GetScheme(),
|
||||
Log: logf.Log,
|
||||
ControllerNamespace: autoscalingNS.Name,
|
||||
DefaultRunnerScaleSetListenerImage: "ghcr.io/actions/arc",
|
||||
ActionsClient: fake.NewMultiClient(),
|
||||
}
|
||||
err := controller.SetupWithManager(mgr)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to setup controller")
|
||||
|
||||
startManagers(GinkgoT(), mgr)
|
||||
|
||||
min := 1
|
||||
max := 10
|
||||
autoscalingRunnerSet := &v1alpha1.AutoscalingRunnerSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-asrs",
|
||||
Namespace: autoscalingNS.Name,
|
||||
Labels: map[string]string{
|
||||
"app.kubernetes.io/name": "gha-runner-scale-set",
|
||||
LabelKeyKubernetesVersion: buildVersion,
|
||||
},
|
||||
Annotations: map[string]string{
|
||||
AnnotationKeyKubernetesModeRoleBindingName: "kube-mode-role-binding",
|
||||
AnnotationKeyKubernetesModeRoleName: "kube-mode-role",
|
||||
AnnotationKeyKubernetesModeServiceAccountName: "kube-mode-service-account",
|
||||
},
|
||||
},
|
||||
Spec: v1alpha1.AutoscalingRunnerSetSpec{
|
||||
GitHubConfigUrl: "https://github.com/owner/repo",
|
||||
GitHubConfigSecret: configSecret.Name,
|
||||
MaxRunners: &max,
|
||||
MinRunners: &min,
|
||||
RunnerGroup: "testgroup",
|
||||
Template: corev1.PodTemplateSpec{
|
||||
Spec: corev1.PodSpec{
|
||||
Containers: []corev1.Container{
|
||||
{
|
||||
Name: "runner",
|
||||
Image: "ghcr.io/actions/runner",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
role := &rbacv1.Role{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: autoscalingRunnerSet.Annotations[AnnotationKeyKubernetesModeRoleName],
|
||||
Namespace: autoscalingRunnerSet.Namespace,
|
||||
Finalizers: []string{AutoscalingRunnerSetCleanupFinalizerName},
|
||||
},
|
||||
}
|
||||
|
||||
err = k8sClient.Create(ctx, role)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to create kubernetes mode role")
|
||||
|
||||
serviceAccount := &corev1.ServiceAccount{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: autoscalingRunnerSet.Annotations[AnnotationKeyKubernetesModeServiceAccountName],
|
||||
Namespace: autoscalingRunnerSet.Namespace,
|
||||
Finalizers: []string{AutoscalingRunnerSetCleanupFinalizerName},
|
||||
},
|
||||
}
|
||||
|
||||
err = k8sClient.Create(ctx, serviceAccount)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to create kubernetes mode service account")
|
||||
|
||||
roleBinding := &rbacv1.RoleBinding{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: autoscalingRunnerSet.Annotations[AnnotationKeyKubernetesModeRoleBindingName],
|
||||
Namespace: autoscalingRunnerSet.Namespace,
|
||||
Finalizers: []string{AutoscalingRunnerSetCleanupFinalizerName},
|
||||
},
|
||||
Subjects: []rbacv1.Subject{
|
||||
{
|
||||
Kind: "ServiceAccount",
|
||||
Name: serviceAccount.Name,
|
||||
Namespace: serviceAccount.Namespace,
|
||||
},
|
||||
},
|
||||
RoleRef: rbacv1.RoleRef{
|
||||
APIGroup: rbacv1.GroupName,
|
||||
// Kind is the type of resource being referenced
|
||||
Kind: "Role",
|
||||
Name: role.Name,
|
||||
},
|
||||
}
|
||||
|
||||
err = k8sClient.Create(ctx, roleBinding)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to create kubernetes mode role binding")
|
||||
|
||||
err = k8sClient.Create(ctx, autoscalingRunnerSet)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to create AutoScalingRunnerSet")
|
||||
|
||||
Eventually(
|
||||
func() (string, error) {
|
||||
created := new(v1alpha1.AutoscalingRunnerSet)
|
||||
err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingRunnerSet.Name, Namespace: autoscalingRunnerSet.Namespace}, created)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if len(created.Finalizers) == 0 {
|
||||
return "", nil
|
||||
}
|
||||
return created.Finalizers[0], nil
|
||||
},
|
||||
autoscalingRunnerSetTestTimeout,
|
||||
autoscalingRunnerSetTestInterval,
|
||||
).Should(BeEquivalentTo(autoscalingRunnerSetFinalizerName), "AutoScalingRunnerSet should have a finalizer")
|
||||
|
||||
err = k8sClient.Delete(ctx, autoscalingRunnerSet)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to delete autoscaling runner set")
|
||||
|
||||
err = k8sClient.Delete(ctx, roleBinding)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to delete kubernetes mode role binding")
|
||||
|
||||
err = k8sClient.Delete(ctx, role)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to delete kubernetes mode role")
|
||||
|
||||
err = k8sClient.Delete(ctx, serviceAccount)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to delete kubernetes mode service account")
|
||||
|
||||
Eventually(
|
||||
func() bool {
|
||||
r := new(rbacv1.RoleBinding)
|
||||
err := k8sClient.Get(ctx, types.NamespacedName{
|
||||
Name: roleBinding.Name,
|
||||
Namespace: roleBinding.Namespace,
|
||||
}, r)
|
||||
|
||||
return errors.IsNotFound(err)
|
||||
},
|
||||
autoscalingRunnerSetTestTimeout,
|
||||
autoscalingRunnerSetTestInterval,
|
||||
).Should(BeTrue(), "Expected role binding to be cleaned up")
|
||||
|
||||
Eventually(
|
||||
func() bool {
|
||||
r := new(rbacv1.Role)
|
||||
err := k8sClient.Get(ctx, types.NamespacedName{
|
||||
Name: role.Name,
|
||||
Namespace: role.Namespace,
|
||||
}, r)
|
||||
|
||||
return errors.IsNotFound(err)
|
||||
},
|
||||
autoscalingRunnerSetTestTimeout,
|
||||
autoscalingRunnerSetTestInterval,
|
||||
).Should(BeTrue(), "Expected role to be cleaned up")
|
||||
})
|
||||
|
||||
It("Should clean up manager permissions and no-permission service account", func() {
|
||||
ctx := context.Background()
|
||||
autoscalingNS, mgr := createNamespace(GinkgoT(), k8sClient)
|
||||
|
||||
controller := &AutoscalingRunnerSetReconciler{
|
||||
Client: mgr.GetClient(),
|
||||
Scheme: mgr.GetScheme(),
|
||||
Log: logf.Log,
|
||||
ControllerNamespace: autoscalingNS.Name,
|
||||
DefaultRunnerScaleSetListenerImage: "ghcr.io/actions/arc",
|
||||
ActionsClient: fake.NewMultiClient(),
|
||||
}
|
||||
err := controller.SetupWithManager(mgr)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to setup controller")
|
||||
|
||||
startManagers(GinkgoT(), mgr)
|
||||
|
||||
min := 1
|
||||
max := 10
|
||||
autoscalingRunnerSet := &v1alpha1.AutoscalingRunnerSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-asrs",
|
||||
Namespace: autoscalingNS.Name,
|
||||
Labels: map[string]string{
|
||||
"app.kubernetes.io/name": "gha-runner-scale-set",
|
||||
LabelKeyKubernetesVersion: buildVersion,
|
||||
},
|
||||
Annotations: map[string]string{
|
||||
AnnotationKeyManagerRoleName: "manager-role",
|
||||
AnnotationKeyManagerRoleBindingName: "manager-role-binding",
|
||||
AnnotationKeyGitHubSecretName: "gh-secret-name",
|
||||
AnnotationKeyNoPermissionServiceAccountName: "no-permission-sa",
|
||||
},
|
||||
},
|
||||
Spec: v1alpha1.AutoscalingRunnerSetSpec{
|
||||
GitHubConfigUrl: "https://github.com/owner/repo",
|
||||
MaxRunners: &max,
|
||||
MinRunners: &min,
|
||||
RunnerGroup: "testgroup",
|
||||
Template: corev1.PodTemplateSpec{
|
||||
Spec: corev1.PodSpec{
|
||||
Containers: []corev1.Container{
|
||||
{
|
||||
Name: "runner",
|
||||
Image: "ghcr.io/actions/runner",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
secret := &corev1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: autoscalingRunnerSet.Annotations[AnnotationKeyGitHubSecretName],
|
||||
Namespace: autoscalingRunnerSet.Namespace,
|
||||
Finalizers: []string{AutoscalingRunnerSetCleanupFinalizerName},
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
"github_token": []byte(defaultGitHubToken),
|
||||
},
|
||||
}
|
||||
|
||||
err = k8sClient.Create(context.Background(), secret)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to create github secret")
|
||||
|
||||
autoscalingRunnerSet.Spec.GitHubConfigSecret = secret.Name
|
||||
|
||||
role := &rbacv1.Role{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: autoscalingRunnerSet.Annotations[AnnotationKeyManagerRoleName],
|
||||
Namespace: autoscalingRunnerSet.Namespace,
|
||||
Finalizers: []string{AutoscalingRunnerSetCleanupFinalizerName},
|
||||
},
|
||||
}
|
||||
|
||||
err = k8sClient.Create(ctx, role)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to create manager role")
|
||||
|
||||
roleBinding := &rbacv1.RoleBinding{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: autoscalingRunnerSet.Annotations[AnnotationKeyManagerRoleBindingName],
|
||||
Namespace: autoscalingRunnerSet.Namespace,
|
||||
Finalizers: []string{AutoscalingRunnerSetCleanupFinalizerName},
|
||||
},
|
||||
RoleRef: rbacv1.RoleRef{
|
||||
APIGroup: rbacv1.GroupName,
|
||||
Kind: "Role",
|
||||
Name: role.Name,
|
||||
},
|
||||
}
|
||||
|
||||
err = k8sClient.Create(ctx, roleBinding)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to create manager role binding")
|
||||
|
||||
noPermissionServiceAccount := &corev1.ServiceAccount{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: autoscalingRunnerSet.Annotations[AnnotationKeyNoPermissionServiceAccountName],
|
||||
Namespace: autoscalingRunnerSet.Namespace,
|
||||
Finalizers: []string{AutoscalingRunnerSetCleanupFinalizerName},
|
||||
},
|
||||
}
|
||||
|
||||
err = k8sClient.Create(ctx, noPermissionServiceAccount)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to create no permission service account")
|
||||
|
||||
err = k8sClient.Create(ctx, autoscalingRunnerSet)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to create AutoScalingRunnerSet")
|
||||
|
||||
Eventually(
|
||||
func() (string, error) {
|
||||
created := new(v1alpha1.AutoscalingRunnerSet)
|
||||
err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingRunnerSet.Name, Namespace: autoscalingRunnerSet.Namespace}, created)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if len(created.Finalizers) == 0 {
|
||||
return "", nil
|
||||
}
|
||||
return created.Finalizers[0], nil
|
||||
},
|
||||
autoscalingRunnerSetTestTimeout,
|
||||
autoscalingRunnerSetTestInterval,
|
||||
).Should(BeEquivalentTo(autoscalingRunnerSetFinalizerName), "AutoScalingRunnerSet should have a finalizer")
|
||||
|
||||
err = k8sClient.Delete(ctx, autoscalingRunnerSet)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to delete autoscaling runner set")
|
||||
|
||||
err = k8sClient.Delete(ctx, noPermissionServiceAccount)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to delete no permission service account")
|
||||
|
||||
err = k8sClient.Delete(ctx, secret)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to delete GitHub secret")
|
||||
|
||||
err = k8sClient.Delete(ctx, roleBinding)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to delete manager role binding")
|
||||
|
||||
err = k8sClient.Delete(ctx, role)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to delete manager role")
|
||||
|
||||
Eventually(
|
||||
func() bool {
|
||||
r := new(corev1.ServiceAccount)
|
||||
err := k8sClient.Get(
|
||||
ctx,
|
||||
types.NamespacedName{
|
||||
Name: noPermissionServiceAccount.Name,
|
||||
Namespace: noPermissionServiceAccount.Namespace,
|
||||
},
|
||||
r,
|
||||
)
|
||||
return errors.IsNotFound(err)
|
||||
},
|
||||
autoscalingRunnerSetTestTimeout,
|
||||
autoscalingRunnerSetTestInterval,
|
||||
).Should(BeTrue(), "Expected no permission service account to be cleaned up")
|
||||
|
||||
Eventually(
|
||||
func() bool {
|
||||
r := new(corev1.Secret)
|
||||
err := k8sClient.Get(ctx, types.NamespacedName{
|
||||
Name: secret.Name,
|
||||
Namespace: secret.Namespace,
|
||||
}, r)
|
||||
|
||||
return errors.IsNotFound(err)
|
||||
},
|
||||
autoscalingRunnerSetTestTimeout,
|
||||
autoscalingRunnerSetTestInterval,
|
||||
).Should(BeTrue(), "Expected role binding to be cleaned up")
|
||||
|
||||
Eventually(
|
||||
func() bool {
|
||||
r := new(rbacv1.RoleBinding)
|
||||
err := k8sClient.Get(ctx, types.NamespacedName{
|
||||
Name: roleBinding.Name,
|
||||
Namespace: roleBinding.Namespace,
|
||||
}, r)
|
||||
|
||||
return errors.IsNotFound(err)
|
||||
},
|
||||
autoscalingRunnerSetTestTimeout,
|
||||
autoscalingRunnerSetTestInterval,
|
||||
).Should(BeTrue(), "Expected role binding to be cleaned up")
|
||||
|
||||
Eventually(
|
||||
func() bool {
|
||||
r := new(rbacv1.Role)
|
||||
err := k8sClient.Get(
|
||||
ctx,
|
||||
types.NamespacedName{
|
||||
Name: role.Name,
|
||||
Namespace: role.Namespace,
|
||||
},
|
||||
r,
|
||||
)
|
||||
|
||||
return errors.IsNotFound(err)
|
||||
},
|
||||
autoscalingRunnerSetTestTimeout,
|
||||
autoscalingRunnerSetTestInterval,
|
||||
).Should(BeTrue(), "Expected role to be cleaned up")
|
||||
})
|
||||
})
|
||||
|
||||
var _ = Describe("Test resource version and build version mismatch", func() {
|
||||
It("Should delete and recreate the autoscaling runner set to match the build version", func() {
|
||||
ctx := context.Background()
|
||||
autoscalingNS, mgr := createNamespace(GinkgoT(), k8sClient)
|
||||
|
||||
configSecret := createDefaultSecret(GinkgoT(), k8sClient, autoscalingNS.Name)
|
||||
|
||||
controller := &AutoscalingRunnerSetReconciler{
|
||||
Client: mgr.GetClient(),
|
||||
Scheme: mgr.GetScheme(),
|
||||
Log: logf.Log,
|
||||
ControllerNamespace: autoscalingNS.Name,
|
||||
DefaultRunnerScaleSetListenerImage: "ghcr.io/actions/arc",
|
||||
ActionsClient: fake.NewMultiClient(),
|
||||
}
|
||||
err := controller.SetupWithManager(mgr)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to setup controller")
|
||||
|
||||
originalVersion := build.Version
|
||||
defer func() {
|
||||
build.Version = originalVersion
|
||||
}()
|
||||
build.Version = "0.2.0"
|
||||
|
||||
min := 1
|
||||
max := 10
|
||||
autoscalingRunnerSet := &v1alpha1.AutoscalingRunnerSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-asrs",
|
||||
Namespace: autoscalingNS.Name,
|
||||
Labels: map[string]string{
|
||||
"app.kubernetes.io/name": "gha-runner-scale-set",
|
||||
"app.kubernetes.io/version": "0.1.0",
|
||||
},
|
||||
Annotations: map[string]string{
|
||||
AnnotationKeyKubernetesModeRoleBindingName: "kube-mode-role-binding",
|
||||
AnnotationKeyKubernetesModeRoleName: "kube-mode-role",
|
||||
AnnotationKeyKubernetesModeServiceAccountName: "kube-mode-service-account",
|
||||
},
|
||||
},
|
||||
Spec: v1alpha1.AutoscalingRunnerSetSpec{
|
||||
GitHubConfigUrl: "https://github.com/owner/repo",
|
||||
GitHubConfigSecret: configSecret.Name,
|
||||
MaxRunners: &max,
|
||||
MinRunners: &min,
|
||||
RunnerGroup: "testgroup",
|
||||
Template: corev1.PodTemplateSpec{
|
||||
Spec: corev1.PodSpec{
|
||||
Containers: []corev1.Container{
|
||||
{
|
||||
Name: "runner",
|
||||
Image: "ghcr.io/actions/runner",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// create autoscaling runner set before starting a manager
|
||||
err = k8sClient.Create(ctx, autoscalingRunnerSet)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
startManagers(GinkgoT(), mgr)
|
||||
|
||||
Eventually(
|
||||
func() bool {
|
||||
ars := new(v1alpha1.AutoscalingRunnerSet)
|
||||
err := k8sClient.Get(ctx, types.NamespacedName{Namespace: autoscalingRunnerSet.Namespace, Name: autoscalingRunnerSet.Name}, ars)
|
||||
return errors.IsNotFound(err)
|
||||
},
|
||||
autoscalingRunnerSetTestTimeout,
|
||||
autoscalingRunnerSetTestInterval,
|
||||
).Should(BeTrue())
|
||||
})
|
||||
})
|
||||
|
||||
@@ -1,5 +1,10 @@
package actionsgithubcom

import (
  "github.com/actions/actions-runner-controller/logging"
  corev1 "k8s.io/api/core/v1"
)

const (
  LabelKeyRunnerTemplateHash = "runner-template-hash"
  LabelKeyPodTemplateHash = "pod-template-hash"
@@ -16,3 +21,53 @@ const (
  EnvVarHTTPSProxy = "https_proxy"
  EnvVarNoProxy = "no_proxy"
)

// Labels applied to resources
const (
  // Kubernetes labels
  LabelKeyKubernetesPartOf = "app.kubernetes.io/part-of"
  LabelKeyKubernetesComponent = "app.kubernetes.io/component"
  LabelKeyKubernetesVersion = "app.kubernetes.io/version"

  // Github labels
  LabelKeyGitHubScaleSetName = "actions.github.com/scale-set-name"
  LabelKeyGitHubScaleSetNamespace = "actions.github.com/scale-set-namespace"
  LabelKeyGitHubEnterprise = "actions.github.com/enterprise"
  LabelKeyGitHubOrganization = "actions.github.com/organization"
  LabelKeyGitHubRepository = "actions.github.com/repository"
)

// Finalizer used to protect resources from deletion while AutoscalingRunnerSet is running
const AutoscalingRunnerSetCleanupFinalizerName = "actions.github.com/cleanup-protection"

const AnnotationKeyGitHubRunnerGroupName = "actions.github.com/runner-group-name"

// Labels applied to listener roles
const (
  labelKeyListenerName = "auto-scaling-listener-name"
  labelKeyListenerNamespace = "auto-scaling-listener-namespace"
)

// Annotations applied for later cleanup of resources
const (
  AnnotationKeyManagerRoleBindingName = "actions.github.com/cleanup-manager-role-binding"
  AnnotationKeyManagerRoleName = "actions.github.com/cleanup-manager-role-name"
  AnnotationKeyKubernetesModeRoleName = "actions.github.com/cleanup-kubernetes-mode-role-name"
  AnnotationKeyKubernetesModeRoleBindingName = "actions.github.com/cleanup-kubernetes-mode-role-binding-name"
  AnnotationKeyKubernetesModeServiceAccountName = "actions.github.com/cleanup-kubernetes-mode-service-account-name"
  AnnotationKeyGitHubSecretName = "actions.github.com/cleanup-github-secret-name"
  AnnotationKeyNoPermissionServiceAccountName = "actions.github.com/cleanup-no-permission-service-account-name"
)

// DefaultScaleSetListenerImagePullPolicy is the default pull policy applied
// to the listener when ImagePullPolicy is not specified
const DefaultScaleSetListenerImagePullPolicy = corev1.PullIfNotPresent

// DefaultScaleSetListenerLogLevel is the default log level applied
const DefaultScaleSetListenerLogLevel = string(logging.LogLevelDebug)

// DefaultScaleSetListenerLogFormat is the default log format applied
const DefaultScaleSetListenerLogFormat = string(logging.LogFormatText)

// resourceOwnerKey is a field selector matching the owner name of a particular resource
const resourceOwnerKey = ".metadata.controller"

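For context, resourceOwnerKey is consumed as a controller-runtime field index, which lets a reconciler list only the children of one owner. A minimal sketch of registering and querying such an index (function names here are illustrative assumptions; the repo's actual wiring appears in the next file's diff):

package actionsgithubcom

import (
  "context"

  metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
  ctrl "sigs.k8s.io/controller-runtime"
  "sigs.k8s.io/controller-runtime/pkg/client"

  "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
)

// indexEphemeralRunnersByOwner registers an index keyed on the owning
// EphemeralRunnerSet's name for every EphemeralRunner in the cache.
func indexEphemeralRunnersByOwner(mgr ctrl.Manager) error {
  return mgr.GetFieldIndexer().IndexField(context.Background(), &v1alpha1.EphemeralRunner{}, resourceOwnerKey, func(obj client.Object) []string {
    owner := metav1.GetControllerOf(obj)
    if owner == nil || owner.Kind != "EphemeralRunnerSet" {
      return nil
    }
    return []string{owner.Name}
  })
}

// listChildRunners uses the index so the list is served from the cache
// without scanning every EphemeralRunner in the namespace.
func listChildRunners(ctx context.Context, c client.Client, ns, ownerName string) (*v1alpha1.EphemeralRunnerList, error) {
  list := new(v1alpha1.EphemeralRunnerList)
  err := c.List(ctx, list, client.InNamespace(ns), client.MatchingFields{resourceOwnerKey: ownerName})
  return list, err
}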
@@ -25,6 +25,7 @@ import (
  "strings"

  "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
  "github.com/actions/actions-runner-controller/controllers/actions.github.com/metrics"
  "github.com/actions/actions-runner-controller/github/actions"
  "github.com/go-logr/logr"
  "go.uber.org/multierr"
@@ -40,8 +41,7 @@ import (
)

const (
  ephemeralRunnerSetReconcilerOwnerKey = ".metadata.controller"
  ephemeralRunnerSetFinalizerName = "ephemeralrunner.actions.github.com/finalizer"
  ephemeralRunnerSetFinalizerName = "ephemeralrunner.actions.github.com/finalizer"
)

// EphemeralRunnerSetReconciler reconciles a EphemeralRunnerSet object
@@ -51,11 +51,14 @@ type EphemeralRunnerSetReconciler struct {
  Scheme *runtime.Scheme
  ActionsClient actions.MultiClient

  PublishMetrics bool

  resourceBuilder resourceBuilder
}

//+kubebuilder:rbac:groups=actions.github.com,resources=ephemeralrunnersets,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=actions.github.com,resources=ephemeralrunnersets/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=actions.github.com,resources=ephemeralrunnersets/finalizers,verbs=update;patch
//+kubebuilder:rbac:groups=actions.github.com,resources=ephemeralrunners,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=actions.github.com,resources=ephemeralrunners/status,verbs=get

@@ -146,7 +149,7 @@ func (r *EphemeralRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl.R
    ctx,
    ephemeralRunnerList,
    client.InNamespace(req.Namespace),
    client.MatchingFields{ephemeralRunnerSetReconcilerOwnerKey: req.Name},
    client.MatchingFields{resourceOwnerKey: req.Name},
  )
  if err != nil {
    log.Error(err, "Unable to list child ephemeral runners")
@@ -163,6 +166,29 @@ func (r *EphemeralRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl.R
    "deleting", len(deletingEphemeralRunners),
  )

  if r.PublishMetrics {
    githubConfigURL := ephemeralRunnerSet.Spec.EphemeralRunnerSpec.GitHubConfigUrl
    parsedURL, err := actions.ParseGitHubConfigFromURL(githubConfigURL)
    if err != nil {
      log.Error(err, "Github Config URL is invalid", "URL", githubConfigURL)
      // stop reconciling on this object
      return ctrl.Result{}, nil
    }

    metrics.SetEphemeralRunnerCountsByStatus(
      metrics.CommonLabels{
        Name: ephemeralRunnerSet.Labels[LabelKeyGitHubScaleSetName],
        Namespace: ephemeralRunnerSet.Labels[LabelKeyGitHubScaleSetNamespace],
        Repository: parsedURL.Repository,
        Organization: parsedURL.Organization,
        Enterprise: parsedURL.Enterprise,
      },
      len(pendingEphemeralRunners),
      len(runningEphemeralRunners),
      len(failedEphemeralRunners),
    )
  }

  // cleanup finished runners and proceed
  var errs []error
  for i := range finishedEphemeralRunners {
@@ -242,7 +268,7 @@ func (r *EphemeralRunnerSetReconciler) cleanUpProxySecret(ctx context.Context, e

func (r *EphemeralRunnerSetReconciler) cleanUpEphemeralRunners(ctx context.Context, ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet, log logr.Logger) (bool, error) {
  ephemeralRunnerList := new(v1alpha1.EphemeralRunnerList)
  err := r.List(ctx, ephemeralRunnerList, client.InNamespace(ephemeralRunnerSet.Namespace), client.MatchingFields{ephemeralRunnerSetReconcilerOwnerKey: ephemeralRunnerSet.Name})
  err := r.List(ctx, ephemeralRunnerList, client.InNamespace(ephemeralRunnerSet.Namespace), client.MatchingFields{resourceOwnerKey: ephemeralRunnerSet.Name})
  if err != nil {
    return false, fmt.Errorf("failed to list child ephemeral runners: %v", err)
  }
@@ -521,7 +547,7 @@ func (r *EphemeralRunnerSetReconciler) actionsClientOptionsFor(ctx context.Conte
// SetupWithManager sets up the controller with the Manager.
func (r *EphemeralRunnerSetReconciler) SetupWithManager(mgr ctrl.Manager) error {
  // Index EphemeralRunner owned by EphemeralRunnerSet so we can perform faster look ups.
  if err := mgr.GetFieldIndexer().IndexField(context.Background(), &v1alpha1.EphemeralRunner{}, ephemeralRunnerSetReconcilerOwnerKey, func(rawObj client.Object) []string {
  if err := mgr.GetFieldIndexer().IndexField(context.Background(), &v1alpha1.EphemeralRunner{}, resourceOwnerKey, func(rawObj client.Object) []string {
    groupVersion := v1alpha1.GroupVersion.String()

    // grab the job object, extract the owner...

92 controllers/actions.github.com/metrics/metrics.go (Normal file)
@@ -0,0 +1,92 @@
package metrics

import (
  "github.com/prometheus/client_golang/prometheus"
  "sigs.k8s.io/controller-runtime/pkg/metrics"
)

var githubScaleSetControllerSubsystem = "gha_controller"

var labels = []string{
  "name",
  "namespace",
  "repository",
  "organization",
  "enterprise",
}

type CommonLabels struct {
  Name string
  Namespace string
  Repository string
  Organization string
  Enterprise string
}

func (l *CommonLabels) labels() prometheus.Labels {
  return prometheus.Labels{
    "name": l.Name,
    "namespace": l.Namespace,
    "repository": l.Repository,
    "organization": l.Organization,
    "enterprise": l.Enterprise,
  }
}

var (
  pendingEphemeralRunners = prometheus.NewGaugeVec(
    prometheus.GaugeOpts{
      Subsystem: githubScaleSetControllerSubsystem,
      Name: "pending_ephemeral_runners",
      Help: "Number of ephemeral runners in a pending state.",
    },
    labels,
  )
  runningEphemeralRunners = prometheus.NewGaugeVec(
    prometheus.GaugeOpts{
      Subsystem: githubScaleSetControllerSubsystem,
      Name: "running_ephemeral_runners",
      Help: "Number of ephemeral runners in a running state.",
    },
    labels,
  )
  failedEphemeralRunners = prometheus.NewGaugeVec(
    prometheus.GaugeOpts{
      Subsystem: githubScaleSetControllerSubsystem,
      Name: "failed_ephemeral_runners",
      Help: "Number of ephemeral runners in a failed state.",
    },
    labels,
  )
  runningListeners = prometheus.NewGaugeVec(
    prometheus.GaugeOpts{
      Subsystem: githubScaleSetControllerSubsystem,
      Name: "running_listeners",
      Help: "Number of listeners in a running state.",
    },
    labels,
  )
)

func RegisterMetrics() {
  metrics.Registry.MustRegister(
    pendingEphemeralRunners,
    runningEphemeralRunners,
    failedEphemeralRunners,
    runningListeners,
  )
}

func SetEphemeralRunnerCountsByStatus(commonLabels CommonLabels, pending, running, failed int) {
  pendingEphemeralRunners.With(commonLabels.labels()).Set(float64(pending))
  runningEphemeralRunners.With(commonLabels.labels()).Set(float64(running))
  failedEphemeralRunners.With(commonLabels.labels()).Set(float64(failed))
}

func AddRunningListener(commonLabels CommonLabels) {
  runningListeners.With(commonLabels.labels()).Set(1)
}

func SubRunningListener(commonLabels CommonLabels) {
  runningListeners.With(commonLabels.labels()).Set(0)
}

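With the gha_controller subsystem prefix above, the gauges surface as gha_controller_pending_ephemeral_runners and friends on the controller's metrics endpoint. A minimal caller-side sketch (the label values are examples, not taken from this diff):

package main

import (
  "github.com/actions/actions-runner-controller/controllers/actions.github.com/metrics"
)

func main() {
  // Register the gauges once at startup, before any reconciler publishes.
  metrics.RegisterMetrics()

  // Publish counts for one scale set; repeated calls with the same labels
  // overwrite the gauge values rather than accumulating.
  metrics.SetEphemeralRunnerCountsByStatus(
    metrics.CommonLabels{
      Name:         "my-scale-set", // example values
      Namespace:    "arc-runners",
      Organization: "my-org",
    },
    3, // pending
    2, // running
    1, // failed
  )
}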
@@ -4,12 +4,14 @@ import (
  "context"
  "fmt"
  "math"
  "net"
  "strconv"

  "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
  "github.com/actions/actions-runner-controller/build"
  "github.com/actions/actions-runner-controller/github/actions"
  "github.com/actions/actions-runner-controller/hash"
  "github.com/actions/actions-runner-controller/logging"
  corev1 "k8s.io/api/core/v1"
  rbacv1 "k8s.io/api/rbac/v1"
  metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -20,29 +22,6 @@ const (
  jitTokenKey = "jitToken"
)

// Labels applied to resources
const (
  // Kubernetes labels
  LabelKeyKubernetesPartOf = "app.kubernetes.io/part-of"
  LabelKeyKubernetesComponent = "app.kubernetes.io/component"
  LabelKeyKubernetesVersion = "app.kubernetes.io/version"

  // Github labels
  LabelKeyGitHubScaleSetName = "actions.github.com/scale-set-name"
  LabelKeyGitHubScaleSetNamespace = "actions.github.com/scale-set-namespace"
  LabelKeyGitHubEnterprise = "actions.github.com/enterprise"
  LabelKeyGitHubOrganization = "actions.github.com/organization"
  LabelKeyGitHubRepository = "actions.github.com/repository"
)

const AnnotationKeyGitHubRunnerGroupName = "actions.github.com/runner-group-name"

// Labels applied to listener roles
const (
  labelKeyListenerName = "auto-scaling-listener-name"
  labelKeyListenerNamespace = "auto-scaling-listener-namespace"
)

var commonLabelKeys = [...]string{
  LabelKeyKubernetesPartOf,
  LabelKeyKubernetesComponent,
@@ -56,9 +35,102 @@ var commonLabelKeys = [...]string{

const labelValueKubernetesPartOf = "gha-runner-scale-set"

// scaleSetListenerImagePullPolicy is applied to all listeners
var scaleSetListenerImagePullPolicy = DefaultScaleSetListenerImagePullPolicy

func SetListenerImagePullPolicy(pullPolicy string) bool {
  switch p := corev1.PullPolicy(pullPolicy); p {
  case corev1.PullAlways, corev1.PullNever, corev1.PullIfNotPresent:
    scaleSetListenerImagePullPolicy = p
    return true
  default:
    return false
  }
}

var scaleSetListenerLogLevel = DefaultScaleSetListenerLogLevel
var scaleSetListenerLogFormat = DefaultScaleSetListenerLogFormat

func SetListenerLoggingParameters(level string, format string) bool {
  switch level {
  case logging.LogLevelDebug, logging.LogLevelInfo, logging.LogLevelWarn, logging.LogLevelError:
  default:
    return false
  }

  switch format {
  case logging.LogFormatJSON, logging.LogFormatText:
  default:
    return false
  }

  scaleSetListenerLogLevel = level
  scaleSetListenerLogFormat = format
  return true
}

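Both setters validate their input and return false on unknown values rather than silently falling back. A sketch of how they could be wired to startup flags (the flag names are illustrative assumptions, not taken from this diff):

package main

import (
  "flag"
  "log"

  actionsgithubcom "github.com/actions/actions-runner-controller/controllers/actions.github.com"
)

func main() {
  // Hypothetical flag names for illustration only.
  pullPolicy := flag.String("listener-image-pull-policy", string(actionsgithubcom.DefaultScaleSetListenerImagePullPolicy), "pull policy for listener pods")
  logLevel := flag.String("listener-log-level", actionsgithubcom.DefaultScaleSetListenerLogLevel, "log level passed to listener pods")
  logFormat := flag.String("listener-log-format", actionsgithubcom.DefaultScaleSetListenerLogFormat, "log format passed to listener pods")
  flag.Parse()

  // Reject bad values at startup instead of deploying misconfigured listeners.
  if !actionsgithubcom.SetListenerImagePullPolicy(*pullPolicy) {
    log.Fatalf("invalid listener image pull policy: %q", *pullPolicy)
  }
  if !actionsgithubcom.SetListenerLoggingParameters(*logLevel, *logFormat) {
    log.Fatalf("invalid listener log level/format: %q/%q", *logLevel, *logFormat)
  }
}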
type resourceBuilder struct{}

func (b *resourceBuilder) newScaleSetListenerPod(autoscalingListener *v1alpha1.AutoscalingListener, serviceAccount *corev1.ServiceAccount, secret *corev1.Secret, envs ...corev1.EnvVar) *corev1.Pod {
func (b *resourceBuilder) newAutoScalingListener(autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet, namespace, image string, imagePullSecrets []corev1.LocalObjectReference) (*v1alpha1.AutoscalingListener, error) {
  runnerScaleSetId, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdAnnotationKey])
  if err != nil {
    return nil, err
  }

  effectiveMinRunners := 0
  effectiveMaxRunners := math.MaxInt32
  if autoscalingRunnerSet.Spec.MaxRunners != nil {
    effectiveMaxRunners = *autoscalingRunnerSet.Spec.MaxRunners
  }
  if autoscalingRunnerSet.Spec.MinRunners != nil {
    effectiveMinRunners = *autoscalingRunnerSet.Spec.MinRunners
  }

  labels := map[string]string{
    LabelKeyGitHubScaleSetNamespace: autoscalingRunnerSet.Namespace,
    LabelKeyGitHubScaleSetName: autoscalingRunnerSet.Name,
    LabelKeyKubernetesPartOf: labelValueKubernetesPartOf,
    LabelKeyKubernetesComponent: "runner-scale-set-listener",
    LabelKeyKubernetesVersion: autoscalingRunnerSet.Labels[LabelKeyKubernetesVersion],
    labelKeyRunnerSpecHash: autoscalingRunnerSet.ListenerSpecHash(),
  }

  if err := applyGitHubURLLabels(autoscalingRunnerSet.Spec.GitHubConfigUrl, labels); err != nil {
    return nil, fmt.Errorf("failed to apply GitHub URL labels: %v", err)
  }

  autoscalingListener := &v1alpha1.AutoscalingListener{
    ObjectMeta: metav1.ObjectMeta{
      Name: scaleSetListenerName(autoscalingRunnerSet),
      Namespace: namespace,
      Labels: labels,
    },
    Spec: v1alpha1.AutoscalingListenerSpec{
      GitHubConfigUrl: autoscalingRunnerSet.Spec.GitHubConfigUrl,
      GitHubConfigSecret: autoscalingRunnerSet.Spec.GitHubConfigSecret,
      RunnerScaleSetId: runnerScaleSetId,
      AutoscalingRunnerSetNamespace: autoscalingRunnerSet.Namespace,
      AutoscalingRunnerSetName: autoscalingRunnerSet.Name,
      EphemeralRunnerSetName: ephemeralRunnerSet.Name,
      MinRunners: effectiveMinRunners,
      MaxRunners: effectiveMaxRunners,
      Image: image,
      ImagePullPolicy: scaleSetListenerImagePullPolicy,
      ImagePullSecrets: imagePullSecrets,
      Proxy: autoscalingRunnerSet.Spec.Proxy,
      GitHubServerTLS: autoscalingRunnerSet.Spec.GitHubServerTLS,
    },
  }

  return autoscalingListener, nil
}

type listenerMetricsServerConfig struct {
  addr string
  endpoint string
}

func (b *resourceBuilder) newScaleSetListenerPod(autoscalingListener *v1alpha1.AutoscalingListener, serviceAccount *corev1.ServiceAccount, secret *corev1.Secret, metricsConfig *listenerMetricsServerConfig, envs ...corev1.EnvVar) (*corev1.Pod, error) {
  listenerEnv := []corev1.EnvVar{
    {
      Name: "GITHUB_CONFIGURE_URL",
@@ -84,6 +156,18 @@ func (b *resourceBuilder) newScaleSetListenerPod(autoscalingListener *v1alpha1.A
      Name: "GITHUB_RUNNER_SCALE_SET_ID",
      Value: strconv.Itoa(autoscalingListener.Spec.RunnerScaleSetId),
    },
    {
      Name: "GITHUB_RUNNER_SCALE_SET_NAME",
      Value: autoscalingListener.Spec.AutoscalingRunnerSetName,
    },
    {
      Name: "GITHUB_RUNNER_LOG_LEVEL",
      Value: scaleSetListenerLogLevel,
    },
    {
      Name: "GITHUB_RUNNER_LOG_FORMAT",
      Value: scaleSetListenerLogFormat,
    },
  }
  listenerEnv = append(listenerEnv, envs...)

@@ -143,6 +227,38 @@ func (b *resourceBuilder) newScaleSetListenerPod(autoscalingListener *v1alpha1.A
    })
  }

  var ports []corev1.ContainerPort
  if metricsConfig != nil && len(metricsConfig.addr) != 0 {
    listenerEnv = append(
      listenerEnv,
      corev1.EnvVar{
        Name: "GITHUB_METRICS_ADDR",
        Value: metricsConfig.addr,
      },
      corev1.EnvVar{
        Name: "GITHUB_METRICS_ENDPOINT",
        Value: metricsConfig.endpoint,
      },
    )

    _, portStr, err := net.SplitHostPort(metricsConfig.addr)
    if err != nil {
      return nil, fmt.Errorf("failed to split host:port for metrics address: %v", err)
    }
    port, err := strconv.ParseInt(portStr, 10, 32)
    if err != nil {
      return nil, fmt.Errorf("failed to convert port %q to int32: %v", portStr, err)
    }
    ports = append(
      ports,
      corev1.ContainerPort{
        ContainerPort: int32(port),
        Protocol: corev1.ProtocolTCP,
        Name: "metrics",
      },
    )
  }

  podSpec := corev1.PodSpec{
    ServiceAccountName: serviceAccount.Name,
    Containers: []corev1.Container{
@@ -150,10 +266,11 @@ func (b *resourceBuilder) newScaleSetListenerPod(autoscalingListener *v1alpha1.A
        Name: autoscalingListenerContainerName,
        Image: autoscalingListener.Spec.Image,
        Env: listenerEnv,
        ImagePullPolicy: corev1.PullIfNotPresent,
        ImagePullPolicy: autoscalingListener.Spec.ImagePullPolicy,
        Command: []string{
          "/github-runnerscaleset-listener",
        },
        Ports: ports,
      },
    },
    ImagePullSecrets: autoscalingListener.Spec.ImagePullSecrets,
@@ -178,55 +295,7 @@ func (b *resourceBuilder) newScaleSetListenerPod(autoscalingListener *v1alpha1.A
    Spec: podSpec,
  }

  return newRunnerScaleSetListenerPod
}

func (b *resourceBuilder) newEphemeralRunnerSet(autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet) (*v1alpha1.EphemeralRunnerSet, error) {
|
||||
runnerScaleSetId, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdAnnotationKey])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
runnerSpecHash := autoscalingRunnerSet.RunnerSetSpecHash()
|
||||
|
||||
newLabels := map[string]string{
|
||||
LabelKeyRunnerSpecHash: runnerSpecHash,
|
||||
LabelKeyKubernetesPartOf: labelValueKubernetesPartOf,
|
||||
LabelKeyKubernetesComponent: "runner-set",
|
||||
LabelKeyKubernetesVersion: autoscalingRunnerSet.Labels[LabelKeyKubernetesVersion],
|
||||
LabelKeyGitHubScaleSetName: autoscalingRunnerSet.Name,
|
||||
LabelKeyGitHubScaleSetNamespace: autoscalingRunnerSet.Namespace,
|
||||
}
|
||||
|
||||
if err := applyGitHubURLLabels(autoscalingRunnerSet.Spec.GitHubConfigUrl, newLabels); err != nil {
|
||||
return nil, fmt.Errorf("failed to apply GitHub URL labels: %v", err)
|
||||
}
|
||||
|
||||
newAnnotations := map[string]string{
|
||||
AnnotationKeyGitHubRunnerGroupName: autoscalingRunnerSet.Annotations[AnnotationKeyGitHubRunnerGroupName],
|
||||
}
|
||||
|
||||
newEphemeralRunnerSet := &v1alpha1.EphemeralRunnerSet{
|
||||
TypeMeta: metav1.TypeMeta{},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: autoscalingRunnerSet.ObjectMeta.Name + "-",
|
||||
Namespace: autoscalingRunnerSet.ObjectMeta.Namespace,
|
||||
Labels: newLabels,
|
||||
Annotations: newAnnotations,
|
||||
},
|
||||
Spec: v1alpha1.EphemeralRunnerSetSpec{
|
||||
Replicas: 0,
|
||||
EphemeralRunnerSpec: v1alpha1.EphemeralRunnerSpec{
|
||||
RunnerScaleSetId: runnerScaleSetId,
|
||||
GitHubConfigUrl: autoscalingRunnerSet.Spec.GitHubConfigUrl,
|
||||
GitHubConfigSecret: autoscalingRunnerSet.Spec.GitHubConfigSecret,
|
||||
Proxy: autoscalingRunnerSet.Spec.Proxy,
|
||||
GitHubServerTLS: autoscalingRunnerSet.Spec.GitHubServerTLS,
|
||||
PodTemplateSpec: autoscalingRunnerSet.Spec.Template,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
return newEphemeralRunnerSet, nil
|
||||
return newRunnerScaleSetListenerPod, nil
|
||||
}
|
||||
|
||||
func (b *resourceBuilder) newScaleSetListenerServiceAccount(autoscalingListener *v1alpha1.AutoscalingListener) *corev1.ServiceAccount {
|
||||
@@ -318,59 +387,52 @@ func (b *resourceBuilder) newScaleSetListenerSecretMirror(autoscalingListener *v
 	return newListenerSecret
 }
 
-func (b *resourceBuilder) newAutoScalingListener(autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet, namespace, image string, imagePullSecrets []corev1.LocalObjectReference) (*v1alpha1.AutoscalingListener, error) {
+func (b *resourceBuilder) newEphemeralRunnerSet(autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet) (*v1alpha1.EphemeralRunnerSet, error) {
 	runnerScaleSetId, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdAnnotationKey])
 	if err != nil {
 		return nil, err
 	}
+	runnerSpecHash := autoscalingRunnerSet.RunnerSetSpecHash()
 
-	effectiveMinRunners := 0
-	effectiveMaxRunners := math.MaxInt32
-	if autoscalingRunnerSet.Spec.MaxRunners != nil {
-		effectiveMaxRunners = *autoscalingRunnerSet.Spec.MaxRunners
-	}
-	if autoscalingRunnerSet.Spec.MinRunners != nil {
-		effectiveMinRunners = *autoscalingRunnerSet.Spec.MinRunners
+	labels := map[string]string{
+		labelKeyRunnerSpecHash:          runnerSpecHash,
+		LabelKeyKubernetesPartOf:        labelValueKubernetesPartOf,
+		LabelKeyKubernetesComponent:     "runner-set",
+		LabelKeyKubernetesVersion:       autoscalingRunnerSet.Labels[LabelKeyKubernetesVersion],
+		LabelKeyGitHubScaleSetName:      autoscalingRunnerSet.Name,
+		LabelKeyGitHubScaleSetNamespace: autoscalingRunnerSet.Namespace,
 	}
 
-	githubConfig, err := actions.ParseGitHubConfigFromURL(autoscalingRunnerSet.Spec.GitHubConfigUrl)
-	if err != nil {
-		return nil, fmt.Errorf("failed to parse github config from url: %v", err)
+	if err := applyGitHubURLLabels(autoscalingRunnerSet.Spec.GitHubConfigUrl, labels); err != nil {
+		return nil, fmt.Errorf("failed to apply GitHub URL labels: %v", err)
 	}
 
-	autoscalingListener := &v1alpha1.AutoscalingListener{
+	newAnnotations := map[string]string{
+		AnnotationKeyGitHubRunnerGroupName: autoscalingRunnerSet.Annotations[AnnotationKeyGitHubRunnerGroupName],
+	}
+
+	newEphemeralRunnerSet := &v1alpha1.EphemeralRunnerSet{
+		TypeMeta: metav1.TypeMeta{},
 		ObjectMeta: metav1.ObjectMeta{
-			Name:      scaleSetListenerName(autoscalingRunnerSet),
-			Namespace: namespace,
-			Labels: map[string]string{
-				LabelKeyGitHubScaleSetNamespace: autoscalingRunnerSet.Namespace,
-				LabelKeyGitHubScaleSetName:      autoscalingRunnerSet.Name,
-				LabelKeyKubernetesPartOf:        labelValueKubernetesPartOf,
-				LabelKeyKubernetesComponent:     "runner-scale-set-listener",
-				LabelKeyKubernetesVersion:       autoscalingRunnerSet.Labels[LabelKeyKubernetesVersion],
-				LabelKeyGitHubEnterprise:        githubConfig.Enterprise,
-				LabelKeyGitHubOrganization:      githubConfig.Organization,
-				LabelKeyGitHubRepository:        githubConfig.Repository,
-				LabelKeyRunnerSpecHash:          autoscalingRunnerSet.ListenerSpecHash(),
+			GenerateName: autoscalingRunnerSet.ObjectMeta.Name + "-",
+			Namespace:    autoscalingRunnerSet.ObjectMeta.Namespace,
+			Labels:       labels,
+			Annotations:  newAnnotations,
 		},
+		Spec: v1alpha1.EphemeralRunnerSetSpec{
+			Replicas: 0,
+			EphemeralRunnerSpec: v1alpha1.EphemeralRunnerSpec{
+				RunnerScaleSetId:   runnerScaleSetId,
+				GitHubConfigUrl:    autoscalingRunnerSet.Spec.GitHubConfigUrl,
+				GitHubConfigSecret: autoscalingRunnerSet.Spec.GitHubConfigSecret,
+				Proxy:              autoscalingRunnerSet.Spec.Proxy,
+				GitHubServerTLS:    autoscalingRunnerSet.Spec.GitHubServerTLS,
+				PodTemplateSpec:    autoscalingRunnerSet.Spec.Template,
+			},
+		},
-		Spec: v1alpha1.AutoscalingListenerSpec{
-			GitHubConfigUrl:               autoscalingRunnerSet.Spec.GitHubConfigUrl,
-			GitHubConfigSecret:            autoscalingRunnerSet.Spec.GitHubConfigSecret,
-			RunnerScaleSetId:              runnerScaleSetId,
-			AutoscalingRunnerSetNamespace: autoscalingRunnerSet.Namespace,
-			AutoscalingRunnerSetName:      autoscalingRunnerSet.Name,
-			EphemeralRunnerSetName:        ephemeralRunnerSet.Name,
-			MinRunners:                    effectiveMinRunners,
-			MaxRunners:                    effectiveMaxRunners,
-			Image:                         image,
-			ImagePullSecrets:              imagePullSecrets,
-			Proxy:                         autoscalingRunnerSet.Spec.Proxy,
-			GitHubServerTLS:               autoscalingRunnerSet.Spec.GitHubServerTLS,
-		},
 	}
 
-	return autoscalingListener, nil
+	return newEphemeralRunnerSet, nil
 }
 
 func (b *resourceBuilder) newEphemeralRunner(ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet) *v1alpha1.EphemeralRunner {
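Every object the builder above produces carries the scale-set name and namespace labels, which is how the controllers later find the resources belonging to one AutoscalingRunnerSet without holding object references. As a rough illustration only - listRunnerSetsFor is a hypothetical helper, not code from this diff, and it assumes it lives in the same controllers package so the label-key constants resolve:

import (
	"context"

	"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// listRunnerSetsFor selects the EphemeralRunnerSets that newEphemeralRunnerSet
// stamped with the scale-set name/namespace labels shown in the hunk above.
func listRunnerSetsFor(ctx context.Context, c client.Client, ars *v1alpha1.AutoscalingRunnerSet) (*v1alpha1.EphemeralRunnerSetList, error) {
	var list v1alpha1.EphemeralRunnerSetList
	err := c.List(ctx, &list,
		client.InNamespace(ars.Namespace),
		client.MatchingLabels{
			LabelKeyGitHubScaleSetName:      ars.Name,
			LabelKeyGitHubScaleSetNamespace: ars.Namespace,
		},
	)
	return &list, err
}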
@@ -554,14 +616,23 @@ func applyGitHubURLLabels(url string, labels map[string]string) error {
 	}
 
 	if len(githubConfig.Enterprise) > 0 {
-		labels[LabelKeyGitHubEnterprise] = githubConfig.Enterprise
+		labels[LabelKeyGitHubEnterprise] = trimLabelValue(githubConfig.Enterprise)
 	}
 	if len(githubConfig.Organization) > 0 {
-		labels[LabelKeyGitHubOrganization] = githubConfig.Organization
+		labels[LabelKeyGitHubOrganization] = trimLabelValue(githubConfig.Organization)
 	}
 	if len(githubConfig.Repository) > 0 {
-		labels[LabelKeyGitHubRepository] = githubConfig.Repository
+		labels[LabelKeyGitHubRepository] = trimLabelValue(githubConfig.Repository)
 	}
 
 	return nil
 }
+
+const trimLabelVauleSuffix = "-trim"
+
+func trimLabelValue(val string) string {
+	if len(val) > 63 {
+		return val[:63-len(trimLabelVauleSuffix)] + trimLabelVauleSuffix
+	}
+	return val
+}
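Kubernetes caps label values at 63 characters, so over-long enterprise, organization, and repository names parsed from the GitHub URL are now truncated with a visible "-trim" marker instead of failing object creation. A self-contained sketch of the same rule (illustrative names, not the function from the diff):

package main

import (
	"fmt"
	"strings"
)

const suffix = "-trim" // mirrors trimLabelVauleSuffix above

// trimTo63 reproduces the truncation rule: values longer than 63 characters
// are cut so that value+suffix lands exactly on the Kubernetes label limit.
func trimTo63(val string) string {
	if len(val) > 63 {
		return val[:63-len(suffix)] + suffix
	}
	return val
}

func main() {
	out := trimTo63(strings.Repeat("a", 64))
	fmt.Println(len(out))         // 63
	fmt.Println(out[len(out)-5:]) // -trim
}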
@@ -2,6 +2,8 @@ package actionsgithubcom
 
 import (
 	"context"
+	"fmt"
+	"strings"
 	"testing"
 
 	"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
@@ -36,7 +38,7 @@ func TestLabelPropagation(t *testing.T) {
 	assert.Equal(t, labelValueKubernetesPartOf, ephemeralRunnerSet.Labels[LabelKeyKubernetesPartOf])
 	assert.Equal(t, "runner-set", ephemeralRunnerSet.Labels[LabelKeyKubernetesComponent])
 	assert.Equal(t, autoscalingRunnerSet.Labels[LabelKeyKubernetesVersion], ephemeralRunnerSet.Labels[LabelKeyKubernetesVersion])
-	assert.NotEmpty(t, ephemeralRunnerSet.Labels[LabelKeyRunnerSpecHash])
+	assert.NotEmpty(t, ephemeralRunnerSet.Labels[labelKeyRunnerSpecHash])
 	assert.Equal(t, autoscalingRunnerSet.Name, ephemeralRunnerSet.Labels[LabelKeyGitHubScaleSetName])
 	assert.Equal(t, autoscalingRunnerSet.Namespace, ephemeralRunnerSet.Labels[LabelKeyGitHubScaleSetNamespace])
 	assert.Equal(t, "", ephemeralRunnerSet.Labels[LabelKeyGitHubEnterprise])
@@ -49,7 +51,7 @@ func TestLabelPropagation(t *testing.T) {
 	assert.Equal(t, labelValueKubernetesPartOf, listener.Labels[LabelKeyKubernetesPartOf])
 	assert.Equal(t, "runner-scale-set-listener", listener.Labels[LabelKeyKubernetesComponent])
 	assert.Equal(t, autoscalingRunnerSet.Labels[LabelKeyKubernetesVersion], listener.Labels[LabelKeyKubernetesVersion])
-	assert.NotEmpty(t, ephemeralRunnerSet.Labels[LabelKeyRunnerSpecHash])
+	assert.NotEmpty(t, ephemeralRunnerSet.Labels[labelKeyRunnerSpecHash])
 	assert.Equal(t, autoscalingRunnerSet.Name, listener.Labels[LabelKeyGitHubScaleSetName])
 	assert.Equal(t, autoscalingRunnerSet.Namespace, listener.Labels[LabelKeyGitHubScaleSetNamespace])
 	assert.Equal(t, "", listener.Labels[LabelKeyGitHubEnterprise])
@@ -66,7 +68,8 @@ func TestLabelPropagation(t *testing.T) {
 			Name: "test",
 		},
 	}
-	listenerPod := b.newScaleSetListenerPod(listener, listenerServiceAccount, listenerSecret)
+	listenerPod, err := b.newScaleSetListenerPod(listener, listenerServiceAccount, listenerSecret, nil)
+	require.NoError(t, err)
 	assert.Equal(t, listenerPod.Labels, listener.Labels)
 
 	ephemeralRunner := b.newEphemeralRunner(ephemeralRunnerSet)
@@ -91,3 +94,70 @@ func TestLabelPropagation(t *testing.T) {
 		assert.Equal(t, ephemeralRunner.Labels[key], pod.Labels[key])
 	}
 }
+
+func TestGitHubURLTrimLabelValues(t *testing.T) {
+	enterprise := strings.Repeat("a", 64)
+	organization := strings.Repeat("b", 64)
+	repository := strings.Repeat("c", 64)
+
+	autoscalingRunnerSet := v1alpha1.AutoscalingRunnerSet{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "test-scale-set",
+			Namespace: "test-ns",
+			Labels: map[string]string{
+				LabelKeyKubernetesPartOf:  labelValueKubernetesPartOf,
+				LabelKeyKubernetesVersion: "0.2.0",
+			},
+			Annotations: map[string]string{
+				runnerScaleSetIdAnnotationKey:      "1",
+				AnnotationKeyGitHubRunnerGroupName: "test-group",
+			},
+		},
+	}
+
+	t.Run("org/repo", func(t *testing.T) {
+		autoscalingRunnerSet := autoscalingRunnerSet.DeepCopy()
+		autoscalingRunnerSet.Spec = v1alpha1.AutoscalingRunnerSetSpec{
+			GitHubConfigUrl: fmt.Sprintf("https://github.com/%s/%s", organization, repository),
+		}
+
+		var b resourceBuilder
+		ephemeralRunnerSet, err := b.newEphemeralRunnerSet(autoscalingRunnerSet)
+		require.NoError(t, err)
+		assert.Len(t, ephemeralRunnerSet.Labels[LabelKeyGitHubEnterprise], 0)
+		assert.Len(t, ephemeralRunnerSet.Labels[LabelKeyGitHubOrganization], 63)
+		assert.Len(t, ephemeralRunnerSet.Labels[LabelKeyGitHubRepository], 63)
+		assert.True(t, strings.HasSuffix(ephemeralRunnerSet.Labels[LabelKeyGitHubOrganization], trimLabelVauleSuffix))
+		assert.True(t, strings.HasSuffix(ephemeralRunnerSet.Labels[LabelKeyGitHubRepository], trimLabelVauleSuffix))
+
+		listener, err := b.newAutoScalingListener(autoscalingRunnerSet, ephemeralRunnerSet, autoscalingRunnerSet.Namespace, "test:latest", nil)
+		require.NoError(t, err)
+		assert.Len(t, listener.Labels[LabelKeyGitHubEnterprise], 0)
+		assert.Len(t, listener.Labels[LabelKeyGitHubOrganization], 63)
+		assert.Len(t, listener.Labels[LabelKeyGitHubRepository], 63)
+		assert.True(t, strings.HasSuffix(ephemeralRunnerSet.Labels[LabelKeyGitHubOrganization], trimLabelVauleSuffix))
+		assert.True(t, strings.HasSuffix(ephemeralRunnerSet.Labels[LabelKeyGitHubRepository], trimLabelVauleSuffix))
+	})
+
+	t.Run("enterprise", func(t *testing.T) {
+		autoscalingRunnerSet := autoscalingRunnerSet.DeepCopy()
+		autoscalingRunnerSet.Spec = v1alpha1.AutoscalingRunnerSetSpec{
+			GitHubConfigUrl: fmt.Sprintf("https://github.com/enterprises/%s", enterprise),
+		}
+
+		var b resourceBuilder
+		ephemeralRunnerSet, err := b.newEphemeralRunnerSet(autoscalingRunnerSet)
+		require.NoError(t, err)
+		assert.Len(t, ephemeralRunnerSet.Labels[LabelKeyGitHubEnterprise], 63)
+		assert.True(t, strings.HasSuffix(ephemeralRunnerSet.Labels[LabelKeyGitHubEnterprise], trimLabelVauleSuffix))
+		assert.Len(t, ephemeralRunnerSet.Labels[LabelKeyGitHubOrganization], 0)
+		assert.Len(t, ephemeralRunnerSet.Labels[LabelKeyGitHubRepository], 0)
+
+		listener, err := b.newAutoScalingListener(autoscalingRunnerSet, ephemeralRunnerSet, autoscalingRunnerSet.Namespace, "test:latest", nil)
+		require.NoError(t, err)
+		assert.Len(t, listener.Labels[LabelKeyGitHubEnterprise], 63)
+		assert.True(t, strings.HasSuffix(ephemeralRunnerSet.Labels[LabelKeyGitHubEnterprise], trimLabelVauleSuffix))
+		assert.Len(t, listener.Labels[LabelKeyGitHubOrganization], 0)
+		assert.Len(t, listener.Labels[LabelKeyGitHubRepository], 0)
+	})
+}
@@ -11,7 +11,7 @@ import (
 	"github.com/actions/actions-runner-controller/apis/actions.summerwind.net/v1alpha1"
 	prometheus_metrics "github.com/actions/actions-runner-controller/controllers/actions.summerwind.net/metrics"
 	arcgithub "github.com/actions/actions-runner-controller/github"
-	"github.com/google/go-github/v47/github"
+	"github.com/google/go-github/v52/github"
 	corev1 "k8s.io/api/core/v1"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 )
@@ -118,10 +118,10 @@ func (r *HorizontalRunnerAutoscalerReconciler) suggestReplicasByQueuedAndInProgr
 	}
 
 	var total, inProgress, queued, completed, unknown int
-	type callback func()
-	listWorkflowJobs := func(user string, repoName string, runID int64, fallback_cb callback) {
+	listWorkflowJobs := func(user string, repoName string, runID int64) {
 		if runID == 0 {
-			fallback_cb()
+			// should not happen in reality
+			r.Log.Info("Detected run with no runID of 0, ignoring the case and not scaling.", "repo_name", repoName, "run_id", runID)
 			return
 		}
 		opt := github.ListWorkflowJobsOptions{ListOptions: github.ListOptions{PerPage: 50}}
@@ -139,7 +139,8 @@ func (r *HorizontalRunnerAutoscalerReconciler) suggestReplicasByQueuedAndInProgr
 			opt.Page = resp.NextPage
 		}
 		if len(allJobs) == 0 {
-			fallback_cb()
+			// GitHub API can return run with empty job array - should be ignored
+			r.Log.Info("Detected run with no jobs, ignoring the case and not scaling.", "repo_name", repoName, "run_id", runID)
 		} else {
 		JOB:
 			for _, job := range allJobs {
@@ -201,9 +202,9 @@ func (r *HorizontalRunnerAutoscalerReconciler) suggestReplicasByQueuedAndInProgr
 		case "completed":
 			completed++
 		case "in_progress":
-			listWorkflowJobs(user, repoName, run.GetID(), func() { inProgress++ })
+			listWorkflowJobs(user, repoName, run.GetID())
 		case "queued":
-			listWorkflowJobs(user, repoName, run.GetID(), func() { queued++ })
+			listWorkflowJobs(user, repoName, run.GetID())
 		default:
 			unknown++
 		}
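With fallback_cb removed, a run whose ID is 0, or for which GitHub returns an empty jobs array, no longer counts as one demanded runner; it is only logged. A condensed, self-contained sketch of the resulting counting rule (illustrative types; the real code also filters jobs by runner labels, omitted here):

package main

import "fmt"

type job struct{ Status string }

type run struct {
	ID   int64
	Jobs []job
}

// suggestedReplicas condenses the post-change rule: invalid runs contribute
// nothing instead of being counted once via the removed fallback callback.
func suggestedReplicas(runs []run) int {
	n := 0
	for _, r := range runs {
		if r.ID == 0 || len(r.Jobs) == 0 {
			continue // ignored and only logged by the controller
		}
		for _, j := range r.Jobs {
			if j.Status == "queued" || j.Status == "in_progress" {
				n++
			}
		}
	}
	return n
}

func main() {
	runs := []run{
		{ID: 0, Jobs: []job{{Status: "queued"}}},      // no runID: ignored
		{ID: 2, Jobs: nil},                            // empty jobs: ignored
		{ID: 3, Jobs: []job{{Status: "in_progress"}}}, // counts
	}
	fmt.Println(suggestedReplicas(runs)) // 1
}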
@@ -61,8 +61,9 @@ func TestDetermineDesiredReplicas_RepositoryRunner(t *testing.T) {
 		want int
 		err  string
 	}{
+		// case_0
 		// Legacy functionality
-		// 3 demanded, max at 3
+		// 0 demanded due to zero runID, min at 2
 		{
 			repo: "test/valid",
 			min:  intPtr(2),
@@ -70,9 +71,10 @@ func TestDetermineDesiredReplicas_RepositoryRunner(t *testing.T) {
 			workflowRuns:             `{"total_count": 4, "workflow_runs":[{"status":"queued"}, {"status":"in_progress"}, {"status":"in_progress"}, {"status":"completed"}]}"`,
 			workflowRuns_queued:      `{"total_count": 1, "workflow_runs":[{"status":"queued"}]}"`,
 			workflowRuns_in_progress: `{"total_count": 2, "workflow_runs":[{"status":"in_progress"}, {"status":"in_progress"}]}"`,
-			want: 3,
+			want: 2,
 		},
-		// Explicitly speified the default `self-hosted` label which is ignored by the simulator,
+		// case_1
+		// Explicitly specified the default `self-hosted` label which is ignored by the simulator,
 		// as we assume that GitHub Actions automatically associates the `self-hosted` label to every self-hosted runner.
 		// 3 demanded, max at 3
 		{
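These expectation tables read as demanded-then-clamped: in case_0 the only queued run now carries no runID, so demand drops to 0 and the configured minimum of 2 wins, where the old expectation of 3 assumed the run still counted. A hedged reading of that rule as a sketch (not the controller's actual code):

// clamp expresses the bounding the test comments encode:
// desired = max(minRunners, min(maxRunners, demanded)).
func clamp(demanded, minRunners, maxRunners int) int {
	if demanded > maxRunners {
		demanded = maxRunners
	}
	if demanded < minRunners {
		demanded = minRunners
	}
	return demanded
}

// case_0 after this change: clamp(0, 2, 3) == 2, hence want: 2.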
@@ -80,11 +82,17 @@ func TestDetermineDesiredReplicas_RepositoryRunner(t *testing.T) {
 			labels: []string{"self-hosted"},
 			min:    intPtr(2),
 			max:    intPtr(3),
-			workflowRuns:             `{"total_count": 4, "workflow_runs":[{"status":"queued"}, {"status":"in_progress"}, {"status":"in_progress"}, {"status":"completed"}]}"`,
-			workflowRuns_queued:      `{"total_count": 1, "workflow_runs":[{"status":"queued"}]}"`,
-			workflowRuns_in_progress: `{"total_count": 2, "workflow_runs":[{"status":"in_progress"}, {"status":"in_progress"}]}"`,
-			want: 3,
+			workflowRuns:             `{"total_count": 4, "workflow_runs":[{"id": 1, "status":"queued"}, {"id": 2, "status":"in_progress"}, {"id": 3, "status":"in_progress"}, {"status":"completed"}]}"`,
+			workflowRuns_queued:      `{"total_count": 1, "workflow_runs":[{"id": 1, "status":"queued"}]}"`,
+			workflowRuns_in_progress: `{"total_count": 2, "workflow_runs":[{"id": 2, "status":"in_progress"}, {"id": 3, "status":"in_progress"}]}"`,
+			workflowJobs: map[int]string{
+				1: `{"jobs": [{"status": "queued", "labels":["self-hosted"]}]}`,
+				2: `{"jobs": [{"status": "in_progress", "labels":["self-hosted"]}]}`,
+				3: `{"jobs": [{"status": "in_progress", "labels":["self-hosted"]}]}`,
+			},
+			want: 3,
 		},
+		// case_2
 		// 2 demanded, max at 3, currently 3, delay scaling down due to grace period
 		{
 			repo: "test/valid",
@@ -97,6 +105,7 @@ func TestDetermineDesiredReplicas_RepositoryRunner(t *testing.T) {
 			workflowRuns_in_progress: `{"total_count": 1, "workflow_runs":[{"status":"in_progress"}]}"`,
 			want: 3,
 		},
+		// case_3
 		// 3 demanded, max at 2
 		{
 			repo: "test/valid",
@@ -107,6 +116,7 @@ func TestDetermineDesiredReplicas_RepositoryRunner(t *testing.T) {
 			workflowRuns_in_progress: `{"total_count": 2, "workflow_runs":[{"status":"in_progress"}, {"status":"in_progress"}]}"`,
 			want: 2,
 		},
+		// case_4
 		// 2 demanded, min at 2
 		{
 			repo: "test/valid",
@@ -117,6 +127,7 @@ func TestDetermineDesiredReplicas_RepositoryRunner(t *testing.T) {
 			workflowRuns_in_progress: `{"total_count": 1, "workflow_runs":[{"status":"in_progress"}]}"`,
 			want: 2,
 		},
+		// case_5
 		// 1 demanded, min at 2
 		{
 			repo: "test/valid",
@@ -127,6 +138,7 @@ func TestDetermineDesiredReplicas_RepositoryRunner(t *testing.T) {
 			workflowRuns_in_progress: `{"total_count": 0, "workflow_runs":[]}"`,
 			want: 2,
 		},
+		// case_6
 		// 1 demanded, min at 2
 		{
 			repo: "test/valid",
@@ -137,6 +149,7 @@ func TestDetermineDesiredReplicas_RepositoryRunner(t *testing.T) {
 			workflowRuns_in_progress: `{"total_count": 1, "workflow_runs":[{"status":"in_progress"}]}"`,
 			want: 2,
 		},
+		// case_7
 		// 1 demanded, min at 1
 		{
 			repo: "test/valid",
@@ -147,6 +160,7 @@ func TestDetermineDesiredReplicas_RepositoryRunner(t *testing.T) {
 			workflowRuns_in_progress: `{"total_count": 0, "workflow_runs":[]}"`,
 			want: 1,
 		},
+		// case_8
 		// 1 demanded, min at 1
 		{
 			repo: "test/valid",
@@ -157,6 +171,7 @@ func TestDetermineDesiredReplicas_RepositoryRunner(t *testing.T) {
 			workflowRuns_in_progress: `{"total_count": 1, "workflow_runs":[{"status":"in_progress"}]}"`,
 			want: 1,
 		},
+		// case_9
 		// fixed at 3
 		{
 			repo: "test/valid",
@@ -166,9 +181,36 @@ func TestDetermineDesiredReplicas_RepositoryRunner(t *testing.T) {
 			workflowRuns:             `{"total_count": 4, "workflow_runs":[{"status":"in_progress"}, {"status":"in_progress"}, {"status":"in_progress"}, {"status":"completed"}]}"`,
 			workflowRuns_queued:      `{"total_count": 0, "workflow_runs":[]}"`,
 			workflowRuns_in_progress: `{"total_count": 3, "workflow_runs":[{"status":"in_progress"}, {"status":"in_progress"}, {"status":"in_progress"}]}"`,
-			want: 3,
+			want: 1,
 		},
+		// Case for empty GitHub Actions reponse - should not trigger scale up
+		{
+			description: "GitHub Actions Jobs Array is empty - no scale up",
+			repo:        "test/valid",
+			min:         intPtr(0),
+			max:         intPtr(3),
+			workflowRuns:             `{"total_count": 2, "workflow_runs":[{"status":"queued"}, {"status":"completed"}]}"`,
+			workflowRuns_queued:      `{"total_count": 1, "workflow_runs":[{"status":"queued"}]}"`,
+			workflowRuns_in_progress: `{"total_count": 0, "workflow_runs":[]}"`,
+			workflowJobs: map[int]string{
+				1: `{"jobs": []}`,
+			},
+			want: 0,
+		},
+		// Case for hosted GitHub Actions run
+		{
+			description: "Hosted GitHub Actions run - no scale up",
+			repo:        "test/valid",
+			min:         intPtr(0),
+			max:         intPtr(3),
+			workflowRuns:             `{"total_count": 2, "workflow_runs":[{"id": 1, "status":"queued"}, {"status":"completed"}]}"`,
+			workflowRuns_queued:      `{"total_count": 1, "workflow_runs":[{"id": 1, "status":"queued"}]}"`,
+			workflowRuns_in_progress: `{"total_count": 0, "workflow_runs":[]}"`,
+			workflowJobs: map[int]string{
+				1: `{"jobs": [{"status":"queued"}]}`,
+			},
+			want: 0,
+		},
 
 		{
 			description: "Job-level autoscaling with no explicit runner label (runners have implicit self-hosted, requested self-hosted, 5 jobs from 3 workflows)",
 			repo:        "test/valid",
@@ -422,7 +464,8 @@ func TestDetermineDesiredReplicas_OrganizationalRunner(t *testing.T) {
 		want int
 		err  string
 	}{
-		// 3 demanded, max at 3
+		// case_0
+		// 0 demanded due to zero runID, min at 2
 		{
 			org:   "test",
 			repos: []string{"valid"},
@@ -431,8 +474,9 @@ func TestDetermineDesiredReplicas_OrganizationalRunner(t *testing.T) {
 			workflowRuns:             `{"total_count": 4, "workflow_runs":[{"status":"queued"}, {"status":"in_progress"}, {"status":"in_progress"}, {"status":"completed"}]}"`,
 			workflowRuns_queued:      `{"total_count": 1, "workflow_runs":[{"status":"queued"}]}"`,
 			workflowRuns_in_progress: `{"total_count": 2, "workflow_runs":[{"status":"in_progress"}, {"status":"in_progress"}]}"`,
-			want: 3,
+			want: 2,
 		},
+		// case_1
 		// 2 demanded, max at 3, currently 3, delay scaling down due to grace period
 		{
 			org: "test",
@@ -446,6 +490,7 @@ func TestDetermineDesiredReplicas_OrganizationalRunner(t *testing.T) {
 			workflowRuns_in_progress: `{"total_count": 1, "workflow_runs":[{"status":"in_progress"}]}"`,
 			want: 3,
 		},
+		// case_2
 		// 3 demanded, max at 2
 		{
 			org: "test",
@@ -457,6 +502,7 @@ func TestDetermineDesiredReplicas_OrganizationalRunner(t *testing.T) {
 			workflowRuns_in_progress: `{"total_count": 2, "workflow_runs":[{"status":"in_progress"}, {"status":"in_progress"}]}"`,
 			want: 2,
 		},
+		// case_3
 		// 2 demanded, min at 2
 		{
 			org: "test",
@@ -468,6 +514,7 @@ func TestDetermineDesiredReplicas_OrganizationalRunner(t *testing.T) {
 			workflowRuns_in_progress: `{"total_count": 1, "workflow_runs":[{"status":"in_progress"}]}"`,
 			want: 2,
 		},
+		// case_4
 		// 1 demanded, min at 2
 		{
 			org: "test",
@@ -479,6 +526,7 @@ func TestDetermineDesiredReplicas_OrganizationalRunner(t *testing.T) {
 			workflowRuns_in_progress: `{"total_count": 0, "workflow_runs":[]}"`,
 			want: 2,
 		},
+		// case_5
 		// 1 demanded, min at 2
 		{
 			org: "test",
@@ -512,6 +560,7 @@ func TestDetermineDesiredReplicas_OrganizationalRunner(t *testing.T) {
 			workflowRuns_in_progress: `{"total_count": 1, "workflow_runs":[{"status":"in_progress"}]}"`,
 			want: 1,
 		},
+		// case_6
 		// fixed at 3
 		{
 			org: "test",
@@ -522,8 +571,9 @@ func TestDetermineDesiredReplicas_OrganizationalRunner(t *testing.T) {
 			workflowRuns:             `{"total_count": 4, "workflow_runs":[{"status":"in_progress"}, {"status":"in_progress"}, {"status":"in_progress"}, {"status":"completed"}]}"`,
 			workflowRuns_queued:      `{"total_count": 0, "workflow_runs":[]}"`,
 			workflowRuns_in_progress: `{"total_count": 3, "workflow_runs":[{"status":"in_progress"},{"status":"in_progress"},{"status":"in_progress"}]}"`,
-			want: 3,
+			want: 1,
 		},
+		// case_7
 		// org runner, fixed at 3
 		{
 			org: "test",
@@ -534,8 +584,9 @@ func TestDetermineDesiredReplicas_OrganizationalRunner(t *testing.T) {
 			workflowRuns:             `{"total_count": 4, "workflow_runs":[{"status":"in_progress"}, {"status":"in_progress"}, {"status":"in_progress"}, {"status":"completed"}]}"`,
 			workflowRuns_queued:      `{"total_count": 0, "workflow_runs":[]}"`,
 			workflowRuns_in_progress: `{"total_count": 3, "workflow_runs":[{"status":"in_progress"},{"status":"in_progress"},{"status":"in_progress"}]}"`,
-			want: 3,
+			want: 1,
 		},
+		// case_8
 		// org runner, 1 demanded, min at 1, no repos
 		{
 			org: "test",