Compare commits

...

59 Commits

Author SHA1 Message Date
github-actions[bot]
311a1688fc Updates: container-hooks to v0.8.0 2025-10-06 09:07:50 +00:00
Nikola Jokic
e3ed1ba226 Introduce new kubernetes-novolume mode (#4250)
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-10-03 12:03:38 +02:00
dependabot[bot]
652bd99439 Bump the actions group across 1 directory with 5 updates (#4262)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-10-01 17:27:52 +02:00
Yusuke Kuoka
f731873df9 Add workflow name and target labels (#4240) 2025-09-30 16:01:51 +02:00
Nikola Jokic
088e2a3a90 Remove ephemeral runner when exit code != 0 and is patched with the job (#4239) 2025-09-17 21:40:37 +02:00
Dennis Stone
2035e13724 Update CODEOWNERS to include new maintainer (#4253) 2025-09-17 21:33:38 +02:00
Nikola Jokic
04b966dfec Update CODEOWNERS (#4251) 2025-09-17 17:49:12 +02:00
zkpepe
0a0be027fd docs: fix repo path typo (#4229) 2025-08-27 16:17:52 +02:00
Nikola Jokic
ddc2918a48 Requeue if create pod returns already exists error (#4201) 2025-08-14 17:00:48 +02:00
github-actions[bot]
0e006bb0ff Updates: runner to v2.328.0 (#4209)
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
2025-08-14 10:44:40 +01:00
dependabot[bot]
ce7722aed4 Bump actions/checkout from 4 to 5 in the actions group (#4205)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-12 23:27:53 +02:00
dependabot[bot]
ad2dd7d787 Bump docker/login-action from 3.4.0 to 3.5.0 in the actions group (#4196)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-06 23:08:12 +02:00
clechevalli
30abbe0cab Fix usage of underscore in Runner Scale Set name (#3545)
Co-authored-by: Nikola Jokic <jokicnikola07@gmail.com>
2025-08-06 09:32:49 +01:00
Nikola Jokic
c27541140a Remove JIT config from ephemeral runner status field (#4191) 2025-08-04 12:35:04 +02:00
github-actions[bot]
52d65c333b Updates: runner to v2.327.1 (#4188)
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
2025-07-29 10:07:13 +01:00
Alex Hatzenbuhler
a07dce28bb Remove deprecated preserveUnknownFields from CRDs (#4135) 2025-07-24 08:47:34 +02:00
github-actions[bot]
fb43abf1f3 Updates: runner to v2.327.0 (#4185)
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
2025-07-23 16:19:17 +01:00
Kylie Stradley
9c42f9f2e1 Add Missing Languages to CodeQL Advanced Configuration (#4179) 2025-07-16 11:16:19 +02:00
Cory Calahan
ad826725ce Update example GitHub URLs in values.yaml to include an example for enterprise account-level runners (#4181) 2025-07-16 10:40:38 +02:00
github-actions[bot]
4326693888 Updates: runner to v2.326.0 (#4176)
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
2025-07-14 11:39:15 -04:00
Nikola Jokic
469a0faec4 Remove workflow actions version comments since upgrades are done via dependabot (#4161) 2025-07-01 15:54:59 +02:00
Nikola Jokic
349cc0835e Fix image pull secrets list arguments in the chart (#4164) 2025-07-01 15:28:18 +02:00
Ho Kim
aa14f50e45 feat(runner): add ubuntu 24.04 support (#3598) 2025-07-01 18:34:52 +09:00
dependabot[bot]
ee8ca99e49 Bump the actions group across 1 directory with 5 updates (#4160)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-06-30 13:55:46 +02:00
adjn
6a13540076 Update CodeQL workflow for v3 (global-run-codeql.yaml) (#4157) 2025-06-30 11:16:11 +02:00
Nikola Jokic
ded39bede6 Prepare 0.12.1 release (#4153) 2025-06-27 13:49:47 +02:00
Nikola Jokic
9890c0592d Explicitly requeue during backoff ephemeral runner (#4152) 2025-06-27 12:05:43 +02:00
Nikola Jokic
3b5693eecb Remove check if runner exists after exit code 0 (#4142) 2025-06-27 11:11:39 +02:00
calx
e6e621a50a Remove duplicate float64 call (#4139) 2025-06-24 11:26:20 +02:00
Mark Huijgen
0b2534ebc9 Fix dind sidecar template (#4128) 2025-06-16 12:14:18 +02:00
Jeev B
e858d67926 Fix indentation of startupProbe attributes in dind sidecar (#4126) 2025-06-14 21:05:53 +02:00
Nikola Jokic
bc6c23609a Remove cache for build-push-action (#4124) 2025-06-13 15:26:55 +02:00
Nikola Jokic
666d0c52c4 Bump build-push-action to 6.18.0 (#4123) 2025-06-13 15:09:27 +02:00
Nikola Jokic
d9826e5244 Prepare 0.12.0 release (#4122) 2025-06-13 14:23:26 +02:00
dependabot[bot]
6f3882c482 Bump github.com/golang-jwt/jwt/v5 from 5.2.1 to 5.2.2 (#4120)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-06-11 21:43:46 +02:00
Nikola Jokic
e46c929241 Azure Key Vault integration to resolve secrets (#4090) 2025-06-11 15:53:33 +02:00
Wim Fournier
d4af75d82e Delete config secret when listener pod gets deleted (#4033)
Co-authored-by: Nikola Jokic <jokicnikola07@gmail.com>
2025-06-11 15:53:04 +02:00
Nash Luffman
e335f53037 Add response body to error when fetching access token (#4005)
Co-authored-by: mluffman <mluffman@thoughtmachine.net>
Co-authored-by: Nikola Jokic <jokicnikola07@gmail.com>
2025-06-11 15:52:38 +02:00
Tingluo Huang
c359d14e69 Avoid nil point when config.Metrics is nil and expose all metrics if none are configured (#4101)
Co-authored-by: Nikola Jokic <jokicnikola07@gmail.com>
2025-06-11 15:51:26 +02:00
Nikola Jokic
9d8c59aeb3 Add startup probe to dind side-car (#4117) 2025-06-11 15:50:30 +02:00
dependabot[bot]
eef57e1a77 Bump github.com/cloudflare/circl from 1.6.0 to 1.6.1 (#4118)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-06-11 11:46:20 +02:00
Ryo Sakamoto
97697e80b4 Add job_workflow_ref label to listener metrics (#4054)
Signed-off-by: rskmm0chang <rskmm0chang@hatena.ne.jp>
2025-06-05 08:33:30 +02:00
github-actions[bot]
27b292bdd3 Updates: runner to v2.325.0 (#4109)
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
2025-06-03 11:05:40 -04:00
Nikola Jokic
1dbb88cb9e Allow use of client id as an app id (#4057) 2025-05-16 16:21:06 +02:00
Nikola Jokic
43f1cd0dac Refactor resource naming removing unnecessary calculations (#4076) 2025-05-15 10:56:34 +02:00
Nikola Jokic
389d842a30 Relax version requirements to allow patch version mismatch (#4080)
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-05-14 21:38:16 +02:00
Nikola Jokic
f6f42dd4c1 Fix docker lint warnings (#4074) 2025-05-14 19:57:39 +02:00
github-actions[bot]
20e157fa72 Updates: runner to v2.324.0 container-hooks to v0.7.0 (#4086)
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
2025-05-14 10:05:33 -04:00
Nikola Jokic
cae7efa2c6 Create backoff mechanism for failed runners and allow re-creation of failed ephemeral runners (#4059) 2025-05-14 15:38:50 +02:00
Nikola Jokic
d6e2790db5 Bump go version (#4075) 2025-05-14 13:51:47 +02:00
scodef
a1a8dc5606 Add missing backtick to metrics.serviceMonitor.namespace line to correct formatting (#3790) 2025-05-07 14:52:42 +02:00
Mosè Giordano
16304b5ce7 Fix code block fences (#3140)
Co-authored-by: Mosè Giordano <giordano@users.noreply.github.com>
2025-05-06 13:47:13 +02:00
Borislav Velkov
32f19acc66 feat(helm): move dind to sidecar (#3842) 2025-04-23 14:51:01 +02:00
Ken Muse
46ee5cf9a2 Revised dashboard (#4022) 2025-04-23 11:36:05 +02:00
Ryosei Karaki
f832b0b254 upgrade(golangci-lint): v2.1.2 (#4023)
Signed-off-by: karamaru-alpha <mrnk3078@gmail.com>
2025-04-17 16:14:31 +02:00
Nikola Jokic
a33d34a036 Pin third party actions (#3981) 2025-04-17 12:19:15 +02:00
Nikola Jokic
15990d492d Include more context to errors raised by github/actions client (#4032)
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-04-11 11:36:15 +02:00
dependabot[bot]
462db4dfc8 Bump the gomod group across 1 directory with 7 updates (#4008)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Nikola Jokic <jokicnikola07@gmail.com>
2025-04-07 16:51:07 +02:00
David Maxwell
ea27448da5 Fix busy runners metric (#4016) 2025-04-04 17:17:09 +02:00
143 changed files with 24041 additions and 19271 deletions

View File

@@ -1,9 +1,9 @@
name: 'Setup ARC E2E Test Action' name: "Setup ARC E2E Test Action"
description: 'Build controller image, create kind cluster, load the image, and exchange ARC configure token.' description: "Build controller image, create kind cluster, load the image, and exchange ARC configure token."
inputs: inputs:
app-id: app-id:
description: 'GitHub App Id for exchange access token' description: "GitHub App Id for exchange access token"
required: true required: true
app-pk: app-pk:
description: "GitHub App private key for exchange access token" description: "GitHub App private key for exchange access token"
@@ -20,30 +20,31 @@ inputs:
outputs: outputs:
token: token:
description: 'Token to use for configure ARC' description: "Token to use for configure ARC"
value: ${{steps.config-token.outputs.token}} value: ${{steps.config-token.outputs.token}}
runs: runs:
using: "composite" using: "composite"
steps: steps:
- name: Set up Docker Buildx - name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3 uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2
with: with:
# Pinning v0.9.1 for Buildx and BuildKit v0.10.6 # Pinning v0.9.1 for Buildx and BuildKit v0.10.6
# BuildKit v0.11 which has a bug causing intermittent # BuildKit v0.11 which has a bug causing intermittent
# failures pushing images to GHCR # failures pushing images to GHCR
version: v0.9.1 version: v0.9.1
driver-opts: image=moby/buildkit:v0.10.6 driver-opts: image=moby/buildkit:v0.10.6
- name: Build controller image - name: Build controller image
uses: docker/build-push-action@v5 # https://github.com/docker/build-push-action/releases/tag/v6.18.0
uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83
with: with:
file: Dockerfile file: Dockerfile
platforms: linux/amd64 platforms: linux/amd64
load: true load: true
build-args: | build-args: |
DOCKER_IMAGE_NAME=${{inputs.image-name}} DOCKER_IMAGE_NAME=${{inputs.image-name}}
VERSION=${{inputs.image-tag}} VERSION=${{inputs.image-tag}}
tags: | tags: |
${{inputs.image-name}}:${{inputs.image-tag}} ${{inputs.image-name}}:${{inputs.image-tag}}
no-cache: true no-cache: true
@@ -56,8 +57,9 @@ runs:
- name: Get configure token - name: Get configure token
id: config-token id: config-token
# https://github.com/peter-murray/workflow-application-token-action/releases/tag/v3.0.0
uses: peter-murray/workflow-application-token-action@dc0413987a085fa17d19df9e47d4677cf81ffef3 uses: peter-murray/workflow-application-token-action@dc0413987a085fa17d19df9e47d4677cf81ffef3
with: with:
application_id: ${{ inputs.app-id }} application_id: ${{ inputs.app-id }}
application_private_key: ${{ inputs.app-pk }} application_private_key: ${{ inputs.app-pk }}
organization: ${{ inputs.target-org}} organization: ${{ inputs.target-org}}

View File

@@ -24,23 +24,27 @@ runs:
shell: bash shell: bash
- name: Set up QEMU - name: Set up QEMU
uses: docker/setup-qemu-action@v3 # https://github.com/docker/setup-qemu-action/releases/tag/v3.6.0
uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392
- name: Set up Docker Buildx - name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3 # https://github.com/docker/setup-buildx-action/releases/tag/v3.10.0
uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2
with: with:
version: latest version: latest
- name: Login to DockerHub - name: Login to DockerHub
if: ${{ github.event_name == 'release' || github.event_name == 'push' && github.ref == 'refs/heads/master' && inputs.password != '' }} if: ${{ github.event_name == 'release' || github.event_name == 'push' && github.ref == 'refs/heads/master' && inputs.password != '' }}
uses: docker/login-action@v3 # https://github.com/docker/login-action/releases/tag/v3.4.0
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772
with: with:
username: ${{ inputs.username }} username: ${{ inputs.username }}
password: ${{ inputs.password }} password: ${{ inputs.password }}
- name: Login to GitHub Container Registry - name: Login to GitHub Container Registry
if: ${{ github.event_name == 'release' || github.event_name == 'push' && github.ref == 'refs/heads/master' && inputs.ghcr_password != '' }} if: ${{ github.event_name == 'release' || github.event_name == 'push' && github.ref == 'refs/heads/master' && inputs.ghcr_password != '' }}
uses: docker/login-action@v3 # https://github.com/docker/login-action/releases/tag/v3.4.0
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772
with: with:
registry: ghcr.io registry: ghcr.io
username: ${{ inputs.ghcr_username }} username: ${{ inputs.ghcr_username }}

View File

@@ -5,18 +5,18 @@ name: Publish ARC Helm Charts
on: on:
push: push:
branches: branches:
- master - master
paths: paths:
- 'charts/**' - "charts/**"
- '.github/workflows/arc-publish-chart.yaml' - ".github/workflows/arc-publish-chart.yaml"
- '!charts/actions-runner-controller/docs/**' - "!charts/actions-runner-controller/docs/**"
- '!charts/gha-runner-scale-set-controller/**' - "!charts/gha-runner-scale-set-controller/**"
- '!charts/gha-runner-scale-set/**' - "!charts/gha-runner-scale-set/**"
- '!**.md' - "!**.md"
workflow_dispatch: workflow_dispatch:
inputs: inputs:
force: force:
description: 'Force publish even if the chart version is not bumped' description: "Force publish even if the chart version is not bumped"
type: boolean type: boolean
required: true required: true
default: false default: false
@@ -39,86 +39,86 @@ jobs:
outputs: outputs:
publish-chart: ${{ steps.publish-chart-step.outputs.publish }} publish-chart: ${{ steps.publish-chart-step.outputs.publish }}
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v4 uses: actions/checkout@v5
with: with:
fetch-depth: 0 fetch-depth: 0
- name: Set up Helm - name: Set up Helm
uses: azure/setup-helm@fe7b79cd5ee1e45176fcad797de68ecaf3ca4814 uses: azure/setup-helm@1a275c3b69536ee54be43f2070a358922e12c8d4
with: with:
version: ${{ env.HELM_VERSION }} version: ${{ env.HELM_VERSION }}
- name: Set up kube-score - name: Set up kube-score
run: | run: |
wget https://github.com/zegl/kube-score/releases/download/v${{ env.KUBE_SCORE_VERSION }}/kube-score_${{ env.KUBE_SCORE_VERSION }}_linux_amd64 -O kube-score wget https://github.com/zegl/kube-score/releases/download/v${{ env.KUBE_SCORE_VERSION }}/kube-score_${{ env.KUBE_SCORE_VERSION }}_linux_amd64 -O kube-score
chmod 755 kube-score chmod 755 kube-score
- name: Kube-score generated manifests - name: Kube-score generated manifests
run: helm template --values charts/.ci/values-kube-score.yaml charts/* | ./kube-score score - --ignore-test pod-networkpolicy --ignore-test deployment-has-poddisruptionbudget --ignore-test deployment-has-host-podantiaffinity --ignore-test container-security-context --ignore-test pod-probes --ignore-test container-image-tag --enable-optional-test container-security-context-privileged --enable-optional-test container-security-context-readonlyrootfilesystem run: helm template --values charts/.ci/values-kube-score.yaml charts/* | ./kube-score score - --ignore-test pod-networkpolicy --ignore-test deployment-has-poddisruptionbudget --ignore-test deployment-has-host-podantiaffinity --ignore-test container-security-context --ignore-test pod-probes --ignore-test container-image-tag --enable-optional-test container-security-context-privileged --enable-optional-test container-security-context-readonlyrootfilesystem
# python is a requirement for the chart-testing action below (supports yamllint among other tests) # python is a requirement for the chart-testing action below (supports yamllint among other tests)
- uses: actions/setup-python@v5 - uses: actions/setup-python@v6
with: with:
python-version: '3.11' python-version: "3.11"
- name: Set up chart-testing - name: Set up chart-testing
uses: helm/chart-testing-action@v2.6.0 uses: helm/chart-testing-action@0d28d3144d3a25ea2cc349d6e59901c4ff469b3b
- name: Run chart-testing (list-changed) - name: Run chart-testing (list-changed)
id: list-changed id: list-changed
run: | run: |
changed=$(ct list-changed --config charts/.ci/ct-config.yaml) changed=$(ct list-changed --config charts/.ci/ct-config.yaml)
if [[ -n "$changed" ]]; then if [[ -n "$changed" ]]; then
echo "changed=true" >> $GITHUB_OUTPUT echo "changed=true" >> $GITHUB_OUTPUT
fi fi
- name: Run chart-testing (lint) - name: Run chart-testing (lint)
run: | run: |
ct lint --config charts/.ci/ct-config.yaml ct lint --config charts/.ci/ct-config.yaml
- name: Create kind cluster - name: Create kind cluster
if: steps.list-changed.outputs.changed == 'true' if: steps.list-changed.outputs.changed == 'true'
uses: helm/kind-action@v1.4.0 uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3
# We need cert-manager already installed in the cluster because we assume the CRDs exist # We need cert-manager already installed in the cluster because we assume the CRDs exist
- name: Install cert-manager - name: Install cert-manager
if: steps.list-changed.outputs.changed == 'true' if: steps.list-changed.outputs.changed == 'true'
run: | run: |
helm repo add jetstack https://charts.jetstack.io --force-update helm repo add jetstack https://charts.jetstack.io --force-update
helm install cert-manager jetstack/cert-manager --set installCRDs=true --wait helm install cert-manager jetstack/cert-manager --set installCRDs=true --wait
- name: Run chart-testing (install) - name: Run chart-testing (install)
if: steps.list-changed.outputs.changed == 'true' if: steps.list-changed.outputs.changed == 'true'
run: ct install --config charts/.ci/ct-config.yaml run: ct install --config charts/.ci/ct-config.yaml
# WARNING: This relies on the latest release being at the top of the JSON from GitHub and a clean chart.yaml # WARNING: This relies on the latest release being at the top of the JSON from GitHub and a clean chart.yaml
- name: Check if Chart Publish is Needed - name: Check if Chart Publish is Needed
id: publish-chart-step id: publish-chart-step
run: | run: |
CHART_TEXT=$(curl -fs https://raw.githubusercontent.com/${{ github.repository }}/master/charts/actions-runner-controller/Chart.yaml) CHART_TEXT=$(curl -fs https://raw.githubusercontent.com/${{ github.repository }}/master/charts/actions-runner-controller/Chart.yaml)
NEW_CHART_VERSION=$(echo "$CHART_TEXT" | grep version: | cut -d ' ' -f 2) NEW_CHART_VERSION=$(echo "$CHART_TEXT" | grep version: | cut -d ' ' -f 2)
RELEASE_LIST=$(curl -fs https://api.github.com/repos/${{ github.repository }}/releases | jq .[].tag_name | grep actions-runner-controller | cut -d '"' -f 2 | cut -d '-' -f 4) RELEASE_LIST=$(curl -fs https://api.github.com/repos/${{ github.repository }}/releases | jq .[].tag_name | grep actions-runner-controller | cut -d '"' -f 2 | cut -d '-' -f 4)
LATEST_RELEASED_CHART_VERSION=$(echo $RELEASE_LIST | cut -d ' ' -f 1) LATEST_RELEASED_CHART_VERSION=$(echo $RELEASE_LIST | cut -d ' ' -f 1)
echo "CHART_VERSION_IN_MASTER=$NEW_CHART_VERSION" >> $GITHUB_ENV echo "CHART_VERSION_IN_MASTER=$NEW_CHART_VERSION" >> $GITHUB_ENV
echo "LATEST_CHART_VERSION=$LATEST_RELEASED_CHART_VERSION" >> $GITHUB_ENV echo "LATEST_CHART_VERSION=$LATEST_RELEASED_CHART_VERSION" >> $GITHUB_ENV
# Always publish if force is true # Always publish if force is true
if [[ $NEW_CHART_VERSION != $LATEST_RELEASED_CHART_VERSION || "${{ inputs.force }}" == "true" ]]; then if [[ $NEW_CHART_VERSION != $LATEST_RELEASED_CHART_VERSION || "${{ inputs.force }}" == "true" ]]; then
echo "publish=true" >> $GITHUB_OUTPUT echo "publish=true" >> $GITHUB_OUTPUT
else else
echo "publish=false" >> $GITHUB_OUTPUT echo "publish=false" >> $GITHUB_OUTPUT
fi fi
- name: Job summary - name: Job summary
run: | run: |
echo "Chart linting has been completed." >> $GITHUB_STEP_SUMMARY echo "Chart linting has been completed." >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY echo "" >> $GITHUB_STEP_SUMMARY
echo "**Status:**" >> $GITHUB_STEP_SUMMARY echo "**Status:**" >> $GITHUB_STEP_SUMMARY
echo "- chart version in master: ${{ env.CHART_VERSION_IN_MASTER }}" >> $GITHUB_STEP_SUMMARY echo "- chart version in master: ${{ env.CHART_VERSION_IN_MASTER }}" >> $GITHUB_STEP_SUMMARY
echo "- latest chart version: ${{ env.LATEST_CHART_VERSION }}" >> $GITHUB_STEP_SUMMARY echo "- latest chart version: ${{ env.LATEST_CHART_VERSION }}" >> $GITHUB_STEP_SUMMARY
echo "- publish new chart: ${{ steps.publish-chart-step.outputs.publish }}" >> $GITHUB_STEP_SUMMARY echo "- publish new chart: ${{ steps.publish-chart-step.outputs.publish }}" >> $GITHUB_STEP_SUMMARY
publish-chart: publish-chart:
if: needs.lint-chart.outputs.publish-chart == 'true' if: needs.lint-chart.outputs.publish-chart == 'true'
@@ -133,80 +133,80 @@ jobs:
CHART_TARGET_BRANCH: master CHART_TARGET_BRANCH: master
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v4 uses: actions/checkout@v5
with: with:
fetch-depth: 0 fetch-depth: 0
- name: Configure Git - name: Configure Git
run: | run: |
git config user.name "$GITHUB_ACTOR" git config user.name "$GITHUB_ACTOR"
git config user.email "$GITHUB_ACTOR@users.noreply.github.com" git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
- name: Get Token - name: Get Token
id: get_workflow_token id: get_workflow_token
uses: peter-murray/workflow-application-token-action@dc0413987a085fa17d19df9e47d4677cf81ffef3 uses: peter-murray/workflow-application-token-action@d17e3a9a36850ea89f35db16c1067dd2b68ee343
with: with:
application_id: ${{ secrets.ACTIONS_ACCESS_APP_ID }} application_id: ${{ secrets.ACTIONS_ACCESS_APP_ID }}
application_private_key: ${{ secrets.ACTIONS_ACCESS_PK }} application_private_key: ${{ secrets.ACTIONS_ACCESS_PK }}
organization: ${{ env.CHART_TARGET_ORG }} organization: ${{ env.CHART_TARGET_ORG }}
- name: Install chart-releaser - name: Install chart-releaser
uses: helm/chart-releaser-action@v1.4.1 uses: helm/chart-releaser-action@cae68fefc6b5f367a0275617c9f83181ba54714f
with: with:
install_only: true install_only: true
install_dir: ${{ github.workspace }}/bin install_dir: ${{ github.workspace }}/bin
- name: Package and upload release assets - name: Package and upload release assets
run: | run: |
cr package \ cr package \
${{ github.workspace }}/charts/actions-runner-controller/ \ ${{ github.workspace }}/charts/actions-runner-controller/ \
--package-path .cr-release-packages --package-path .cr-release-packages
cr upload \ cr upload \
--owner "$(echo ${{ github.repository }} | cut -d '/' -f 1)" \ --owner "$(echo ${{ github.repository }} | cut -d '/' -f 1)" \
--git-repo "$(echo ${{ github.repository }} | cut -d '/' -f 2)" \ --git-repo "$(echo ${{ github.repository }} | cut -d '/' -f 2)" \
--package-path .cr-release-packages \ --package-path .cr-release-packages \
--token ${{ secrets.GITHUB_TOKEN }} --token ${{ secrets.GITHUB_TOKEN }}
- name: Generate updated index.yaml - name: Generate updated index.yaml
run: | run: |
cr index \ cr index \
--owner "$(echo ${{ github.repository }} | cut -d '/' -f 1)" \ --owner "$(echo ${{ github.repository }} | cut -d '/' -f 1)" \
--git-repo "$(echo ${{ github.repository }} | cut -d '/' -f 2)" \ --git-repo "$(echo ${{ github.repository }} | cut -d '/' -f 2)" \
--index-path ${{ github.workspace }}/index.yaml \ --index-path ${{ github.workspace }}/index.yaml \
--token ${{ secrets.GITHUB_TOKEN }} \ --token ${{ secrets.GITHUB_TOKEN }} \
--push \ --push \
--pages-branch 'gh-pages' \ --pages-branch 'gh-pages' \
--pages-index-path 'index.yaml' --pages-index-path 'index.yaml'
# Chart Release was never intended to publish to a different repo # Chart Release was never intended to publish to a different repo
# this workaround is intended to move the index.yaml to the target repo # this workaround is intended to move the index.yaml to the target repo
# where the github pages are hosted # where the github pages are hosted
- name: Checkout target repository - name: Checkout target repository
uses: actions/checkout@v4 uses: actions/checkout@v5
with: with:
repository: ${{ env.CHART_TARGET_ORG }}/${{ env.CHART_TARGET_REPO }} repository: ${{ env.CHART_TARGET_ORG }}/${{ env.CHART_TARGET_REPO }}
path: ${{ env.CHART_TARGET_REPO }} path: ${{ env.CHART_TARGET_REPO }}
ref: ${{ env.CHART_TARGET_BRANCH }} ref: ${{ env.CHART_TARGET_BRANCH }}
token: ${{ steps.get_workflow_token.outputs.token }} token: ${{ steps.get_workflow_token.outputs.token }}
- name: Copy index.yaml - name: Copy index.yaml
run: | run: |
cp ${{ github.workspace }}/index.yaml ${{ env.CHART_TARGET_REPO }}/actions-runner-controller/index.yaml cp ${{ github.workspace }}/index.yaml ${{ env.CHART_TARGET_REPO }}/actions-runner-controller/index.yaml
- name: Commit and push to target repository - name: Commit and push to target repository
run: | run: |
git config user.name "$GITHUB_ACTOR" git config user.name "$GITHUB_ACTOR"
git config user.email "$GITHUB_ACTOR@users.noreply.github.com" git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
git add . git add .
git commit -m "Update index.yaml" git commit -m "Update index.yaml"
git push git push
working-directory: ${{ github.workspace }}/${{ env.CHART_TARGET_REPO }} working-directory: ${{ github.workspace }}/${{ env.CHART_TARGET_REPO }}
- name: Job summary - name: Job summary
run: | run: |
echo "New helm chart has been published" >> $GITHUB_STEP_SUMMARY echo "New helm chart has been published" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY echo "" >> $GITHUB_STEP_SUMMARY
echo "**Status:**" >> $GITHUB_STEP_SUMMARY echo "**Status:**" >> $GITHUB_STEP_SUMMARY
echo "- New [index.yaml](https://github.com/${{ env.CHART_TARGET_ORG }}/${{ env.CHART_TARGET_REPO }}/tree/master/actions-runner-controller) pushed" >> $GITHUB_STEP_SUMMARY echo "- New [index.yaml](https://github.com/${{ env.CHART_TARGET_ORG }}/${{ env.CHART_TARGET_REPO }}/tree/master/actions-runner-controller) pushed" >> $GITHUB_STEP_SUMMARY

View File

@@ -9,17 +9,17 @@ on:
workflow_dispatch: workflow_dispatch:
inputs: inputs:
release_tag_name: release_tag_name:
description: 'Tag name of the release to publish' description: "Tag name of the release to publish"
required: true required: true
push_to_registries: push_to_registries:
description: 'Push images to registries' description: "Push images to registries"
required: true required: true
type: boolean type: boolean
default: false default: false
permissions: permissions:
contents: write contents: write
packages: write packages: write
env: env:
TARGET_ORG: actions-runner-controller TARGET_ORG: actions-runner-controller
@@ -39,11 +39,11 @@ jobs:
if: ${{ !startsWith(github.event.inputs.release_tag_name, 'gha-runner-scale-set-') }} if: ${{ !startsWith(github.event.inputs.release_tag_name, 'gha-runner-scale-set-') }}
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v4 uses: actions/checkout@v5
- uses: actions/setup-go@v5 - uses: actions/setup-go@v6
with: with:
go-version-file: 'go.mod' go-version-file: "go.mod"
- name: Install tools - name: Install tools
run: | run: |
@@ -73,7 +73,7 @@ jobs:
- name: Get Token - name: Get Token
id: get_workflow_token id: get_workflow_token
uses: peter-murray/workflow-application-token-action@dc0413987a085fa17d19df9e47d4677cf81ffef3 uses: peter-murray/workflow-application-token-action@d17e3a9a36850ea89f35db16c1067dd2b68ee343
with: with:
application_id: ${{ secrets.ACTIONS_ACCESS_APP_ID }} application_id: ${{ secrets.ACTIONS_ACCESS_APP_ID }}
application_private_key: ${{ secrets.ACTIONS_ACCESS_PK }} application_private_key: ${{ secrets.ACTIONS_ACCESS_PK }}

View File

@@ -7,10 +7,10 @@ on:
# are available to the workflow run # are available to the workflow run
push: push:
branches: branches:
- 'master' - "master"
paths: paths:
- 'runner/VERSION' - "runner/VERSION"
- '.github/workflows/arc-release-runners.yaml' - ".github/workflows/arc-release-runners.yaml"
env: env:
# Safeguard to prevent pushing images to registeries after build # Safeguard to prevent pushing images to registeries after build
@@ -28,7 +28,7 @@ jobs:
name: Trigger Build and Push of Runner Images name: Trigger Build and Push of Runner Images
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v5
- name: Get runner version - name: Get runner version
id: versions id: versions
run: | run: |
@@ -39,7 +39,7 @@ jobs:
- name: Get Token - name: Get Token
id: get_workflow_token id: get_workflow_token
uses: peter-murray/workflow-application-token-action@dc0413987a085fa17d19df9e47d4677cf81ffef3 uses: peter-murray/workflow-application-token-action@d17e3a9a36850ea89f35db16c1067dd2b68ee343
with: with:
application_id: ${{ secrets.ACTIONS_ACCESS_APP_ID }} application_id: ${{ secrets.ACTIONS_ACCESS_APP_ID }}
application_private_key: ${{ secrets.ACTIONS_ACCESS_PK }} application_private_key: ${{ secrets.ACTIONS_ACCESS_PK }}

View File

@@ -21,7 +21,7 @@ jobs:
container_hooks_current_version: ${{ steps.container_hooks_versions.outputs.container_hooks_current_version }} container_hooks_current_version: ${{ steps.container_hooks_versions.outputs.container_hooks_current_version }}
container_hooks_latest_version: ${{ steps.container_hooks_versions.outputs.container_hooks_latest_version }} container_hooks_latest_version: ${{ steps.container_hooks_versions.outputs.container_hooks_latest_version }}
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v5
- name: Get runner current and latest versions - name: Get runner current and latest versions
id: runner_versions id: runner_versions
@@ -64,7 +64,7 @@ jobs:
echo "CONTAINER_HOOKS_CURRENT_VERSION=${{ needs.check_versions.outputs.container_hooks_current_version }}" echo "CONTAINER_HOOKS_CURRENT_VERSION=${{ needs.check_versions.outputs.container_hooks_current_version }}"
echo "CONTAINER_HOOKS_LATEST_VERSION=${{ needs.check_versions.outputs.container_hooks_latest_version }}" echo "CONTAINER_HOOKS_LATEST_VERSION=${{ needs.check_versions.outputs.container_hooks_latest_version }}"
- uses: actions/checkout@v4 - uses: actions/checkout@v5
- name: PR Name - name: PR Name
id: pr_name id: pr_name
@@ -119,7 +119,7 @@ jobs:
PR_NAME: ${{ needs.check_pr.outputs.pr_name }} PR_NAME: ${{ needs.check_pr.outputs.pr_name }}
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v5
- name: New branch - name: New branch
run: git checkout -b update-runner-"$(date +%Y-%m-%d)" run: git checkout -b update-runner-"$(date +%Y-%m-%d)"

View File

@@ -5,20 +5,20 @@ on:
branches: branches:
- master - master
paths: paths:
- 'charts/**' - "charts/**"
- '.github/workflows/arc-validate-chart.yaml' - ".github/workflows/arc-validate-chart.yaml"
- '!charts/actions-runner-controller/docs/**' - "!charts/actions-runner-controller/docs/**"
- '!**.md' - "!**.md"
- '!charts/gha-runner-scale-set-controller/**' - "!charts/gha-runner-scale-set-controller/**"
- '!charts/gha-runner-scale-set/**' - "!charts/gha-runner-scale-set/**"
push: push:
paths: paths:
- 'charts/**' - "charts/**"
- '.github/workflows/arc-validate-chart.yaml' - ".github/workflows/arc-validate-chart.yaml"
- '!charts/actions-runner-controller/docs/**' - "!charts/actions-runner-controller/docs/**"
- '!**.md' - "!**.md"
- '!charts/gha-runner-scale-set-controller/**' - "!charts/gha-runner-scale-set-controller/**"
- '!charts/gha-runner-scale-set/**' - "!charts/gha-runner-scale-set/**"
workflow_dispatch: workflow_dispatch:
env: env:
KUBE_SCORE_VERSION: 1.10.0 KUBE_SCORE_VERSION: 1.10.0
@@ -40,39 +40,22 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v4 uses: actions/checkout@v5
with: with:
fetch-depth: 0 fetch-depth: 0
- name: Set up Helm - name: Set up Helm
# Using https://github.com/Azure/setup-helm/releases/tag/v4.2 uses: azure/setup-helm@1a275c3b69536ee54be43f2070a358922e12c8d4
uses: azure/setup-helm@fe7b79cd5ee1e45176fcad797de68ecaf3ca4814
with: with:
version: ${{ env.HELM_VERSION }} version: ${{ env.HELM_VERSION }}
- name: Set up kube-score
run: |
wget https://github.com/zegl/kube-score/releases/download/v${{ env.KUBE_SCORE_VERSION }}/kube-score_${{ env.KUBE_SCORE_VERSION }}_linux_amd64 -O kube-score
chmod 755 kube-score
- name: Kube-score generated manifests
run: helm template --values charts/.ci/values-kube-score.yaml charts/* | ./kube-score score -
--ignore-test pod-networkpolicy
--ignore-test deployment-has-poddisruptionbudget
--ignore-test deployment-has-host-podantiaffinity
--ignore-test container-security-context
--ignore-test pod-probes
--ignore-test container-image-tag
--enable-optional-test container-security-context-privileged
--enable-optional-test container-security-context-readonlyrootfilesystem
# python is a requirement for the chart-testing action below (supports yamllint among other tests) # python is a requirement for the chart-testing action below (supports yamllint among other tests)
- uses: actions/setup-python@v5 - uses: actions/setup-python@v6
with: with:
python-version: '3.11' python-version: "3.11"
- name: Set up chart-testing - name: Set up chart-testing
uses: helm/chart-testing-action@v2.6.0 uses: helm/chart-testing-action@0d28d3144d3a25ea2cc349d6e59901c4ff469b3b
- name: Run chart-testing (list-changed) - name: Run chart-testing (list-changed)
id: list-changed id: list-changed
@@ -87,7 +70,7 @@ jobs:
ct lint --config charts/.ci/ct-config.yaml ct lint --config charts/.ci/ct-config.yaml
- name: Create kind cluster - name: Create kind cluster
uses: helm/kind-action@v1.4.0 uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3
if: steps.list-changed.outputs.changed == 'true' if: steps.list-changed.outputs.changed == 'true'
# We need cert-manager already installed in the cluster because we assume the CRDs exist # We need cert-manager already installed in the cluster because we assume the CRDs exist

View File

@@ -3,17 +3,17 @@ name: Validate ARC Runners
on: on:
pull_request: pull_request:
branches: branches:
- '**' - "**"
paths: paths:
- 'runner/**' - "runner/**"
- 'test/startup/**' - "test/startup/**"
- '!**.md' - "!**.md"
permissions: permissions:
contents: read contents: read
concurrency: concurrency:
# This will make sure we only apply the concurrency limits on pull requests # This will make sure we only apply the concurrency limits on pull requests
# but not pushes to master branch by making the concurrency group name unique # but not pushes to master branch by making the concurrency group name unique
# for pushes # for pushes
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
@@ -24,29 +24,17 @@ jobs:
name: runner / shellcheck name: runner / shellcheck
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v5
- name: shellcheck - name: "Run shellcheck"
uses: reviewdog/action-shellcheck@v1 run: make shellcheck
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
path: "./runner"
pattern: |
*.sh
*.bash
update-status
# Make this consistent with `make shellsheck`
shellcheck_flags: "--shell bash --source-path runner"
exclude: "./.git/*"
check_all_files_with_shebangs: "false"
# Set this to "true" once we addressed all the shellcheck findings
fail_on_error: "false"
test-runner-entrypoint: test-runner-entrypoint:
name: Test entrypoint name: Test entrypoint
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v4 uses: actions/checkout@v5
- name: Run tests - name: Run tests
run: | run: |
make acceptance/runner/startup make acceptance/runner/startup

View File

@@ -16,7 +16,7 @@ env:
TARGET_ORG: actions-runner-controller TARGET_ORG: actions-runner-controller
TARGET_REPO: arc_e2e_test_dummy TARGET_REPO: arc_e2e_test_dummy
IMAGE_NAME: "arc-test-image" IMAGE_NAME: "arc-test-image"
IMAGE_VERSION: "0.11.0" IMAGE_VERSION: "0.12.1"
concurrency: concurrency:
# This will make sure we only apply the concurrency limits on pull requests # This will make sure we only apply the concurrency limits on pull requests
@@ -33,7 +33,7 @@ jobs:
env: env:
WORKFLOW_FILE: "arc-test-workflow.yaml" WORKFLOW_FILE: "arc-test-workflow.yaml"
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v5
with: with:
ref: ${{github.head_ref}} ref: ${{github.head_ref}}
@@ -124,7 +124,7 @@ jobs:
env: env:
WORKFLOW_FILE: "arc-test-workflow.yaml" WORKFLOW_FILE: "arc-test-workflow.yaml"
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v5
with: with:
ref: ${{github.head_ref}} ref: ${{github.head_ref}}
@@ -217,7 +217,7 @@ jobs:
env: env:
WORKFLOW_FILE: arc-test-dind-workflow.yaml WORKFLOW_FILE: arc-test-dind-workflow.yaml
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v5
with: with:
ref: ${{github.head_ref}} ref: ${{github.head_ref}}
@@ -309,7 +309,7 @@ jobs:
env: env:
WORKFLOW_FILE: "arc-test-kubernetes-workflow.yaml" WORKFLOW_FILE: "arc-test-kubernetes-workflow.yaml"
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v5
with: with:
ref: ${{github.head_ref}} ref: ${{github.head_ref}}
@@ -410,7 +410,7 @@ jobs:
env: env:
WORKFLOW_FILE: "arc-test-workflow.yaml" WORKFLOW_FILE: "arc-test-workflow.yaml"
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v5
with: with:
ref: ${{github.head_ref}} ref: ${{github.head_ref}}
@@ -513,7 +513,7 @@ jobs:
env: env:
WORKFLOW_FILE: "arc-test-workflow.yaml" WORKFLOW_FILE: "arc-test-workflow.yaml"
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v5
with: with:
ref: ${{github.head_ref}} ref: ${{github.head_ref}}
@@ -610,7 +610,7 @@ jobs:
env: env:
WORKFLOW_FILE: "arc-test-workflow.yaml" WORKFLOW_FILE: "arc-test-workflow.yaml"
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v5
with: with:
ref: ${{github.head_ref}} ref: ${{github.head_ref}}
@@ -732,7 +732,7 @@ jobs:
env: env:
WORKFLOW_FILE: "arc-test-sleepy-matrix.yaml" WORKFLOW_FILE: "arc-test-sleepy-matrix.yaml"
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v5
with: with:
ref: ${{github.head_ref}} ref: ${{github.head_ref}}
@@ -904,7 +904,7 @@ jobs:
env: env:
WORKFLOW_FILE: arc-test-workflow.yaml WORKFLOW_FILE: arc-test-workflow.yaml
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v5
with: with:
ref: ${{ github.head_ref }} ref: ${{ github.head_ref }}

View File

@@ -4,27 +4,27 @@ on:
workflow_dispatch: workflow_dispatch:
inputs: inputs:
ref: ref:
description: 'The branch, tag or SHA to cut a release from' description: "The branch, tag or SHA to cut a release from"
required: false required: false
type: string type: string
default: '' default: ""
release_tag_name: release_tag_name:
description: 'The name to tag the controller image with' description: "The name to tag the controller image with"
required: true required: true
type: string type: string
default: 'canary' default: "canary"
push_to_registries: push_to_registries:
description: 'Push images to registries' description: "Push images to registries"
required: true required: true
type: boolean type: boolean
default: false default: false
publish_gha_runner_scale_set_controller_chart: publish_gha_runner_scale_set_controller_chart:
description: 'Publish new helm chart for gha-runner-scale-set-controller' description: "Publish new helm chart for gha-runner-scale-set-controller"
required: true required: true
type: boolean type: boolean
default: false default: false
publish_gha_runner_scale_set_chart: publish_gha_runner_scale_set_chart:
description: 'Publish new helm chart for gha-runner-scale-set' description: "Publish new helm chart for gha-runner-scale-set"
required: true required: true
type: boolean type: boolean
default: false default: false
@@ -45,7 +45,7 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v4 uses: actions/checkout@v5
with: with:
# If inputs.ref is empty, it'll resolve to the default branch # If inputs.ref is empty, it'll resolve to the default branch
ref: ${{ inputs.ref }} ref: ${{ inputs.ref }}
@@ -72,10 +72,10 @@ jobs:
echo "repository_owner=$(echo ${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT echo "repository_owner=$(echo ${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT
- name: Set up QEMU - name: Set up QEMU
uses: docker/setup-qemu-action@v3 uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392
- name: Set up Docker Buildx - name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3 uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435
with: with:
# Pinning v0.9.1 for Buildx and BuildKit v0.10.6 # Pinning v0.9.1 for Buildx and BuildKit v0.10.6
# BuildKit v0.11 which has a bug causing intermittent # BuildKit v0.11 which has a bug causing intermittent
@@ -84,14 +84,14 @@ jobs:
driver-opts: image=moby/buildkit:v0.10.6 driver-opts: image=moby/buildkit:v0.10.6
- name: Login to GitHub Container Registry - name: Login to GitHub Container Registry
uses: docker/login-action@v3 uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef
with: with:
registry: ghcr.io registry: ghcr.io
username: ${{ github.actor }} username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }} password: ${{ secrets.GITHUB_TOKEN }}
- name: Build & push controller image - name: Build & push controller image
uses: docker/build-push-action@v5 uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83
with: with:
file: Dockerfile file: Dockerfile
platforms: linux/amd64,linux/arm64 platforms: linux/amd64,linux/arm64
@@ -100,8 +100,6 @@ jobs:
tags: | tags: |
ghcr.io/${{ steps.resolve_parameters.outputs.repository_owner }}/gha-runner-scale-set-controller:${{ inputs.release_tag_name }} ghcr.io/${{ steps.resolve_parameters.outputs.repository_owner }}/gha-runner-scale-set-controller:${{ inputs.release_tag_name }}
ghcr.io/${{ steps.resolve_parameters.outputs.repository_owner }}/gha-runner-scale-set-controller:${{ inputs.release_tag_name }}-${{ steps.resolve_parameters.outputs.short_sha }} ghcr.io/${{ steps.resolve_parameters.outputs.repository_owner }}/gha-runner-scale-set-controller:${{ inputs.release_tag_name }}-${{ steps.resolve_parameters.outputs.short_sha }}
cache-from: type=gha
cache-to: type=gha,mode=max
- name: Job summary - name: Job summary
run: | run: |
@@ -121,7 +119,7 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v4 uses: actions/checkout@v5
with: with:
# If inputs.ref is empty, it'll resolve to the default branch # If inputs.ref is empty, it'll resolve to the default branch
ref: ${{ inputs.ref }} ref: ${{ inputs.ref }}
@@ -140,8 +138,7 @@ jobs:
echo "repository_owner=$(echo ${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT echo "repository_owner=$(echo ${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT
- name: Set up Helm - name: Set up Helm
# Using https://github.com/Azure/setup-helm/releases/tag/v4.2 uses: azure/setup-helm@1a275c3b69536ee54be43f2070a358922e12c8d4
uses: azure/setup-helm@fe7b79cd5ee1e45176fcad797de68ecaf3ca4814
with: with:
version: ${{ env.HELM_VERSION }} version: ${{ env.HELM_VERSION }}
@@ -169,7 +166,7 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v4 uses: actions/checkout@v5
with: with:
# If inputs.ref is empty, it'll resolve to the default branch # If inputs.ref is empty, it'll resolve to the default branch
ref: ${{ inputs.ref }} ref: ${{ inputs.ref }}
@@ -188,8 +185,7 @@ jobs:
echo "repository_owner=$(echo ${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT echo "repository_owner=$(echo ${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT
- name: Set up Helm - name: Set up Helm
# Using https://github.com/Azure/setup-helm/releases/tag/v4.2 uses: azure/setup-helm@1a275c3b69536ee54be43f2070a358922e12c8d4
uses: azure/setup-helm@fe7b79cd5ee1e45176fcad797de68ecaf3ca4814
with: with:
version: ${{ env.HELM_VERSION }} version: ${{ env.HELM_VERSION }}

View File

@@ -5,16 +5,16 @@ on:
branches: branches:
- master - master
paths: paths:
- 'charts/**' - "charts/**"
- '.github/workflows/gha-validate-chart.yaml' - ".github/workflows/gha-validate-chart.yaml"
- '!charts/actions-runner-controller/**' - "!charts/actions-runner-controller/**"
- '!**.md' - "!**.md"
push: push:
paths: paths:
- 'charts/**' - "charts/**"
- '.github/workflows/gha-validate-chart.yaml' - ".github/workflows/gha-validate-chart.yaml"
- '!charts/actions-runner-controller/**' - "!charts/actions-runner-controller/**"
- '!**.md' - "!**.md"
workflow_dispatch: workflow_dispatch:
env: env:
KUBE_SCORE_VERSION: 1.16.1 KUBE_SCORE_VERSION: 1.16.1
@@ -36,23 +36,22 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v4 uses: actions/checkout@v5
with: with:
fetch-depth: 0 fetch-depth: 0
- name: Set up Helm - name: Set up Helm
# Using https://github.com/Azure/setup-helm/releases/tag/v4.2 uses: azure/setup-helm@1a275c3b69536ee54be43f2070a358922e12c8d4
uses: azure/setup-helm@fe7b79cd5ee1e45176fcad797de68ecaf3ca4814
with: with:
version: ${{ env.HELM_VERSION }} version: ${{ env.HELM_VERSION }}
# python is a requirement for the chart-testing action below (supports yamllint among other tests) # python is a requirement for the chart-testing action below (supports yamllint among other tests)
- uses: actions/setup-python@v5 - uses: actions/setup-python@v6
with: with:
python-version: '3.11' python-version: "3.11"
- name: Set up chart-testing - name: Set up chart-testing
uses: helm/chart-testing-action@v2.6.0 uses: helm/chart-testing-action@0d28d3144d3a25ea2cc349d6e59901c4ff469b3b
- name: Run chart-testing (list-changed) - name: Run chart-testing (list-changed)
id: list-changed id: list-changed
@@ -68,13 +67,13 @@ jobs:
ct lint --config charts/.ci/ct-config-gha.yaml ct lint --config charts/.ci/ct-config-gha.yaml
- name: Set up docker buildx - name: Set up docker buildx
uses: docker/setup-buildx-action@v3 uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435
if: steps.list-changed.outputs.changed == 'true' if: steps.list-changed.outputs.changed == 'true'
with: with:
version: latest version: latest
- name: Build controller image - name: Build controller image
uses: docker/build-push-action@v5 uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83
if: steps.list-changed.outputs.changed == 'true' if: steps.list-changed.outputs.changed == 'true'
with: with:
file: Dockerfile file: Dockerfile
@@ -89,7 +88,7 @@ jobs:
cache-to: type=gha,mode=max cache-to: type=gha,mode=max
- name: Create kind cluster - name: Create kind cluster
uses: helm/kind-action@v1.4.0 uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3
if: steps.list-changed.outputs.changed == 'true' if: steps.list-changed.outputs.changed == 'true'
with: with:
cluster_name: chart-testing cluster_name: chart-testing
@@ -97,11 +96,11 @@ jobs:
- name: Load image into cluster - name: Load image into cluster
if: steps.list-changed.outputs.changed == 'true' if: steps.list-changed.outputs.changed == 'true'
run: | run: |
export DOCKER_IMAGE_NAME=test-arc export DOCKER_IMAGE_NAME=test-arc
export VERSION=dev export VERSION=dev
export IMG_RESULT=load export IMG_RESULT=load
make docker-buildx make docker-buildx
kind load docker-image test-arc:dev --name chart-testing kind load docker-image test-arc:dev --name chart-testing
- name: Run chart-testing (install) - name: Run chart-testing (install)
if: steps.list-changed.outputs.changed == 'true' if: steps.list-changed.outputs.changed == 'true'
@@ -112,8 +111,8 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v4 uses: actions/checkout@v5
- uses: actions/setup-go@v5 - uses: actions/setup-go@v6
with: with:
go-version-file: "go.mod" go-version-file: "go.mod"
cache: false cache: false

View File

@@ -7,30 +7,30 @@ on:
branches: branches:
- master - master
paths-ignore: paths-ignore:
- '**.md' - "**.md"
- '.github/actions/**' - ".github/actions/**"
- '.github/ISSUE_TEMPLATE/**' - ".github/ISSUE_TEMPLATE/**"
- '.github/workflows/e2e-test-dispatch-workflow.yaml' - ".github/workflows/e2e-test-dispatch-workflow.yaml"
- '.github/workflows/gha-e2e-tests.yaml' - ".github/workflows/gha-e2e-tests.yaml"
- '.github/workflows/arc-publish.yaml' - ".github/workflows/arc-publish.yaml"
- '.github/workflows/arc-publish-chart.yaml' - ".github/workflows/arc-publish-chart.yaml"
- '.github/workflows/gha-publish-chart.yaml' - ".github/workflows/gha-publish-chart.yaml"
- '.github/workflows/arc-release-runners.yaml' - ".github/workflows/arc-release-runners.yaml"
- '.github/workflows/global-run-codeql.yaml' - ".github/workflows/global-run-codeql.yaml"
- '.github/workflows/global-run-first-interaction.yaml' - ".github/workflows/global-run-first-interaction.yaml"
- '.github/workflows/global-run-stale.yaml' - ".github/workflows/global-run-stale.yaml"
- '.github/workflows/arc-update-runners-scheduled.yaml' - ".github/workflows/arc-update-runners-scheduled.yaml"
- '.github/workflows/validate-arc.yaml' - ".github/workflows/validate-arc.yaml"
- '.github/workflows/arc-validate-chart.yaml' - ".github/workflows/arc-validate-chart.yaml"
- '.github/workflows/gha-validate-chart.yaml' - ".github/workflows/gha-validate-chart.yaml"
- '.github/workflows/arc-validate-runners.yaml' - ".github/workflows/arc-validate-runners.yaml"
- '.github/dependabot.yml' - ".github/dependabot.yml"
- '.github/RELEASE_NOTE_TEMPLATE.md' - ".github/RELEASE_NOTE_TEMPLATE.md"
- 'runner/**' - "runner/**"
- '.gitignore' - ".gitignore"
- 'PROJECT' - "PROJECT"
- 'LICENSE' - "LICENSE"
- 'Makefile' - "Makefile"
# https://docs.github.com/en/rest/overview/permissions-required-for-github-apps # https://docs.github.com/en/rest/overview/permissions-required-for-github-apps
permissions: permissions:
@@ -55,11 +55,11 @@ jobs:
TARGET_REPO: actions-runner-controller TARGET_REPO: actions-runner-controller
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v4 uses: actions/checkout@v5
- name: Get Token - name: Get Token
id: get_workflow_token id: get_workflow_token
uses: peter-murray/workflow-application-token-action@dc0413987a085fa17d19df9e47d4677cf81ffef3 uses: peter-murray/workflow-application-token-action@d17e3a9a36850ea89f35db16c1067dd2b68ee343
with: with:
application_id: ${{ secrets.ACTIONS_ACCESS_APP_ID }} application_id: ${{ secrets.ACTIONS_ACCESS_APP_ID }}
application_private_key: ${{ secrets.ACTIONS_ACCESS_PK }} application_private_key: ${{ secrets.ACTIONS_ACCESS_PK }}
@@ -90,10 +90,10 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v4 uses: actions/checkout@v5
- name: Login to GitHub Container Registry - name: Login to GitHub Container Registry
uses: docker/login-action@v3 uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef
with: with:
registry: ghcr.io registry: ghcr.io
username: ${{ github.actor }} username: ${{ github.actor }}
@@ -110,16 +110,16 @@ jobs:
echo "repository_owner=$(echo ${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT echo "repository_owner=$(echo ${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT
- name: Set up QEMU - name: Set up QEMU
uses: docker/setup-qemu-action@v3 uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392
- name: Set up Docker Buildx - name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3 uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435
with: with:
version: latest version: latest
# Unstable builds - run at your own risk # Unstable builds - run at your own risk
- name: Build and Push - name: Build and Push
uses: docker/build-push-action@v5 uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83
with: with:
context: . context: .
file: ./Dockerfile file: ./Dockerfile

View File

@@ -25,20 +25,20 @@ jobs:
security-events: write security-events: write
steps: steps:
- name: Checkout repository - name: Checkout repository
uses: actions/checkout@v4 uses: actions/checkout@v5
- name: Install Go - name: Install Go
uses: actions/setup-go@v5 uses: actions/setup-go@v6
with: with:
go-version-file: go.mod go-version-file: go.mod
- name: Initialize CodeQL - name: Initialize CodeQL
uses: github/codeql-action/init@v2 uses: github/codeql-action/init@v3
with: with:
languages: go languages: go, actions
- name: Autobuild - name: Autobuild
uses: github/codeql-action/autobuild@v2 uses: github/codeql-action/autobuild@v3
- name: Perform CodeQL Analysis - name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v2 uses: github/codeql-action/analyze@v3

View File

@@ -11,7 +11,7 @@ jobs:
check_for_first_interaction: check_for_first_interaction:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v5
- uses: actions/first-interaction@main - uses: actions/first-interaction@main
with: with:
repo-token: ${{ secrets.GITHUB_TOKEN }} repo-token: ${{ secrets.GITHUB_TOKEN }}

View File

@@ -14,7 +14,7 @@ jobs:
issues: write # for actions/stale to close stale issues issues: write # for actions/stale to close stale issues
pull-requests: write # for actions/stale to close stale PRs pull-requests: write # for actions/stale to close stale PRs
steps: steps:
- uses: actions/stale@v6 - uses: actions/stale@v10
with: with:
stale-issue-message: 'This issue is stale because it has been open 30 days with no activity. Remove stale label or comment or this will be closed in 5 days.' stale-issue-message: 'This issue is stale because it has been open 30 days with no activity. Remove stale label or comment or this will be closed in 5 days.'
# turn off stale for both issues and PRs # turn off stale for both issues and PRs

View File

@@ -4,16 +4,16 @@ on:
branches: branches:
- master - master
paths: paths:
- '.github/workflows/go.yaml' - ".github/workflows/go.yaml"
- '**.go' - "**.go"
- 'go.mod' - "go.mod"
- 'go.sum' - "go.sum"
pull_request: pull_request:
paths: paths:
- '.github/workflows/go.yaml' - ".github/workflows/go.yaml"
- '**.go' - "**.go"
- 'go.mod' - "go.mod"
- 'go.sum' - "go.sum"
permissions: permissions:
contents: read contents: read
@@ -29,10 +29,10 @@ jobs:
fmt: fmt:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v5
- uses: actions/setup-go@v5 - uses: actions/setup-go@v6
with: with:
go-version-file: 'go.mod' go-version-file: "go.mod"
cache: false cache: false
- name: fmt - name: fmt
run: go fmt ./... run: go fmt ./...
@@ -42,24 +42,24 @@ jobs:
lint: lint:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v5
- uses: actions/setup-go@v5 - uses: actions/setup-go@v6
with: with:
go-version-file: 'go.mod' go-version-file: "go.mod"
cache: false cache: false
- name: golangci-lint - name: golangci-lint
uses: golangci/golangci-lint-action@v6 uses: golangci/golangci-lint-action@4afd733a84b1f43292c63897423277bb7f4313a9
with: with:
only-new-issues: true only-new-issues: true
version: v1.55.2 version: v2.1.2
generate: generate:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v5
- uses: actions/setup-go@v5 - uses: actions/setup-go@v6
with: with:
go-version-file: 'go.mod' go-version-file: "go.mod"
cache: false cache: false
- name: Generate - name: Generate
run: make generate run: make generate
@@ -69,10 +69,10 @@ jobs:
test: test:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v5
- uses: actions/setup-go@v5 - uses: actions/setup-go@v6
with: with:
go-version-file: 'go.mod' go-version-file: "go.mod"
- run: make manifests - run: make manifests
- name: Check diff - name: Check diff
run: git diff --exit-code run: git diff --exit-code

View File

@@ -1,19 +1,14 @@
version: "2"
run: run:
timeout: 3m timeout: 5m
output: linters:
formats: settings:
- format: github-actions errcheck:
path: stdout exclude-functions:
linters-settings: - (net/http.ResponseWriter).Write
errcheck: - (*net/http.Server).Shutdown
exclude-functions: - (*github.com/actions/actions-runner-controller/simulator.VisibleRunnerGroups).Add
- (net/http.ResponseWriter).Write - (*github.com/actions/actions-runner-controller/testing.Kind).Stop
- (*net/http.Server).Shutdown exclusions:
- (*github.com/actions/actions-runner-controller/simulator.VisibleRunnerGroups).Add presets:
- (*github.com/actions/actions-runner-controller/testing.Kind).Stop - std-error-handling
issues:
exclude-rules:
- path: controllers/suite_test.go
linters:
- staticcheck
text: "SA1019"

View File

@@ -1,2 +1,2 @@
# actions-runner-controller maintainers # actions-runner-controller maintainers
* @mumoshu @toast-gear @actions/actions-launch @nikola-jokic @rentziass * @mumoshu @toast-gear @actions/actions-launch @actions/actions-compute @nikola-jokic @rentziass

View File

@@ -1,5 +1,5 @@
# Build the manager binary # Build the manager binary
FROM --platform=$BUILDPLATFORM golang:1.24.0 as builder FROM --platform=$BUILDPLATFORM golang:1.24.3 AS builder
WORKDIR /workspace WORKDIR /workspace
@@ -30,7 +30,7 @@ ARG TARGETPLATFORM TARGETOS TARGETARCH TARGETVARIANT VERSION=dev COMMIT_SHA=dev
# to avoid https://github.com/moby/buildkit/issues/2334 # to avoid https://github.com/moby/buildkit/issues/2334
# We can use docker layer cache so the build is fast enogh anyway # We can use docker layer cache so the build is fast enogh anyway
# We also use per-platform GOCACHE for the same reason. # We also use per-platform GOCACHE for the same reason.
ENV GOCACHE /build/${TARGETPLATFORM}/root/.cache/go-build ENV GOCACHE="/build/${TARGETPLATFORM}/root/.cache/go-build"
# Build # Build
RUN --mount=target=. \ RUN --mount=target=. \

View File

@@ -6,7 +6,7 @@ endif
DOCKER_USER ?= $(shell echo ${DOCKER_IMAGE_NAME} | cut -d / -f1) DOCKER_USER ?= $(shell echo ${DOCKER_IMAGE_NAME} | cut -d / -f1)
VERSION ?= dev VERSION ?= dev
COMMIT_SHA = $(shell git rev-parse HEAD) COMMIT_SHA = $(shell git rev-parse HEAD)
RUNNER_VERSION ?= 2.323.0 RUNNER_VERSION ?= 2.328.0
TARGETPLATFORM ?= $(shell arch) TARGETPLATFORM ?= $(shell arch)
RUNNER_NAME ?= ${DOCKER_USER}/actions-runner RUNNER_NAME ?= ${DOCKER_USER}/actions-runner
RUNNER_TAG ?= ${VERSION} RUNNER_TAG ?= ${VERSION}
@@ -20,7 +20,7 @@ KUBECONTEXT ?= kind-acceptance
CLUSTER ?= acceptance CLUSTER ?= acceptance
CERT_MANAGER_VERSION ?= v1.1.1 CERT_MANAGER_VERSION ?= v1.1.1
KUBE_RBAC_PROXY_VERSION ?= v0.11.0 KUBE_RBAC_PROXY_VERSION ?= v0.11.0
SHELLCHECK_VERSION ?= 0.8.0 SHELLCHECK_VERSION ?= 0.10.0
# Produce CRDs that work back to Kubernetes 1.11 (no version conversion) # Produce CRDs that work back to Kubernetes 1.11 (no version conversion)
CRD_OPTIONS ?= "crd:generateEmbeddedObjectMeta=true,allowDangerousTypes=true" CRD_OPTIONS ?= "crd:generateEmbeddedObjectMeta=true,allowDangerousTypes=true"
@@ -68,7 +68,7 @@ endif
all: manager all: manager
lint: lint:
docker run --rm -v $(PWD):/app -w /app golangci/golangci-lint:v1.57.2 golangci-lint run docker run --rm -v $(PWD):/app -w /app golangci/golangci-lint:v2.1.2 golangci-lint run
GO_TEST_ARGS ?= -short GO_TEST_ARGS ?= -short
@@ -117,9 +117,6 @@ manifests: manifests-gen-crds chart-crds
manifests-gen-crds: controller-gen yq manifests-gen-crds: controller-gen yq
$(CONTROLLER_GEN) $(CRD_OPTIONS) rbac:roleName=manager-role webhook paths="./..." output:crd:artifacts:config=config/crd/bases $(CONTROLLER_GEN) $(CRD_OPTIONS) rbac:roleName=manager-role webhook paths="./..." output:crd:artifacts:config=config/crd/bases
for YAMLFILE in config/crd/bases/actions*.yaml; do \
$(YQ) '.spec.preserveUnknownFields = false' --inplace "$$YAMLFILE" ; \
done
make manifests-gen-crds-fix DELETE_KEY=x-kubernetes-list-type make manifests-gen-crds-fix DELETE_KEY=x-kubernetes-list-type
make manifests-gen-crds-fix DELETE_KEY=x-kubernetes-list-map-keys make manifests-gen-crds-fix DELETE_KEY=x-kubernetes-list-map-keys
@@ -204,7 +201,7 @@ generate: controller-gen
# Run shellcheck on runner scripts # Run shellcheck on runner scripts
shellcheck: shellcheck-install shellcheck: shellcheck-install
$(TOOLS_PATH)/shellcheck --shell bash --source-path runner runner/*.sh hack/*.sh $(TOOLS_PATH)/shellcheck --shell bash --source-path runner runner/*.sh runner/update-status hack/*.sh
docker-buildx: docker-buildx:
export DOCKER_CLI_EXPERIMENTAL=enabled ;\ export DOCKER_CLI_EXPERIMENTAL=enabled ;\
@@ -310,7 +307,7 @@ github-release: release
# Otherwise we get errors like the below: # Otherwise we get errors like the below:
# Error: failed to install CRD crds/actions.summerwind.dev_runnersets.yaml: CustomResourceDefinition.apiextensions.k8s.io "runnersets.actions.summerwind.dev" is invalid: [spec.validation.openAPIV3Schema.properties[spec].properties[template].properties[spec].properties[containers].items.properties[ports].items.properties[protocol].default: Required value: this property is in x-kubernetes-list-map-keys, so it must have a default or be a required property, spec.validation.openAPIV3Schema.properties[spec].properties[template].properties[spec].properties[initContainers].items.properties[ports].items.properties[protocol].default: Required value: this property is in x-kubernetes-list-map-keys, so it must have a default or be a required property] # Error: failed to install CRD crds/actions.summerwind.dev_runnersets.yaml: CustomResourceDefinition.apiextensions.k8s.io "runnersets.actions.summerwind.dev" is invalid: [spec.validation.openAPIV3Schema.properties[spec].properties[template].properties[spec].properties[containers].items.properties[ports].items.properties[protocol].default: Required value: this property is in x-kubernetes-list-map-keys, so it must have a default or be a required property, spec.validation.openAPIV3Schema.properties[spec].properties[template].properties[spec].properties[initContainers].items.properties[ports].items.properties[protocol].default: Required value: this property is in x-kubernetes-list-map-keys, so it must have a default or be a required property]
# #
# Note that controller-gen newer than 0.6.2 is needed due to https://github.com/kubernetes-sigs/controller-tools/issues/448 # Note that controller-gen newer than 0.8.0 is needed due to https://github.com/kubernetes-sigs/controller-tools/issues/448
# Otherwise ObjectMeta embedded in Spec results in empty on the storage. # Otherwise ObjectMeta embedded in Spec results in empty on the storage.
controller-gen: controller-gen:
ifeq (, $(shell which controller-gen)) ifeq (, $(shell which controller-gen))

View File

@@ -5,22 +5,23 @@ on:
env: env:
IRSA_ROLE_ARN: IRSA_ROLE_ARN:
ASSUME_ROLE_ARN: ASSUME_ROLE_ARN:
AWS_REGION: AWS_REGION:
jobs: jobs:
assume-role-in-runner-test: assume-role-in-runner-test:
runs-on: ['self-hosted', 'Linux'] runs-on: ["self-hosted", "Linux"]
steps: steps:
- name: Test aws-actions/configure-aws-credentials Action - name: Test aws-actions/configure-aws-credentials Action
uses: aws-actions/configure-aws-credentials@v1 # https://github.com/aws-actions/configure-aws-credentials/releases/tag/v4.1.0
uses: aws-actions/configure-aws-credentials@ececac1a45f3b08a01d2dd070d28d111c5fe6722
with: with:
aws-region: ${{ env.AWS_REGION }} aws-region: ${{ env.AWS_REGION }}
role-to-assume: ${{ env.ASSUME_ROLE_ARN }} role-to-assume: ${{ env.ASSUME_ROLE_ARN }}
role-duration-seconds: 900 role-duration-seconds: 900
assume-role-in-container-test: assume-role-in-container-test:
runs-on: ['self-hosted', 'Linux'] runs-on: ["self-hosted", "Linux"]
container: container:
image: amazon/aws-cli image: amazon/aws-cli
env: env:
AWS_WEB_IDENTITY_TOKEN_FILE: /var/run/secrets/eks.amazonaws.com/serviceaccount/token AWS_WEB_IDENTITY_TOKEN_FILE: /var/run/secrets/eks.amazonaws.com/serviceaccount/token
@@ -29,7 +30,8 @@ jobs:
- /var/run/secrets/eks.amazonaws.com/serviceaccount/token:/var/run/secrets/eks.amazonaws.com/serviceaccount/token - /var/run/secrets/eks.amazonaws.com/serviceaccount/token:/var/run/secrets/eks.amazonaws.com/serviceaccount/token
steps: steps:
- name: Test aws-actions/configure-aws-credentials Action in container - name: Test aws-actions/configure-aws-credentials Action in container
uses: aws-actions/configure-aws-credentials@v1 # https://github.com/aws-actions/configure-aws-credentials/releases/tag/v4.1.0
uses: aws-actions/configure-aws-credentials@ececac1a45f3b08a01d2dd070d28d111c5fe6722
with: with:
aws-region: ${{ env.AWS_REGION }} aws-region: ${{ env.AWS_REGION }}
role-to-assume: ${{ env.ASSUME_ROLE_ARN }} role-to-assume: ${{ env.ASSUME_ROLE_ARN }}

View File

@@ -8,8 +8,8 @@ env:
jobs: jobs:
run-step-in-container-test: run-step-in-container-test:
runs-on: ['self-hosted', 'Linux'] runs-on: ["self-hosted", "Linux"]
container: container:
image: alpine image: alpine
steps: steps:
- name: Test we are working in the container - name: Test we are working in the container
@@ -21,7 +21,7 @@ jobs:
exit 1 exit 1
fi fi
setup-python-test: setup-python-test:
runs-on: ['self-hosted', 'Linux'] runs-on: ["self-hosted", "Linux"]
steps: steps:
- name: Print native Python environment - name: Print native Python environment
run: | run: |
@@ -41,12 +41,12 @@ jobs:
echo "Python version detected : $(python --version 2>&1)" echo "Python version detected : $(python --version 2>&1)"
fi fi
setup-node-test: setup-node-test:
runs-on: ['self-hosted', 'Linux'] runs-on: ["self-hosted", "Linux"]
steps: steps:
- uses: actions/setup-node@v2 - uses: actions/setup-node@v2
with: with:
node-version: '12' node-version: "12"
- name: Test actions/setup-node works - name: Test actions/setup-node works
run: | run: |
VERSION=$(node --version | cut -c 2- | cut -d '.' -f1) VERSION=$(node --version | cut -c 2- | cut -d '.' -f1)
if [[ $VERSION != '12' ]]; then if [[ $VERSION != '12' ]]; then
@@ -57,13 +57,14 @@ jobs:
echo "Node version detected : $(node --version 2>&1)" echo "Node version detected : $(node --version 2>&1)"
fi fi
setup-ruby-test: setup-ruby-test:
runs-on: ['self-hosted', 'Linux'] runs-on: ["self-hosted", "Linux"]
steps: steps:
- uses: ruby/setup-ruby@v1 # https://github.com/ruby/setup-ruby/releases/tag/v1.227.0
- uses: ruby/setup-ruby@1a615958ad9d422dd932dc1d5823942ee002799f
with: with:
ruby-version: 3.0 ruby-version: 3.0
bundler-cache: true bundler-cache: true
- name: Test ruby/setup-ruby works - name: Test ruby/setup-ruby works
run: | run: |
VERSION=$(ruby --version | cut -d ' ' -f2 | cut -d '.' -f1-2) VERSION=$(ruby --version | cut -d ' ' -f2 | cut -d '.' -f1-2)
if [[ $VERSION != '3.0' ]]; then if [[ $VERSION != '3.0' ]]; then
@@ -74,8 +75,8 @@ jobs:
echo "Ruby version detected : $(ruby --version 2>&1)" echo "Ruby version detected : $(ruby --version 2>&1)"
fi fi
python-shell-test: python-shell-test:
runs-on: ['self-hosted', 'Linux'] runs-on: ["self-hosted", "Linux"]
steps: steps:
- name: Test Python shell works - name: Test Python shell works
run: | run: |
import os import os

View File

@@ -0,0 +1,89 @@
package appconfig
import (
"bytes"
"encoding/json"
"fmt"
"strconv"
corev1 "k8s.io/api/core/v1"
)
// AppConfig holds the GitHub credentials used to authenticate against the
// GitHub API: either a personal access token (Token) or a GitHub App
// (AppID + AppInstallationID + AppPrivateKey). Exactly one of the two
// mechanisms must be populated; see Validate.
type AppConfig struct {
	// AppID is the GitHub App ID, serialized as a string.
	AppID string `json:"github_app_id"`
	// AppInstallationID identifies the installation of the GitHub App.
	AppInstallationID int64 `json:"github_app_installation_id"`
	// AppPrivateKey is the GitHub App's private key material.
	AppPrivateKey string `json:"github_app_private_key"`
	// Token is a GitHub personal access token (PAT).
	Token string `json:"github_token"`
}
// tidy returns a copy of c that carries only the fields of the active
// authentication mechanism: token-only when a PAT is present, otherwise
// app-credentials-only.
func (c *AppConfig) tidy() *AppConfig {
	out := &AppConfig{}
	if c.Token != "" {
		// PAT auth takes precedence: drop any GitHub App fields.
		out.Token = c.Token
		return out
	}
	out.AppID = c.AppID
	out.AppInstallationID = c.AppInstallationID
	out.AppPrivateKey = c.AppPrivateKey
	return out
}
// Validate checks that exactly one authentication mechanism is configured:
// either a PAT or a complete set of GitHub App credentials, but not both
// and not neither. A nil receiver is rejected.
func (c *AppConfig) Validate() error {
	if c == nil {
		return fmt.Errorf("missing app config")
	}

	token := c.Token != ""
	app := c.hasGitHubAppAuth()

	switch {
	case token && app:
		return fmt.Errorf("both PAT and GitHub App credentials provided. should only provide one")
	case !token && !app:
		return fmt.Errorf("no credentials provided: either a PAT or GitHub App credentials should be provided")
	default:
		return nil
	}
}
// hasGitHubAppAuth reports whether all three GitHub App fields are populated.
func (c *AppConfig) hasGitHubAppAuth() bool {
	if c.AppID == "" || c.AppPrivateKey == "" {
		return false
	}
	return c.AppInstallationID > 0
}
// FromSecret builds an AppConfig from a Kubernetes secret, reading the
// github_token, github_app_id, github_app_installation_id and
// github_app_private_key keys from secret.Data.
//
// The config is validated (exactly one auth mechanism) and then tidied so
// that only the fields of the active mechanism are retained.
func FromSecret(secret *corev1.Secret) (*AppConfig, error) {
	var appInstallationID int64
	if v := string(secret.Data["github_app_installation_id"]); v != "" {
		val, err := strconv.ParseInt(v, 10, 64)
		if err != nil {
			return nil, err
		}
		appInstallationID = val
	}

	cfg := &AppConfig{
		Token:             string(secret.Data["github_token"]),
		AppID:             string(secret.Data["github_app_id"]),
		AppInstallationID: appInstallationID,
		AppPrivateKey:     string(secret.Data["github_app_private_key"]),
	}

	if err := cfg.Validate(); err != nil {
		// Wrap with %w (not %v) so callers can inspect the underlying
		// validation error via errors.Is/errors.As, matching FromJSONString.
		return nil, fmt.Errorf("failed to validate config: %w", err)
	}
	return cfg.tidy(), nil
}
// FromJSONString decodes an AppConfig from its JSON representation,
// validates it, and returns the tidied result.
func FromJSONString(v string) (*AppConfig, error) {
	cfg := new(AppConfig)
	dec := json.NewDecoder(bytes.NewBufferString(v))
	if err := dec.Decode(cfg); err != nil {
		return nil, err
	}
	if err := cfg.Validate(); err != nil {
		return nil, fmt.Errorf("failed to validate app config decoded from string: %w", err)
	}
	return cfg.tidy(), nil
}

View File

@@ -0,0 +1,152 @@
package appconfig
import (
"encoding/json"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
)
// TestAppConfigValidate_invalid exercises Validate with configs that must be
// rejected: empty, both mechanisms at once, and partial app credentials.
func TestAppConfigValidate_invalid(t *testing.T) {
	cases := map[string]*AppConfig{
		"empty": {},
		"token and app config": {
			AppID:             "1",
			AppInstallationID: 2,
			AppPrivateKey:     "private key",
			Token:             "token",
		},
		"app id not set": {
			AppInstallationID: 2,
			AppPrivateKey:     "private key",
		},
		"app installation id not set": {
			AppID:         "2",
			AppPrivateKey: "private key",
		},
		"private key empty": {
			AppID:             "2",
			AppInstallationID: 1,
			AppPrivateKey:     "",
		},
	}
	for name, cfg := range cases {
		cfg := cfg
		t.Run(name, func(t *testing.T) {
			require.Error(t, cfg.Validate())
		})
	}
}
// TestAppConfigValidate_valid exercises Validate with well-formed configs:
// a token-only config and a complete GitHub App config.
func TestAppConfigValidate_valid(t *testing.T) {
	cases := map[string]*AppConfig{
		"token": {
			Token: "token",
		},
		"app ID": {
			AppID:             "1",
			AppInstallationID: 2,
			AppPrivateKey:     "private key",
		},
	}
	for name, cfg := range cases {
		cfg := cfg
		t.Run(name, func(t *testing.T) {
			require.NoError(t, cfg.Validate())
		})
	}
}
// TestAppConfigFromSecret_invalid verifies FromSecret rejects secrets with
// missing, conflicting, or malformed credential data.
//
// The fixtures populate secret.Data directly: FromSecret reads Data, not
// StringData (StringData is only folded into Data by the Kubernetes API
// server, which does not happen for structs built in-process). The previous
// version used StringData, so FromSecret saw an empty secret in every case
// and the tests passed vacuously. It also misspelled the installation-id
// key ("githu_..."), and its "invalid app id" case was actually valid since
// AppID is a string — replaced with an empty-app-id case.
func TestAppConfigFromSecret_invalid(t *testing.T) {
	tt := map[string]map[string][]byte{
		"empty": {},
		"token and app provided": {
			"github_token":               []byte("token"),
			"github_app_id":              []byte("2"),
			"github_app_installation_id": []byte("3"),
			"github_app_private_key":     []byte("private key"),
		},
		"app id empty": {
			"github_app_id":              []byte(""),
			"github_app_installation_id": []byte("3"),
			"github_app_private_key":     []byte("private key"),
		},
		"invalid app installation_id": {
			"github_app_id":              []byte("1"),
			"github_app_installation_id": []byte("abc"),
			"github_app_private_key":     []byte("private key"),
		},
		"empty private key": {
			"github_app_id":              []byte("1"),
			"github_app_installation_id": []byte("2"),
			"github_app_private_key":     []byte(""),
		},
	}
	for name, data := range tt {
		data := data
		t.Run(name, func(t *testing.T) {
			secret := &corev1.Secret{
				Data: data,
			}
			appConfig, err := FromSecret(secret)
			assert.Error(t, err)
			assert.Nil(t, appConfig)
		})
	}
}
// TestAppConfigFromSecret_valid verifies FromSecret accepts well-formed
// secrets: token-only and complete GitHub App credentials.
//
// Fixes three defects in the previous version: (1) it populated
// secret.StringData, which FromSecret never reads (it reads Data; the
// StringData->Data merge only happens server-side), so FromSecret always
// errored; (2) the installation-id key was misspelled ("githu_...");
// (3) as a consequence of (1) the "valid" cases asserted assert.Error /
// assert.Nil — inverted expectations copy-pasted from the invalid test.
func TestAppConfigFromSecret_valid(t *testing.T) {
	tt := map[string]map[string][]byte{
		"with token": {
			"github_token": []byte("token"),
		},
		"app config": {
			"github_app_id":              []byte("2"),
			"github_app_installation_id": []byte("3"),
			"github_app_private_key":     []byte("private key"),
		},
	}
	for name, data := range tt {
		data := data
		t.Run(name, func(t *testing.T) {
			secret := &corev1.Secret{
				Data: data,
			}
			appConfig, err := FromSecret(secret)
			assert.NoError(t, err)
			assert.NotNil(t, appConfig)
		})
	}
}
// TestAppConfigFromString_valid round-trips valid configs through JSON and
// checks FromJSONString returns the tidied equivalent.
func TestAppConfigFromString_valid(t *testing.T) {
	cases := map[string]*AppConfig{
		"token": {
			Token: "token",
		},
		"app ID": {
			AppID:             "1",
			AppInstallationID: 2,
			AppPrivateKey:     "private key",
		},
	}
	for name, cfg := range cases {
		cfg := cfg
		t.Run(name, func(t *testing.T) {
			raw, err := json.Marshal(cfg)
			require.NoError(t, err)

			decoded, err := FromJSONString(string(raw))
			require.NoError(t, err)
			assert.Equal(t, cfg.tidy(), decoded)
		})
	}
}

View File

@@ -59,7 +59,10 @@ type AutoscalingListenerSpec struct {
Proxy *ProxyConfig `json:"proxy,omitempty"` Proxy *ProxyConfig `json:"proxy,omitempty"`
// +optional // +optional
GitHubServerTLS *GitHubServerTLSConfig `json:"githubServerTLS,omitempty"` GitHubServerTLS *TLSConfig `json:"githubServerTLS,omitempty"`
// +optional
VaultConfig *VaultConfig `json:"vaultConfig,omitempty"`
// +optional // +optional
Metrics *MetricsConfig `json:"metrics,omitempty"` Metrics *MetricsConfig `json:"metrics,omitempty"`
@@ -87,7 +90,6 @@ type AutoscalingListener struct {
} }
// +kubebuilder:object:root=true // +kubebuilder:object:root=true
// AutoscalingListenerList contains a list of AutoscalingListener // AutoscalingListenerList contains a list of AutoscalingListener
type AutoscalingListenerList struct { type AutoscalingListenerList struct {
metav1.TypeMeta `json:",inline"` metav1.TypeMeta `json:",inline"`

View File

@@ -24,6 +24,7 @@ import (
"strings" "strings"
"github.com/actions/actions-runner-controller/hash" "github.com/actions/actions-runner-controller/hash"
"github.com/actions/actions-runner-controller/vault"
"golang.org/x/net/http/httpproxy" "golang.org/x/net/http/httpproxy"
corev1 "k8s.io/api/core/v1" corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -69,7 +70,10 @@ type AutoscalingRunnerSetSpec struct {
Proxy *ProxyConfig `json:"proxy,omitempty"` Proxy *ProxyConfig `json:"proxy,omitempty"`
// +optional // +optional
GitHubServerTLS *GitHubServerTLSConfig `json:"githubServerTLS,omitempty"` GitHubServerTLS *TLSConfig `json:"githubServerTLS,omitempty"`
// +optional
VaultConfig *VaultConfig `json:"vaultConfig,omitempty"`
// Required // Required
Template corev1.PodTemplateSpec `json:"template,omitempty"` Template corev1.PodTemplateSpec `json:"template,omitempty"`
@@ -89,12 +93,12 @@ type AutoscalingRunnerSetSpec struct {
MinRunners *int `json:"minRunners,omitempty"` MinRunners *int `json:"minRunners,omitempty"`
} }
type GitHubServerTLSConfig struct { type TLSConfig struct {
// Required // Required
CertificateFrom *TLSCertificateSource `json:"certificateFrom,omitempty"` CertificateFrom *TLSCertificateSource `json:"certificateFrom,omitempty"`
} }
func (c *GitHubServerTLSConfig) ToCertPool(keyFetcher func(name, key string) ([]byte, error)) (*x509.CertPool, error) { func (c *TLSConfig) ToCertPool(keyFetcher func(name, key string) ([]byte, error)) (*x509.CertPool, error) {
if c.CertificateFrom == nil { if c.CertificateFrom == nil {
return nil, fmt.Errorf("certificateFrom not specified") return nil, fmt.Errorf("certificateFrom not specified")
} }
@@ -142,7 +146,7 @@ type ProxyConfig struct {
NoProxy []string `json:"noProxy,omitempty"` NoProxy []string `json:"noProxy,omitempty"`
} }
func (c *ProxyConfig) toHTTPProxyConfig(secretFetcher func(string) (*corev1.Secret, error)) (*httpproxy.Config, error) { func (c *ProxyConfig) ToHTTPProxyConfig(secretFetcher func(string) (*corev1.Secret, error)) (*httpproxy.Config, error) {
config := &httpproxy.Config{ config := &httpproxy.Config{
NoProxy: strings.Join(c.NoProxy, ","), NoProxy: strings.Join(c.NoProxy, ","),
} }
@@ -201,7 +205,7 @@ func (c *ProxyConfig) toHTTPProxyConfig(secretFetcher func(string) (*corev1.Secr
} }
func (c *ProxyConfig) ToSecretData(secretFetcher func(string) (*corev1.Secret, error)) (map[string][]byte, error) { func (c *ProxyConfig) ToSecretData(secretFetcher func(string) (*corev1.Secret, error)) (map[string][]byte, error) {
config, err := c.toHTTPProxyConfig(secretFetcher) config, err := c.ToHTTPProxyConfig(secretFetcher)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -215,7 +219,7 @@ func (c *ProxyConfig) ToSecretData(secretFetcher func(string) (*corev1.Secret, e
} }
func (c *ProxyConfig) ProxyFunc(secretFetcher func(string) (*corev1.Secret, error)) (func(*http.Request) (*url.URL, error), error) { func (c *ProxyConfig) ProxyFunc(secretFetcher func(string) (*corev1.Secret, error)) (func(*http.Request) (*url.URL, error), error) {
config, err := c.toHTTPProxyConfig(secretFetcher) config, err := c.ToHTTPProxyConfig(secretFetcher)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -235,6 +239,26 @@ type ProxyServerConfig struct {
CredentialSecretRef string `json:"credentialSecretRef,omitempty"` CredentialSecretRef string `json:"credentialSecretRef,omitempty"`
} }
type VaultConfig struct {
// +optional
Type vault.VaultType `json:"type,omitempty"`
// +optional
AzureKeyVault *AzureKeyVaultConfig `json:"azureKeyVault,omitempty"`
// +optional
Proxy *ProxyConfig `json:"proxy,omitempty"`
}
type AzureKeyVaultConfig struct {
// +required
URL string `json:"url,omitempty"`
// +required
TenantID string `json:"tenantId,omitempty"`
// +required
ClientID string `json:"clientId,omitempty"`
// +required
CertificatePath string `json:"certificatePath,omitempty"`
}
// MetricsConfig holds configuration parameters for each metric type // MetricsConfig holds configuration parameters for each metric type
type MetricsConfig struct { type MetricsConfig struct {
// +optional // +optional
@@ -285,6 +309,33 @@ func (ars *AutoscalingRunnerSet) ListenerSpecHash() string {
return hash.ComputeTemplateHash(&spec) return hash.ComputeTemplateHash(&spec)
} }
func (ars *AutoscalingRunnerSet) GitHubConfigSecret() string {
return ars.Spec.GitHubConfigSecret
}
func (ars *AutoscalingRunnerSet) GitHubConfigUrl() string {
return ars.Spec.GitHubConfigUrl
}
func (ars *AutoscalingRunnerSet) GitHubProxy() *ProxyConfig {
return ars.Spec.Proxy
}
func (ars *AutoscalingRunnerSet) GitHubServerTLS() *TLSConfig {
return ars.Spec.GitHubServerTLS
}
func (ars *AutoscalingRunnerSet) VaultConfig() *VaultConfig {
return ars.Spec.VaultConfig
}
func (ars *AutoscalingRunnerSet) VaultProxy() *ProxyConfig {
if ars.Spec.VaultConfig != nil {
return ars.Spec.VaultConfig.Proxy
}
return nil
}
func (ars *AutoscalingRunnerSet) RunnerSetSpecHash() string { func (ars *AutoscalingRunnerSet) RunnerSetSpecHash() string {
type runnerSetSpec struct { type runnerSetSpec struct {
GitHubConfigUrl string GitHubConfigUrl string
@@ -292,7 +343,7 @@ func (ars *AutoscalingRunnerSet) RunnerSetSpecHash() string {
RunnerGroup string RunnerGroup string
RunnerScaleSetName string RunnerScaleSetName string
Proxy *ProxyConfig Proxy *ProxyConfig
GitHubServerTLS *GitHubServerTLSConfig GitHubServerTLS *TLSConfig
Template corev1.PodTemplateSpec Template corev1.PodTemplateSpec
} }
spec := &runnerSetSpec{ spec := &runnerSetSpec{

View File

@@ -34,6 +34,7 @@ const EphemeralRunnerContainerName = "runner"
// +kubebuilder:printcolumn:JSONPath=".status.jobWorkflowRef",name=JobWorkflowRef,type=string // +kubebuilder:printcolumn:JSONPath=".status.jobWorkflowRef",name=JobWorkflowRef,type=string
// +kubebuilder:printcolumn:JSONPath=".status.workflowRunId",name=WorkflowRunId,type=number // +kubebuilder:printcolumn:JSONPath=".status.workflowRunId",name=WorkflowRunId,type=number
// +kubebuilder:printcolumn:JSONPath=".status.jobDisplayName",name=JobDisplayName,type=string // +kubebuilder:printcolumn:JSONPath=".status.jobDisplayName",name=JobDisplayName,type=string
// +kubebuilder:printcolumn:JSONPath=".status.jobId",name=JobId,type=string
// +kubebuilder:printcolumn:JSONPath=".status.message",name=Message,type=string // +kubebuilder:printcolumn:JSONPath=".status.message",name=Message,type=string
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" // +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"
@@ -50,6 +51,10 @@ func (er *EphemeralRunner) IsDone() bool {
return er.Status.Phase == corev1.PodSucceeded || er.Status.Phase == corev1.PodFailed return er.Status.Phase == corev1.PodSucceeded || er.Status.Phase == corev1.PodFailed
} }
func (er *EphemeralRunner) HasJob() bool {
return len(er.Status.JobID) > 0
}
func (er *EphemeralRunner) HasContainerHookConfigured() bool { func (er *EphemeralRunner) HasContainerHookConfigured() bool {
for i := range er.Spec.Spec.Containers { for i := range er.Spec.Spec.Containers {
if er.Spec.Spec.Containers[i].Name != EphemeralRunnerContainerName { if er.Spec.Spec.Containers[i].Name != EphemeralRunnerContainerName {
@@ -67,6 +72,33 @@ func (er *EphemeralRunner) HasContainerHookConfigured() bool {
return false return false
} }
func (er *EphemeralRunner) GitHubConfigSecret() string {
return er.Spec.GitHubConfigSecret
}
func (er *EphemeralRunner) GitHubConfigUrl() string {
return er.Spec.GitHubConfigUrl
}
func (er *EphemeralRunner) GitHubProxy() *ProxyConfig {
return er.Spec.Proxy
}
func (er *EphemeralRunner) GitHubServerTLS() *TLSConfig {
return er.Spec.GitHubServerTLS
}
func (er *EphemeralRunner) VaultConfig() *VaultConfig {
return er.Spec.VaultConfig
}
func (er *EphemeralRunner) VaultProxy() *ProxyConfig {
if er.Spec.VaultConfig != nil {
return er.Spec.VaultConfig.Proxy
}
return nil
}
// EphemeralRunnerSpec defines the desired state of EphemeralRunner // EphemeralRunnerSpec defines the desired state of EphemeralRunner
type EphemeralRunnerSpec struct { type EphemeralRunnerSpec struct {
// +required // +required
@@ -75,6 +107,9 @@ type EphemeralRunnerSpec struct {
// +required // +required
GitHubConfigSecret string `json:"githubConfigSecret,omitempty"` GitHubConfigSecret string `json:"githubConfigSecret,omitempty"`
// +optional
GitHubServerTLS *TLSConfig `json:"githubServerTLS,omitempty"`
// +required // +required
RunnerScaleSetId int `json:"runnerScaleSetId,omitempty"` RunnerScaleSetId int `json:"runnerScaleSetId,omitempty"`
@@ -85,7 +120,7 @@ type EphemeralRunnerSpec struct {
ProxySecretRef string `json:"proxySecretRef,omitempty"` ProxySecretRef string `json:"proxySecretRef,omitempty"`
// +optional // +optional
GitHubServerTLS *GitHubServerTLSConfig `json:"githubServerTLS,omitempty"` VaultConfig *VaultConfig `json:"vaultConfig,omitempty"`
corev1.PodTemplateSpec `json:",inline"` corev1.PodTemplateSpec `json:",inline"`
} }
@@ -115,15 +150,16 @@ type EphemeralRunnerStatus struct {
RunnerId int `json:"runnerId,omitempty"` RunnerId int `json:"runnerId,omitempty"`
// +optional // +optional
RunnerName string `json:"runnerName,omitempty"` RunnerName string `json:"runnerName,omitempty"`
// +optional
RunnerJITConfig string `json:"runnerJITConfig,omitempty"`
// +optional // +optional
Failures map[string]bool `json:"failures,omitempty"` Failures map[string]metav1.Time `json:"failures,omitempty"`
// +optional // +optional
JobRequestId int64 `json:"jobRequestId,omitempty"` JobRequestId int64 `json:"jobRequestId,omitempty"`
// +optional
JobID string `json:"jobId,omitempty"`
// +optional // +optional
JobRepositoryName string `json:"jobRepositoryName,omitempty"` JobRepositoryName string `json:"jobRepositoryName,omitempty"`
@@ -137,6 +173,20 @@ type EphemeralRunnerStatus struct {
JobDisplayName string `json:"jobDisplayName,omitempty"` JobDisplayName string `json:"jobDisplayName,omitempty"`
} }
func (s *EphemeralRunnerStatus) LastFailure() metav1.Time {
var maxTime metav1.Time
if len(s.Failures) == 0 {
return maxTime
}
for _, ts := range s.Failures {
if ts.After(maxTime.Time) {
maxTime = ts
}
}
return maxTime
}
// +kubebuilder:object:root=true // +kubebuilder:object:root=true
// EphemeralRunnerList contains a list of EphemeralRunner // EphemeralRunnerList contains a list of EphemeralRunner

View File

@@ -60,9 +60,35 @@ type EphemeralRunnerSet struct {
Status EphemeralRunnerSetStatus `json:"status,omitempty"` Status EphemeralRunnerSetStatus `json:"status,omitempty"`
} }
// +kubebuilder:object:root=true func (ers *EphemeralRunnerSet) GitHubConfigSecret() string {
return ers.Spec.EphemeralRunnerSpec.GitHubConfigSecret
}
func (ers *EphemeralRunnerSet) GitHubConfigUrl() string {
return ers.Spec.EphemeralRunnerSpec.GitHubConfigUrl
}
func (ers *EphemeralRunnerSet) GitHubProxy() *ProxyConfig {
return ers.Spec.EphemeralRunnerSpec.Proxy
}
func (ers *EphemeralRunnerSet) GitHubServerTLS() *TLSConfig {
return ers.Spec.EphemeralRunnerSpec.GitHubServerTLS
}
func (ers *EphemeralRunnerSet) VaultConfig() *VaultConfig {
return ers.Spec.EphemeralRunnerSpec.VaultConfig
}
func (ers *EphemeralRunnerSet) VaultProxy() *ProxyConfig {
if ers.Spec.EphemeralRunnerSpec.VaultConfig != nil {
return ers.Spec.EphemeralRunnerSpec.VaultConfig.Proxy
}
return nil
}
// EphemeralRunnerSetList contains a list of EphemeralRunnerSet // EphemeralRunnerSetList contains a list of EphemeralRunnerSet
// +kubebuilder:object:root=true
type EphemeralRunnerSetList struct { type EphemeralRunnerSetList struct {
metav1.TypeMeta `json:",inline"` metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"` metav1.ListMeta `json:"metadata,omitempty"`

View File

@@ -17,7 +17,7 @@ import (
func TestGitHubServerTLSConfig_ToCertPool(t *testing.T) { func TestGitHubServerTLSConfig_ToCertPool(t *testing.T) {
t.Run("returns an error if CertificateFrom not specified", func(t *testing.T) { t.Run("returns an error if CertificateFrom not specified", func(t *testing.T) {
c := &v1alpha1.GitHubServerTLSConfig{ c := &v1alpha1.TLSConfig{
CertificateFrom: nil, CertificateFrom: nil,
} }
@@ -29,7 +29,7 @@ func TestGitHubServerTLSConfig_ToCertPool(t *testing.T) {
}) })
t.Run("returns an error if CertificateFrom.ConfigMapKeyRef not specified", func(t *testing.T) { t.Run("returns an error if CertificateFrom.ConfigMapKeyRef not specified", func(t *testing.T) {
c := &v1alpha1.GitHubServerTLSConfig{ c := &v1alpha1.TLSConfig{
CertificateFrom: &v1alpha1.TLSCertificateSource{}, CertificateFrom: &v1alpha1.TLSCertificateSource{},
} }
@@ -41,7 +41,7 @@ func TestGitHubServerTLSConfig_ToCertPool(t *testing.T) {
}) })
t.Run("returns a valid cert pool with correct configuration", func(t *testing.T) { t.Run("returns a valid cert pool with correct configuration", func(t *testing.T) {
c := &v1alpha1.GitHubServerTLSConfig{ c := &v1alpha1.TLSConfig{
CertificateFrom: &v1alpha1.TLSCertificateSource{ CertificateFrom: &v1alpha1.TLSCertificateSource{
ConfigMapKeyRef: &v1.ConfigMapKeySelector{ ConfigMapKeyRef: &v1.ConfigMapKeySelector{
LocalObjectReference: v1.LocalObjectReference{ LocalObjectReference: v1.LocalObjectReference{

View File

@@ -0,0 +1,72 @@
package v1alpha1
import "strings"
// IsVersionAllowed reports whether a resource stamped with resourceVersion
// may be handled by a controller built at buildVersion. A "dev" or
// "canary-*" build accepts anything, an exact string match always passes,
// and otherwise both values must parse as semver with equal major and
// minor components (patch differences are allowed).
func IsVersionAllowed(resourceVersion, buildVersion string) bool {
	if buildVersion == "dev" || resourceVersion == buildVersion || strings.HasPrefix(buildVersion, "canary-") {
		return true
	}

	rv, ok := parseSemver(resourceVersion)
	if !ok {
		return false
	}
	bv, ok := parseSemver(buildVersion)
	if !ok {
		return false
	}

	return rv.major == bv.major && rv.minor == bv.minor
}

// semver holds the major and minor components of a parsed version.
// Components are kept as strings; parseInt rejects leading zeros, so
// string equality is equivalent to numeric equality.
type semver struct {
	major string
	minor string
}

// parseSemver parses "major[.minor[.patch[-pre|+build]]]" and reports
// whether v is well-formed. A missing minor defaults to "0". The patch
// component is validated but not returned.
//
// Fix: previously anything after the patch digits was silently accepted,
// so "1.2.3xyz" and "1.2.3.4" parsed as valid versions. The remainder
// after the patch must now be empty or a semver prerelease/build suffix
// (starting with '-' or '+').
func parseSemver(v string) (p semver, ok bool) {
	if v == "" {
		return
	}
	p.major, v, ok = parseInt(v)
	if !ok {
		return p, false
	}
	if v == "" {
		p.minor = "0"
		return p, true
	}
	if v[0] != '.' {
		return p, false
	}
	p.minor, v, ok = parseInt(v[1:])
	if !ok {
		return p, false
	}
	if v == "" {
		return p, true
	}
	if v[0] != '.' {
		return p, false
	}
	var rest string
	if _, rest, ok = parseInt(v[1:]); !ok {
		return p, false
	}
	if rest != "" && rest[0] != '-' && rest[0] != '+' {
		return p, false
	}
	return p, true
}

// parseInt consumes a leading decimal integer from v, returning the digits,
// the remainder, and ok=false when v does not start with a digit or has a
// leading zero followed by more digits.
func parseInt(v string) (t, rest string, ok bool) {
	if v == "" {
		return
	}
	if v[0] < '0' || '9' < v[0] {
		return
	}
	i := 1
	for i < len(v) && '0' <= v[i] && v[i] <= '9' {
		i++
	}
	// Reject leading zeros ("01"), per semver numeric-identifier rules.
	if v[0] == '0' && i != 1 {
		return
	}
	return v[:i], v[i:], true
}

View File

@@ -0,0 +1,60 @@
package v1alpha1_test
import (
"testing"
"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
"github.com/stretchr/testify/assert"
)
// TestIsVersionAllowed covers the compatibility rules: dev builds are
// always allowed, exact matches pass, otherwise major and minor must match
// between two valid semver strings.
func TestIsVersionAllowed(t *testing.T) {
	t.Parallel()

	cases := []struct {
		name            string
		resourceVersion string
		buildVersion    string
		want            bool
	}{
		{"dev should always be allowed", "0.11.0", "dev", true},
		{"resourceVersion is not semver", "dev", "0.11.0", false},
		{"buildVersion is not semver", "0.11.0", "NA", false},
		{"major version mismatch", "0.11.0", "1.11.0", false},
		{"minor version mismatch", "0.11.0", "0.10.0", false},
		{"patch version mismatch", "0.11.1", "0.11.0", true},
		{"arbitrary version match", "abc", "abc", true},
	}
	for _, tc := range cases {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			got := v1alpha1.IsVersionAllowed(tc.resourceVersion, tc.buildVersion)
			assert.Equal(t, tc.want, got)
		})
	}
}

View File

@@ -22,6 +22,7 @@ package v1alpha1
import ( import (
"k8s.io/api/core/v1" "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime" runtime "k8s.io/apimachinery/pkg/runtime"
) )
@@ -99,7 +100,12 @@ func (in *AutoscalingListenerSpec) DeepCopyInto(out *AutoscalingListenerSpec) {
} }
if in.GitHubServerTLS != nil { if in.GitHubServerTLS != nil {
in, out := &in.GitHubServerTLS, &out.GitHubServerTLS in, out := &in.GitHubServerTLS, &out.GitHubServerTLS
*out = new(GitHubServerTLSConfig) *out = new(TLSConfig)
(*in).DeepCopyInto(*out)
}
if in.VaultConfig != nil {
in, out := &in.VaultConfig, &out.VaultConfig
*out = new(VaultConfig)
(*in).DeepCopyInto(*out) (*in).DeepCopyInto(*out)
} }
if in.Metrics != nil { if in.Metrics != nil {
@@ -208,7 +214,12 @@ func (in *AutoscalingRunnerSetSpec) DeepCopyInto(out *AutoscalingRunnerSetSpec)
} }
if in.GitHubServerTLS != nil { if in.GitHubServerTLS != nil {
in, out := &in.GitHubServerTLS, &out.GitHubServerTLS in, out := &in.GitHubServerTLS, &out.GitHubServerTLS
*out = new(GitHubServerTLSConfig) *out = new(TLSConfig)
(*in).DeepCopyInto(*out)
}
if in.VaultConfig != nil {
in, out := &in.VaultConfig, &out.VaultConfig
*out = new(VaultConfig)
(*in).DeepCopyInto(*out) (*in).DeepCopyInto(*out)
} }
in.Template.DeepCopyInto(&out.Template) in.Template.DeepCopyInto(&out.Template)
@@ -259,6 +270,21 @@ func (in *AutoscalingRunnerSetStatus) DeepCopy() *AutoscalingRunnerSetStatus {
return out return out
} }
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AzureKeyVaultConfig) DeepCopyInto(out *AzureKeyVaultConfig) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureKeyVaultConfig.
func (in *AzureKeyVaultConfig) DeepCopy() *AzureKeyVaultConfig {
if in == nil {
return nil
}
out := new(AzureKeyVaultConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CounterMetric) DeepCopyInto(out *CounterMetric) { func (in *CounterMetric) DeepCopyInto(out *CounterMetric) {
*out = *in *out = *in
@@ -431,14 +457,19 @@ func (in *EphemeralRunnerSetStatus) DeepCopy() *EphemeralRunnerSetStatus {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EphemeralRunnerSpec) DeepCopyInto(out *EphemeralRunnerSpec) { func (in *EphemeralRunnerSpec) DeepCopyInto(out *EphemeralRunnerSpec) {
*out = *in *out = *in
if in.GitHubServerTLS != nil {
in, out := &in.GitHubServerTLS, &out.GitHubServerTLS
*out = new(TLSConfig)
(*in).DeepCopyInto(*out)
}
if in.Proxy != nil { if in.Proxy != nil {
in, out := &in.Proxy, &out.Proxy in, out := &in.Proxy, &out.Proxy
*out = new(ProxyConfig) *out = new(ProxyConfig)
(*in).DeepCopyInto(*out) (*in).DeepCopyInto(*out)
} }
if in.GitHubServerTLS != nil { if in.VaultConfig != nil {
in, out := &in.GitHubServerTLS, &out.GitHubServerTLS in, out := &in.VaultConfig, &out.VaultConfig
*out = new(GitHubServerTLSConfig) *out = new(VaultConfig)
(*in).DeepCopyInto(*out) (*in).DeepCopyInto(*out)
} }
in.PodTemplateSpec.DeepCopyInto(&out.PodTemplateSpec) in.PodTemplateSpec.DeepCopyInto(&out.PodTemplateSpec)
@@ -459,9 +490,9 @@ func (in *EphemeralRunnerStatus) DeepCopyInto(out *EphemeralRunnerStatus) {
*out = *in *out = *in
if in.Failures != nil { if in.Failures != nil {
in, out := &in.Failures, &out.Failures in, out := &in.Failures, &out.Failures
*out = make(map[string]bool, len(*in)) *out = make(map[string]metav1.Time, len(*in))
for key, val := range *in { for key, val := range *in {
(*out)[key] = val (*out)[key] = *val.DeepCopy()
} }
} }
} }
@@ -496,26 +527,6 @@ func (in *GaugeMetric) DeepCopy() *GaugeMetric {
return out return out
} }
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GitHubServerTLSConfig) DeepCopyInto(out *GitHubServerTLSConfig) {
*out = *in
if in.CertificateFrom != nil {
in, out := &in.CertificateFrom, &out.CertificateFrom
*out = new(TLSCertificateSource)
(*in).DeepCopyInto(*out)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitHubServerTLSConfig.
func (in *GitHubServerTLSConfig) DeepCopy() *GitHubServerTLSConfig {
if in == nil {
return nil
}
out := new(GitHubServerTLSConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HistogramMetric) DeepCopyInto(out *HistogramMetric) { func (in *HistogramMetric) DeepCopyInto(out *HistogramMetric) {
*out = *in *out = *in
@@ -668,3 +679,48 @@ func (in *TLSCertificateSource) DeepCopy() *TLSCertificateSource {
in.DeepCopyInto(out) in.DeepCopyInto(out)
return out return out
} }
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TLSConfig) DeepCopyInto(out *TLSConfig) {
*out = *in
if in.CertificateFrom != nil {
in, out := &in.CertificateFrom, &out.CertificateFrom
*out = new(TLSCertificateSource)
(*in).DeepCopyInto(*out)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSConfig.
func (in *TLSConfig) DeepCopy() *TLSConfig {
if in == nil {
return nil
}
out := new(TLSConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VaultConfig) DeepCopyInto(out *VaultConfig) {
*out = *in
if in.AzureKeyVault != nil {
in, out := &in.AzureKeyVault, &out.AzureKeyVault
*out = new(AzureKeyVaultConfig)
**out = **in
}
if in.Proxy != nil {
in, out := &in.Proxy, &out.Proxy
*out = new(ProxyConfig)
(*in).DeepCopyInto(*out)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VaultConfig.
func (in *VaultConfig) DeepCopy() *VaultConfig {
if in == nil {
return nil
}
out := new(VaultConfig)
in.DeepCopyInto(out)
return out
}

View File

@@ -215,10 +215,10 @@ func (rs *RunnerSpec) validateRepository() error {
foundCount += 1 foundCount += 1
} }
if foundCount == 0 { if foundCount == 0 {
return errors.New("Spec needs enterprise, organization or repository") return errors.New("spec needs enterprise, organization or repository")
} }
if foundCount > 1 { if foundCount > 1 {
return errors.New("Spec cannot have many fields defined enterprise, organization and repository") return errors.New("spec cannot have many fields defined enterprise, organization and repository")
} }
return nil return nil

View File

@@ -1,9 +1,11 @@
# This file defines the config for "ct" (chart tester) used by the helm linting GitHub workflow # This file defines the config for "ct" (chart tester) used by the helm linting GitHub workflow
remote: origin
target-branch: master
lint-conf: charts/.ci/lint-config.yaml lint-conf: charts/.ci/lint-config.yaml
chart-repos: chart-repos:
- jetstack=https://charts.jetstack.io - jetstack=https://charts.jetstack.io
check-version-increment: false # Disable checking that the chart version has been bumped check-version-increment: false # Disable checking that the chart version has been bumped
charts: charts:
- charts/gha-runner-scale-set-controller - charts/gha-runner-scale-set-controller
- charts/gha-runner-scale-set - charts/gha-runner-scale-set
skip-clean-up: true skip-clean-up: true

View File

@@ -1,7 +1,9 @@
# This file defines the config for "ct" (chart tester) used by the helm linting GitHub workflow # This file defines the config for "ct" (chart tester) used by the helm linting GitHub workflow
remote: origin
target-branch: master
lint-conf: charts/.ci/lint-config.yaml lint-conf: charts/.ci/lint-config.yaml
chart-repos: chart-repos:
- jetstack=https://charts.jetstack.io - jetstack=https://charts.jetstack.io
check-version-increment: false # Disable checking that the chart version has been bumped check-version-increment: false # Disable checking that the chart version has been bumped
charts: charts:
- charts/actions-runner-controller - charts/actions-runner-controller

View File

@@ -1,6 +1,5 @@
#!/bin/bash #!/bin/bash
for chart in `ls charts`; for chart in `ls charts`;
do do
helm template --values charts/$chart/ci/ci-values.yaml charts/$chart | kube-score score - \ helm template --values charts/$chart/ci/ci-values.yaml charts/$chart | kube-score score - \
@@ -12,4 +11,4 @@ helm template --values charts/$chart/ci/ci-values.yaml charts/$chart | kube-scor
--enable-optional-test container-security-context-privileged \ --enable-optional-test container-security-context-privileged \
--enable-optional-test container-security-context-readonlyrootfilesystem \ --enable-optional-test container-security-context-readonlyrootfilesystem \
--ignore-test container-security-context --ignore-test container-security-context
done done

View File

@@ -44,7 +44,7 @@ All additional docs are kept in the `docs/` folder, this README is solely for do
| `image.pullPolicy` | The pull policy of the controller image | IfNotPresent | | `image.pullPolicy` | The pull policy of the controller image | IfNotPresent |
| `metrics.serviceMonitor.enable` | Deploy serviceMonitor kind for use with prometheus-operator CRDs | false | | `metrics.serviceMonitor.enable` | Deploy serviceMonitor kind for use with prometheus-operator CRDs | false |
| `metrics.serviceMonitor.interval` | Configure the interval that Prometheus should scrape the controller's metrics | 1m | | `metrics.serviceMonitor.interval` | Configure the interval that Prometheus should scrape the controller's metrics | 1m |
| `metrics.serviceMonitor.namespace | Namespace which Prometheus is running in | `Release.Namespace` (the default namespace of the helm chart). | | `metrics.serviceMonitor.namespace` | Namespace which Prometheus is running in | `Release.Namespace` (the default namespace of the helm chart). |
| `metrics.serviceMonitor.timeout` | Configure the timeout of Prometheus scraping. | 30s | | `metrics.serviceMonitor.timeout` | Configure the timeout of Prometheus scraping. | 30s |
| `metrics.serviceAnnotations` | Set annotations for the provisioned metrics service resource | | | `metrics.serviceAnnotations` | Set annotations for the provisioned metrics service resource | |
| `metrics.port` | Set port of metrics service | 8443 | | `metrics.port` | Set port of metrics service | 8443 |

View File

@@ -12,306 +12,313 @@ spec:
listKind: HorizontalRunnerAutoscalerList listKind: HorizontalRunnerAutoscalerList
plural: horizontalrunnerautoscalers plural: horizontalrunnerautoscalers
shortNames: shortNames:
- hra - hra
singular: horizontalrunnerautoscaler singular: horizontalrunnerautoscaler
scope: Namespaced scope: Namespaced
versions: versions:
- additionalPrinterColumns: - additionalPrinterColumns:
- jsonPath: .spec.minReplicas - jsonPath: .spec.minReplicas
name: Min name: Min
type: number type: number
- jsonPath: .spec.maxReplicas - jsonPath: .spec.maxReplicas
name: Max name: Max
type: number type: number
- jsonPath: .status.desiredReplicas - jsonPath: .status.desiredReplicas
name: Desired name: Desired
type: number type: number
- jsonPath: .status.scheduledOverridesSummary - jsonPath: .status.scheduledOverridesSummary
name: Schedule name: Schedule
type: string type: string
name: v1alpha1 name: v1alpha1
schema: schema:
openAPIV3Schema: openAPIV3Schema:
description: HorizontalRunnerAutoscaler is the Schema for the horizontalrunnerautoscaler API description: HorizontalRunnerAutoscaler is the Schema for the horizontalrunnerautoscaler
properties: API
apiVersion: properties:
description: |- apiVersion:
APIVersion defines the versioned schema of this representation of an object. description: |-
Servers should convert recognized schemas to the latest internal value, and APIVersion defines the versioned schema of this representation of an object.
may reject unrecognized values. Servers should convert recognized schemas to the latest internal value, and
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources may reject unrecognized values.
type: string More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
kind: type: string
description: |- kind:
Kind is a string value representing the REST resource this object represents. description: |-
Servers may infer this from the endpoint the client submits requests to. Kind is a string value representing the REST resource this object represents.
Cannot be updated. Servers may infer this from the endpoint the client submits requests to.
In CamelCase. Cannot be updated.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds In CamelCase.
type: string More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
metadata: type: string
type: object metadata:
spec: type: object
description: HorizontalRunnerAutoscalerSpec defines the desired state of HorizontalRunnerAutoscaler spec:
properties: description: HorizontalRunnerAutoscalerSpec defines the desired state
capacityReservations: of HorizontalRunnerAutoscaler
items: properties:
description: |- capacityReservations:
CapacityReservation specifies the number of replicas temporarily added items:
to the scale target until ExpirationTime.
properties:
effectiveTime:
format: date-time
type: string
expirationTime:
format: date-time
type: string
name:
type: string
replicas:
type: integer
type: object
type: array
githubAPICredentialsFrom:
properties:
secretRef:
properties:
name:
type: string
required:
- name
type: object
type: object
maxReplicas:
description: MaxReplicas is the maximum number of replicas the deployment is allowed to scale
type: integer
metrics:
description: Metrics is the collection of various metric targets to calculate desired number of runners
items:
properties:
repositoryNames:
description: |-
RepositoryNames is the list of repository names to be used for calculating the metric.
For example, a repository name is the REPO part of `github.com/USER/REPO`.
items:
type: string
type: array
scaleDownAdjustment:
description: |-
ScaleDownAdjustment is the number of runners removed on scale-down.
You can only specify either ScaleDownFactor or ScaleDownAdjustment.
type: integer
scaleDownFactor:
description: |-
ScaleDownFactor is the multiplicative factor applied to the current number of runners used
to determine how many pods should be removed.
type: string
scaleDownThreshold:
description: |-
ScaleDownThreshold is the percentage of busy runners less than which will
trigger the hpa to scale the runners down.
type: string
scaleUpAdjustment:
description: |-
ScaleUpAdjustment is the number of runners added on scale-up.
You can only specify either ScaleUpFactor or ScaleUpAdjustment.
type: integer
scaleUpFactor:
description: |-
ScaleUpFactor is the multiplicative factor applied to the current number of runners used
to determine how many pods should be added.
type: string
scaleUpThreshold:
description: |-
ScaleUpThreshold is the percentage of busy runners greater than which will
trigger the hpa to scale runners up.
type: string
type:
description: |-
Type is the type of metric to be used for autoscaling.
It can be TotalNumberOfQueuedAndInProgressWorkflowRuns or PercentageRunnersBusy.
type: string
type: object
type: array
minReplicas:
description: MinReplicas is the minimum number of replicas the deployment is allowed to scale
type: integer
scaleDownDelaySecondsAfterScaleOut:
description: |- description: |-
ScaleDownDelaySecondsAfterScaleUp is the approximate delay for a scale down followed by a scale up CapacityReservation specifies the number of replicas temporarily added
Used to prevent flapping (down->up->down->... loop) to the scale target until ExpirationTime.
type: integer
scaleTargetRef:
description: ScaleTargetRef is the reference to scaled resource like RunnerDeployment
properties: properties:
kind: effectiveTime:
description: Kind is the type of resource being referenced format: date-time
enum: type: string
- RunnerDeployment expirationTime:
- RunnerSet format: date-time
type: string type: string
name: name:
description: Name is the name of resource being referenced
type: string type: string
replicas:
type: integer
type: object type: object
scaleUpTriggers: type: array
description: |- githubAPICredentialsFrom:
ScaleUpTriggers is an experimental feature to increase the desired replicas by 1 properties:
on each webhook requested received by the webhookBasedAutoscaler. secretRef:
This feature requires you to also enable and deploy the webhookBasedAutoscaler onto your cluster.
Note that the added runners remain until the next sync period at least,
and they may or may not be used by GitHub Actions depending on the timing.
They are intended to be used to gain "resource slack" immediately after you
receive a webhook from GitHub, so that you can loosely expect MinReplicas runners to be always available.
items:
properties: properties:
amount: name:
type: integer
duration:
type: string
githubEvent:
properties:
checkRun:
description: https://docs.github.com/en/actions/reference/events-that-trigger-workflows#check_run
properties:
names:
description: |-
Names is a list of GitHub Actions glob patterns.
Any check_run event whose name matches one of patterns in the list can trigger autoscaling.
Note that check_run name seem to equal to the job name you've defined in your actions workflow yaml file.
So it is very likely that you can utilize this to trigger depending on the job.
items:
type: string
type: array
repositories:
description: |-
Repositories is a list of GitHub repositories.
Any check_run event whose repository matches one of repositories in the list can trigger autoscaling.
items:
type: string
type: array
status:
type: string
types:
description: 'One of: created, rerequested, or completed'
items:
type: string
type: array
type: object
pullRequest:
description: https://docs.github.com/en/actions/reference/events-that-trigger-workflows#pull_request
properties:
branches:
items:
type: string
type: array
types:
items:
type: string
type: array
type: object
push:
description: |-
PushSpec is the condition for triggering scale-up on push event
Also see https://docs.github.com/en/actions/reference/events-that-trigger-workflows#push
type: object
workflowJob:
description: https://docs.github.com/en/developers/webhooks-and-events/webhooks/webhook-events-and-payloads#workflow_job
type: object
type: object
type: object
type: array
scheduledOverrides:
description: |-
ScheduledOverrides is the list of ScheduledOverride.
It can be used to override a few fields of HorizontalRunnerAutoscalerSpec on schedule.
The earlier a scheduled override is, the higher it is prioritized.
items:
description: |-
ScheduledOverride can be used to override a few fields of HorizontalRunnerAutoscalerSpec on schedule.
A schedule can optionally be recurring, so that the corresponding override happens every day, week, month, or year.
properties:
endTime:
description: EndTime is the time at which the first override ends.
format: date-time
type: string
minReplicas:
description: |-
MinReplicas is the number of runners while overriding.
If omitted, it doesn't override minReplicas.
minimum: 0
nullable: true
type: integer
recurrenceRule:
properties:
frequency:
description: |-
Frequency is the name of a predefined interval of each recurrence.
The valid values are "Daily", "Weekly", "Monthly", and "Yearly".
If empty, the corresponding override happens only once.
enum:
- Daily
- Weekly
- Monthly
- Yearly
type: string
untilTime:
description: |-
UntilTime is the time of the final recurrence.
If empty, the schedule recurs forever.
format: date-time
type: string
type: object
startTime:
description: StartTime is the time at which the first override starts.
format: date-time
type: string type: string
required: required:
- endTime - name
- startTime
type: object type: object
type: array type: object
type: object maxReplicas:
status: description: MaxReplicas is the maximum number of replicas the deployment
properties: is allowed to scale
cacheEntries: type: integer
items: metrics:
properties: description: Metrics is the collection of various metric targets to
expirationTime: calculate desired number of runners
format: date-time items:
properties:
repositoryNames:
description: |-
RepositoryNames is the list of repository names to be used for calculating the metric.
For example, a repository name is the REPO part of `github.com/USER/REPO`.
items:
type: string type: string
key: type: array
type: string scaleDownAdjustment:
value: description: |-
type: integer ScaleDownAdjustment is the number of runners removed on scale-down.
type: object You can only specify either ScaleDownFactor or ScaleDownAdjustment.
type: array type: integer
desiredReplicas: scaleDownFactor:
description: |-
ScaleDownFactor is the multiplicative factor applied to the current number of runners used
to determine how many pods should be removed.
type: string
scaleDownThreshold:
description: |-
ScaleDownThreshold is the percentage of busy runners less than which will
trigger the hpa to scale the runners down.
type: string
scaleUpAdjustment:
description: |-
ScaleUpAdjustment is the number of runners added on scale-up.
You can only specify either ScaleUpFactor or ScaleUpAdjustment.
type: integer
scaleUpFactor:
description: |-
ScaleUpFactor is the multiplicative factor applied to the current number of runners used
to determine how many pods should be added.
type: string
scaleUpThreshold:
description: |-
ScaleUpThreshold is the percentage of busy runners greater than which will
trigger the hpa to scale runners up.
type: string
type:
description: |-
Type is the type of metric to be used for autoscaling.
It can be TotalNumberOfQueuedAndInProgressWorkflowRuns or PercentageRunnersBusy.
type: string
type: object
type: array
minReplicas:
description: MinReplicas is the minimum number of replicas the deployment
is allowed to scale
type: integer
scaleDownDelaySecondsAfterScaleOut:
description: |-
ScaleDownDelaySecondsAfterScaleUp is the approximate delay for a scale down followed by a scale up
Used to prevent flapping (down->up->down->... loop)
type: integer
scaleTargetRef:
description: ScaleTargetRef is the reference to scaled resource like
RunnerDeployment
properties:
kind:
description: Kind is the type of resource being referenced
enum:
- RunnerDeployment
- RunnerSet
type: string
name:
description: Name is the name of resource being referenced
type: string
type: object
scaleUpTriggers:
description: |-
ScaleUpTriggers is an experimental feature to increase the desired replicas by 1
on each webhook requested received by the webhookBasedAutoscaler.
This feature requires you to also enable and deploy the webhookBasedAutoscaler onto your cluster.
Note that the added runners remain until the next sync period at least,
and they may or may not be used by GitHub Actions depending on the timing.
They are intended to be used to gain "resource slack" immediately after you
receive a webhook from GitHub, so that you can loosely expect MinReplicas runners to be always available.
items:
properties:
amount:
type: integer
duration:
type: string
githubEvent:
properties:
checkRun:
description: https://docs.github.com/en/actions/reference/events-that-trigger-workflows#check_run
properties:
names:
description: |-
Names is a list of GitHub Actions glob patterns.
Any check_run event whose name matches one of patterns in the list can trigger autoscaling.
Note that check_run name seem to equal to the job name you've defined in your actions workflow yaml file.
So it is very likely that you can utilize this to trigger depending on the job.
items:
type: string
type: array
repositories:
description: |-
Repositories is a list of GitHub repositories.
Any check_run event whose repository matches one of repositories in the list can trigger autoscaling.
items:
type: string
type: array
status:
type: string
types:
description: 'One of: created, rerequested, or completed'
items:
type: string
type: array
type: object
pullRequest:
description: https://docs.github.com/en/actions/reference/events-that-trigger-workflows#pull_request
properties:
branches:
items:
type: string
type: array
types:
items:
type: string
type: array
type: object
push:
description: |-
PushSpec is the condition for triggering scale-up on push event
Also see https://docs.github.com/en/actions/reference/events-that-trigger-workflows#push
type: object
workflowJob:
description: https://docs.github.com/en/developers/webhooks-and-events/webhooks/webhook-events-and-payloads#workflow_job
type: object
type: object
type: object
type: array
scheduledOverrides:
description: |-
ScheduledOverrides is the list of ScheduledOverride.
It can be used to override a few fields of HorizontalRunnerAutoscalerSpec on schedule.
The earlier a scheduled override is, the higher it is prioritized.
items:
description: |- description: |-
DesiredReplicas is the total number of desired, non-terminated and latest pods to be set for the primary RunnerSet ScheduledOverride can be used to override a few fields of HorizontalRunnerAutoscalerSpec on schedule.
This doesn't include outdated pods while upgrading the deployment and replacing the runnerset. A schedule can optionally be recurring, so that the corresponding override happens every day, week, month, or year.
type: integer properties:
lastSuccessfulScaleOutTime: endTime:
format: date-time description: EndTime is the time at which the first override
nullable: true ends.
type: string format: date-time
observedGeneration: type: string
description: |- minReplicas:
ObservedGeneration is the most recent generation observed for the target. It corresponds to e.g. description: |-
RunnerDeployment's generation, which is updated on mutation by the API Server. MinReplicas is the number of runners while overriding.
format: int64 If omitted, it doesn't override minReplicas.
type: integer minimum: 0
scheduledOverridesSummary: nullable: true
description: |- type: integer
ScheduledOverridesSummary is the summary of active and upcoming scheduled overrides to be shown in e.g. a column of a `kubectl get hra` output recurrenceRule:
for observability. properties:
type: string frequency:
type: object description: |-
type: object Frequency is the name of a predefined interval of each recurrence.
served: true The valid values are "Daily", "Weekly", "Monthly", and "Yearly".
storage: true If empty, the corresponding override happens only once.
subresources: enum:
status: {} - Daily
preserveUnknownFields: false - Weekly
- Monthly
- Yearly
type: string
untilTime:
description: |-
UntilTime is the time of the final recurrence.
If empty, the schedule recurs forever.
format: date-time
type: string
type: object
startTime:
description: StartTime is the time at which the first override
starts.
format: date-time
type: string
required:
- endTime
- startTime
type: object
type: array
type: object
status:
properties:
cacheEntries:
items:
properties:
expirationTime:
format: date-time
type: string
key:
type: string
value:
type: integer
type: object
type: array
desiredReplicas:
description: |-
DesiredReplicas is the total number of desired, non-terminated and latest pods to be set for the primary RunnerSet
This doesn't include outdated pods while upgrading the deployment and replacing the runnerset.
type: integer
lastSuccessfulScaleOutTime:
format: date-time
nullable: true
type: string
observedGeneration:
description: |-
ObservedGeneration is the most recent generation observed for the target. It corresponds to e.g.
RunnerDeployment's generation, which is updated on mutation by the API Server.
format: int64
type: integer
scheduledOverridesSummary:
description: |-
ScheduledOverridesSummary is the summary of active and upcoming scheduled overrides to be shown in e.g. a column of a `kubectl get hra` output
for observability.
type: string
type: object
type: object
served: true
storage: true
subresources:
status: {}

View File

@@ -9348,4 +9348,3 @@ spec:
storage: true storage: true
subresources: subresources:
status: {} status: {}
preserveUnknownFields: false

View File

@@ -9322,4 +9322,3 @@ spec:
storage: true storage: true
subresources: subresources:
status: {} status: {}
preserveUnknownFields: false

View File

@@ -9328,4 +9328,3 @@ spec:
storage: true storage: true
subresources: subresources:
status: {} status: {}
preserveUnknownFields: false

View File

@@ -8389,4 +8389,3 @@ spec:
storage: true storage: true
subresources: subresources:
status: {} status: {}
preserveUnknownFields: false

View File

@@ -15,13 +15,13 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes # This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version. # to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/) # Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.11.0 version: 0.12.1
# This is the version number of the application being deployed. This version number should be # This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to # incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using. # follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes. # It is recommended to use it with quotes.
appVersion: "0.11.0" appVersion: "0.12.1"
home: https://github.com/actions/actions-runner-controller home: https://github.com/actions/actions-runner-controller

View File

@@ -15504,6 +15504,53 @@ spec:
- containers - containers
type: object type: object
type: object type: object
vaultConfig:
properties:
azureKeyVault:
properties:
certificatePath:
type: string
clientId:
type: string
tenantId:
type: string
url:
type: string
required:
- certificatePath
- clientId
- tenantId
- url
type: object
proxy:
properties:
http:
properties:
credentialSecretRef:
type: string
url:
description: Required
type: string
type: object
https:
properties:
credentialSecretRef:
type: string
url:
description: Required
type: string
type: object
noProxy:
items:
type: string
type: array
type: object
type:
description: |-
VaultType represents the type of vault that can be used in the application.
It is used to identify which vault integration should be used to resolve secrets.
type: string
type: object
type: object type: object
status: status:
description: AutoscalingRunnerSetStatus defines the observed state of AutoscalingRunnerSet description: AutoscalingRunnerSetStatus defines the observed state of AutoscalingRunnerSet
@@ -15524,4 +15571,3 @@ spec:
storage: true storage: true
subresources: subresources:
status: {} status: {}
preserveUnknownFields: false

View File

@@ -36,6 +36,9 @@ spec:
- jsonPath: .status.jobDisplayName - jsonPath: .status.jobDisplayName
name: JobDisplayName name: JobDisplayName
type: string type: string
- jsonPath: .status.jobId
name: JobId
type: string
- jsonPath: .status.message - jsonPath: .status.message
name: Message name: Message
type: string type: string
@@ -7784,6 +7787,53 @@ spec:
required: required:
- containers - containers
type: object type: object
vaultConfig:
properties:
azureKeyVault:
properties:
certificatePath:
type: string
clientId:
type: string
tenantId:
type: string
url:
type: string
required:
- certificatePath
- clientId
- tenantId
- url
type: object
proxy:
properties:
http:
properties:
credentialSecretRef:
type: string
url:
description: Required
type: string
type: object
https:
properties:
credentialSecretRef:
type: string
url:
description: Required
type: string
type: object
noProxy:
items:
type: string
type: array
type: object
type:
description: |-
VaultType represents the type of vault that can be used in the application.
It is used to identify which vault integration should be used to resolve secrets.
type: string
type: object
required: required:
- githubConfigSecret - githubConfigSecret
- githubConfigUrl - githubConfigUrl
@@ -7794,10 +7844,13 @@ spec:
properties: properties:
failures: failures:
additionalProperties: additionalProperties:
type: boolean format: date-time
type: string
type: object type: object
jobDisplayName: jobDisplayName:
type: string type: string
jobId:
type: string
jobRepositoryName: jobRepositoryName:
type: string type: string
jobRequestId: jobRequestId:
@@ -7826,8 +7879,6 @@ spec:
type: string type: string
runnerId: runnerId:
type: integer type: integer
runnerJITConfig:
type: string
runnerName: runnerName:
type: string type: string
workflowRunId: workflowRunId:
@@ -7839,4 +7890,3 @@ spec:
storage: true storage: true
subresources: subresources:
status: {} status: {}
preserveUnknownFields: false

View File

@@ -7778,6 +7778,53 @@ spec:
required: required:
- containers - containers
type: object type: object
vaultConfig:
properties:
azureKeyVault:
properties:
certificatePath:
type: string
clientId:
type: string
tenantId:
type: string
url:
type: string
required:
- certificatePath
- clientId
- tenantId
- url
type: object
proxy:
properties:
http:
properties:
credentialSecretRef:
type: string
url:
description: Required
type: string
type: object
https:
properties:
credentialSecretRef:
type: string
url:
description: Required
type: string
type: object
noProxy:
items:
type: string
type: array
type: object
type:
description: |-
VaultType represents the type of vault that can be used in the application.
It is used to identify which vault integration should be used to resolve secrets.
type: string
type: object
required: required:
- githubConfigSecret - githubConfigSecret
- githubConfigUrl - githubConfigUrl
@@ -7812,4 +7859,3 @@ spec:
storage: true storage: true
subresources: subresources:
status: {} status: {}
preserveUnknownFields: false

View File

@@ -129,11 +129,3 @@ Create the name of the service account to use
{{- define "gha-runner-scale-set-controller.leaderElectionRoleBinding" -}} {{- define "gha-runner-scale-set-controller.leaderElectionRoleBinding" -}}
{{- include "gha-runner-scale-set-controller.fullname" . }}-leader-election {{- include "gha-runner-scale-set-controller.fullname" . }}-leader-election
{{- end }} {{- end }}
{{- define "gha-runner-scale-set-controller.imagePullSecretsNames" -}}
{{- $names := list }}
{{- range $k, $v := . }}
{{- $names = append $names $v.name }}
{{- end }}
{{- $names | join ","}}
{{- end }}

View File

@@ -54,7 +54,9 @@ spec:
- "--leader-election-id={{ include "gha-runner-scale-set-controller.fullname" . }}" - "--leader-election-id={{ include "gha-runner-scale-set-controller.fullname" . }}"
{{- end }} {{- end }}
{{- with .Values.imagePullSecrets }} {{- with .Values.imagePullSecrets }}
- "--auto-scaler-image-pull-secrets={{ include "gha-runner-scale-set-controller.imagePullSecretsNames" . }}" {{- range . }}
- "--auto-scaler-image-pull-secrets={{- .name -}}"
{{- end }}
{{- end }} {{- end }}
{{- with .Values.flags.logLevel }} {{- with .Values.flags.logLevel }}
- "--log-level={{ . }}" - "--log-level={{ . }}"

View File

@@ -683,7 +683,8 @@ func TestTemplate_ControllerDeployment_ForwardImagePullSecrets(t *testing.T) {
expectedArgs := []string{ expectedArgs := []string{
"--auto-scaling-runner-set-only", "--auto-scaling-runner-set-only",
"--auto-scaler-image-pull-secrets=dockerhub,ghcr", "--auto-scaler-image-pull-secrets=dockerhub",
"--auto-scaler-image-pull-secrets=ghcr",
"--log-level=debug", "--log-level=debug",
"--log-format=text", "--log-format=text",
"--update-strategy=immediate", "--update-strategy=immediate",
@@ -1079,6 +1080,7 @@ func TestDeployment_excludeLabelPropagationPrefixes(t *testing.T) {
assert.Contains(t, container.Args, "--exclude-label-propagation-prefix=prefix.com/") assert.Contains(t, container.Args, "--exclude-label-propagation-prefix=prefix.com/")
assert.Contains(t, container.Args, "--exclude-label-propagation-prefix=complete.io/label") assert.Contains(t, container.Args, "--exclude-label-propagation-prefix=complete.io/label")
} }
func TestNamespaceOverride(t *testing.T) { func TestNamespaceOverride(t *testing.T) {
t.Parallel() t.Parallel()

View File

@@ -15,13 +15,13 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes # This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version. # to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/) # Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.11.0 version: 0.12.1
# This is the version number of the application being deployed. This version number should be # This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to # incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using. # follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes. # It is recommended to use it with quotes.
appVersion: "0.11.0" appVersion: "0.12.1"
home: https://github.com/actions/actions-runner-controller home: https://github.com/actions/actions-runner-controller

View File

@@ -62,12 +62,12 @@ app.kubernetes.io/instance: {{ include "gha-runner-scale-set.scale-set-name" . }
{{- fail "Values.githubConfigSecret is required for setting auth with GitHub server." }} {{- fail "Values.githubConfigSecret is required for setting auth with GitHub server." }}
{{- end }} {{- end }}
{{- else }} {{- else }}
{{- include "gha-runner-scale-set.fullname" . }}-github-secret {{- include "gha-runner-scale-set.fullname" . | replace "_" "-" }}-github-secret
{{- end }} {{- end }}
{{- end }} {{- end }}
{{- define "gha-runner-scale-set.noPermissionServiceAccountName" -}} {{- define "gha-runner-scale-set.noPermissionServiceAccountName" -}}
{{- include "gha-runner-scale-set.fullname" . }}-no-permission {{- include "gha-runner-scale-set.fullname" . | replace "_" "-" }}-no-permission
{{- end }} {{- end }}
{{- define "gha-runner-scale-set.kubeModeRoleName" -}} {{- define "gha-runner-scale-set.kubeModeRoleName" -}}
@@ -79,7 +79,7 @@ app.kubernetes.io/instance: {{ include "gha-runner-scale-set.scale-set-name" . }
{{- end }} {{- end }}
{{- define "gha-runner-scale-set.kubeModeServiceAccountName" -}} {{- define "gha-runner-scale-set.kubeModeServiceAccountName" -}}
{{- include "gha-runner-scale-set.fullname" . }}-kube-mode {{- include "gha-runner-scale-set.fullname" . | replace "_" "-" }}-kube-mode
{{- end }} {{- end }}
{{- define "gha-runner-scale-set.dind-init-container" -}} {{- define "gha-runner-scale-set.dind-init-container" -}}
@@ -106,6 +106,17 @@ env:
value: "123" value: "123"
securityContext: securityContext:
privileged: true privileged: true
{{- if (ge (.Capabilities.KubeVersion.Minor | int) 29) }}
restartPolicy: Always
startupProbe:
exec:
command:
- docker
- info
initialDelaySeconds: 0
failureThreshold: 24
periodSeconds: 5
{{- end }}
volumeMounts: volumeMounts:
- name: work - name: work
mountPath: /home/runner/_work mountPath: /home/runner/_work
@@ -366,6 +377,101 @@ volumeMounts:
{{- end }} {{- end }}
{{- end }} {{- end }}
{{- define "gha-runner-scale-set.kubernetes-novolume-mode-runner-container" -}}
{{- $tlsConfig := (default (dict) .Values.githubServerTLS) }}
{{- range $i, $container := .Values.template.spec.containers }}
{{- if eq $container.name "runner" }}
{{- $setRunnerImage := "" }}
{{- range $key, $val := $container }}
{{- if and (ne $key "env") (ne $key "volumeMounts") (ne $key "name") }}
{{- if eq $key "image" }}
{{- $setRunnerImage = $val }}
{{- end }}
{{ $key }}: {{ $val | toYaml | nindent 2 }}
{{- end }}
{{- end }}
{{- $setContainerHooks := 1 }}
{{- $setPodName := 1 }}
{{- $setRequireJobContainer := 1 }}
{{- $setActionsRunnerImage := 1 }}
{{- $setNodeExtraCaCerts := 0 }}
{{- $setRunnerUpdateCaCerts := 0 }}
{{- if $tlsConfig.runnerMountPath }}
{{- $setNodeExtraCaCerts = 1 }}
{{- $setRunnerUpdateCaCerts = 1 }}
{{- end }}
env:
{{- with $container.env }}
{{- range $i, $env := . }}
{{- if eq $env.name "ACTIONS_RUNNER_CONTAINER_HOOKS" }}
{{- $setContainerHooks = 0 }}
{{- end }}
{{- if eq $env.name "ACTIONS_RUNNER_IMAGE" }}
{{- $setActionsRunnerImage = 0 }}
{{- end }}
{{- if eq $env.name "ACTIONS_RUNNER_POD_NAME" }}
{{- $setPodName = 0 }}
{{- end }}
{{- if eq $env.name "ACTIONS_RUNNER_REQUIRE_JOB_CONTAINER" }}
{{- $setRequireJobContainer = 0 }}
{{- end }}
{{- if eq $env.name "NODE_EXTRA_CA_CERTS" }}
{{- $setNodeExtraCaCerts = 0 }}
{{- end }}
{{- if eq $env.name "RUNNER_UPDATE_CA_CERTS" }}
{{- $setRunnerUpdateCaCerts = 0 }}
{{- end }}
- {{ $env | toYaml | nindent 4 }}
{{- end }}
{{- end }}
{{- if $setContainerHooks }}
- name: ACTIONS_RUNNER_CONTAINER_HOOKS
value: /home/runner/k8s-novolume/index.js
{{- end }}
{{- if $setPodName }}
- name: ACTIONS_RUNNER_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
{{- end }}
{{- if $setRequireJobContainer }}
- name: ACTIONS_RUNNER_REQUIRE_JOB_CONTAINER
value: "true"
{{- end }}
{{- if $setActionsRunnerImage }}
- name: ACTIONS_RUNNER_IMAGE
value: "{{- $setRunnerImage -}}"
{{- end }}
{{- if $setNodeExtraCaCerts }}
- name: NODE_EXTRA_CA_CERTS
value: {{ clean (print $tlsConfig.runnerMountPath "/" $tlsConfig.certificateFrom.configMapKeyRef.key) }}
{{- end }}
{{- if $setRunnerUpdateCaCerts }}
- name: RUNNER_UPDATE_CA_CERTS
value: "1"
{{- end }}
{{- $mountGitHubServerTLS := 0 }}
{{- if $tlsConfig.runnerMountPath }}
{{- $mountGitHubServerTLS = 1 }}
{{- end }}
volumeMounts:
{{- with $container.volumeMounts }}
{{- range $i, $volMount := . }}
{{- if eq $volMount.name "github-server-tls-cert" }}
{{- $mountGitHubServerTLS = 0 }}
{{- end }}
- {{ $volMount | toYaml | nindent 4 }}
{{- end }}
{{- end }}
{{- if $mountGitHubServerTLS }}
- name: github-server-tls-cert
mountPath: {{ clean (print $tlsConfig.runnerMountPath "/" $tlsConfig.certificateFrom.configMapKeyRef.key) }}
subPath: {{ $tlsConfig.certificateFrom.configMapKeyRef.key }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}
{{- define "gha-runner-scale-set.default-mode-runner-containers" -}} {{- define "gha-runner-scale-set.default-mode-runner-containers" -}}
{{- $tlsConfig := (default (dict) .Values.githubServerTLS) }} {{- $tlsConfig := (default (dict) .Values.githubServerTLS) }}
{{- range $i, $container := .Values.template.spec.containers }} {{- range $i, $container := .Values.template.spec.containers }}

View File

@@ -8,7 +8,7 @@ metadata:
{{- if gt (len (include "gha-runner-scale-set.namespace" .)) 63 }} {{- if gt (len (include "gha-runner-scale-set.namespace" .)) 63 }}
{{ fail "Namespace must have up to 63 characters" }} {{ fail "Namespace must have up to 63 characters" }}
{{- end }} {{- end }}
name: {{ include "gha-runner-scale-set.scale-set-name" . }} name: {{ include "gha-runner-scale-set.scale-set-name" . | replace "_" "-" }}
namespace: {{ include "gha-runner-scale-set.namespace" . }} namespace: {{ include "gha-runner-scale-set.namespace" . }}
labels: labels:
{{- with .Values.labels }} {{- with .Values.labels }}
@@ -37,14 +37,15 @@ metadata:
{{- end }} {{- end }}
actions.github.com/cleanup-manager-role-binding: {{ include "gha-runner-scale-set.managerRoleBindingName" . }} actions.github.com/cleanup-manager-role-binding: {{ include "gha-runner-scale-set.managerRoleBindingName" . }}
actions.github.com/cleanup-manager-role-name: {{ include "gha-runner-scale-set.managerRoleName" . }} actions.github.com/cleanup-manager-role-name: {{ include "gha-runner-scale-set.managerRoleName" . }}
{{- if and $containerMode (eq $containerMode.type "kubernetes") (not .Values.template.spec.serviceAccountName) }} {{- if and (or (eq $containerMode.type "kubernetes") (eq $containerMode.type "kubernetes-novolume")) (not .Values.template.spec.serviceAccountName) }}
actions.github.com/cleanup-kubernetes-mode-role-binding-name: {{ include "gha-runner-scale-set.kubeModeRoleBindingName" . }} actions.github.com/cleanup-kubernetes-mode-role-binding-name: {{ include "gha-runner-scale-set.kubeModeRoleBindingName" . }}
actions.github.com/cleanup-kubernetes-mode-role-name: {{ include "gha-runner-scale-set.kubeModeRoleName" . }} actions.github.com/cleanup-kubernetes-mode-role-name: {{ include "gha-runner-scale-set.kubeModeRoleName" . }}
actions.github.com/cleanup-kubernetes-mode-service-account-name: {{ include "gha-runner-scale-set.kubeModeServiceAccountName" . }} actions.github.com/cleanup-kubernetes-mode-service-account-name: {{ include "gha-runner-scale-set.kubeModeServiceAccountName" . }}
{{- end }} {{- end }}
{{- if and (ne $containerMode.type "kubernetes") (not .Values.template.spec.serviceAccountName) }} {{- if and (ne $containerMode.type "kubernetes") (ne $containerMode.type "kubernetes-novolume") (not .Values.template.spec.serviceAccountName) }}
actions.github.com/cleanup-no-permission-service-account-name: {{ include "gha-runner-scale-set.noPermissionServiceAccountName" . }} actions.github.com/cleanup-no-permission-service-account-name: {{ include "gha-runner-scale-set.noPermissionServiceAccountName" . }}
{{- end }} {{- end }}
spec: spec:
githubConfigUrl: {{ required ".Values.githubConfigUrl is required" (trimSuffix "/" .Values.githubConfigUrl) }} githubConfigUrl: {{ required ".Values.githubConfigUrl is required" (trimSuffix "/" .Values.githubConfigUrl) }}
githubConfigSecret: {{ include "gha-runner-scale-set.githubsecret" . }} githubConfigSecret: {{ include "gha-runner-scale-set.githubsecret" . }}
@@ -65,6 +66,24 @@ spec:
{{- end }} {{- end }}
{{- end }} {{- end }}
{{- if and .Values.keyVault .Values.keyVault.type }}
vaultConfig:
type: {{ .Values.keyVault.type }}
{{- if .Values.keyVault.proxy }}
proxy: {{- toYaml .Values.keyVault.proxy | nindent 6 }}
{{- end }}
{{- if eq .Values.keyVault.type "azure_key_vault" }}
azureKeyVault:
url: {{ .Values.keyVault.azureKeyVault.url }}
tenantId: {{ .Values.keyVault.azureKeyVault.tenantId }}
clientId: {{ .Values.keyVault.azureKeyVault.clientId }}
certificatePath: {{ .Values.keyVault.azureKeyVault.certificatePath }}
secretKey: {{ .Values.keyVault.azureKeyVault.secretKey }}
{{- else }}
{{- fail "Unsupported keyVault type: " .Values.keyVault.type }}
{{- end }}
{{- end }}
{{- if .Values.proxy }} {{- if .Values.proxy }}
proxy: proxy:
{{- if .Values.proxy.http }} {{- if .Values.proxy.http }}
@@ -138,7 +157,7 @@ spec:
restartPolicy: Never restartPolicy: Never
{{- end }} {{- end }}
{{- $containerMode := .Values.containerMode }} {{- $containerMode := .Values.containerMode }}
{{- if eq $containerMode.type "kubernetes" }} {{- if or (eq $containerMode.type "kubernetes") (eq $containerMode.type "kubernetes-novolume") }}
serviceAccountName: {{ default (include "gha-runner-scale-set.kubeModeServiceAccountName" .) .Values.template.spec.serviceAccountName }} serviceAccountName: {{ default (include "gha-runner-scale-set.kubeModeServiceAccountName" .) .Values.template.spec.serviceAccountName }}
{{- else }} {{- else }}
serviceAccountName: {{ default (include "gha-runner-scale-set.noPermissionServiceAccountName" .) .Values.template.spec.serviceAccountName }} serviceAccountName: {{ default (include "gha-runner-scale-set.noPermissionServiceAccountName" .) .Values.template.spec.serviceAccountName }}
@@ -147,7 +166,11 @@ spec:
initContainers: initContainers:
{{- if eq $containerMode.type "dind" }} {{- if eq $containerMode.type "dind" }}
- name: init-dind-externals - name: init-dind-externals
{{- include "gha-runner-scale-set.dind-init-container" . | nindent 8 }} {{- include "gha-runner-scale-set.dind-init-container" . | nindent 8 }}
{{- if (ge (.Capabilities.KubeVersion.Minor | int) 29) }}
- name: dind
{{- include "gha-runner-scale-set.dind-container" . | nindent 8 }}
{{- end }}
{{- end }} {{- end }}
{{- with .Values.template.spec.initContainers }} {{- with .Values.template.spec.initContainers }}
{{- toYaml . | nindent 6 }} {{- toYaml . | nindent 6 }}
@@ -157,18 +180,24 @@ spec:
{{- if eq $containerMode.type "dind" }} {{- if eq $containerMode.type "dind" }}
- name: runner - name: runner
{{- include "gha-runner-scale-set.dind-runner-container" . | nindent 8 }} {{- include "gha-runner-scale-set.dind-runner-container" . | nindent 8 }}
{{- if not (ge (.Capabilities.KubeVersion.Minor | int) 29) }}
- name: dind - name: dind
{{- include "gha-runner-scale-set.dind-container" . | nindent 8 }} {{- include "gha-runner-scale-set.dind-container" . | nindent 8 }}
{{- end }}
{{- include "gha-runner-scale-set.non-runner-non-dind-containers" . | nindent 6 }} {{- include "gha-runner-scale-set.non-runner-non-dind-containers" . | nindent 6 }}
{{- else if eq $containerMode.type "kubernetes" }} {{- else if eq $containerMode.type "kubernetes" }}
- name: runner - name: runner
{{- include "gha-runner-scale-set.kubernetes-mode-runner-container" . | nindent 8 }} {{- include "gha-runner-scale-set.kubernetes-mode-runner-container" . | nindent 8 }}
{{- include "gha-runner-scale-set.non-runner-containers" . | nindent 6 }} {{- include "gha-runner-scale-set.non-runner-containers" . | nindent 6 }}
{{- else if eq $containerMode.type "kubernetes-novolume" }}
- name: runner
{{- include "gha-runner-scale-set.kubernetes-novolume-mode-runner-container" . | nindent 8 }}
{{- include "gha-runner-scale-set.non-runner-containers" . | nindent 6 }}
{{- else }} {{- else }}
{{- include "gha-runner-scale-set.default-mode-runner-containers" . | nindent 6 }} {{- include "gha-runner-scale-set.default-mode-runner-containers" . | nindent 6 }}
{{- end }} {{- end }}
{{- $tlsConfig := (default (dict) .Values.githubServerTLS) }} {{- $tlsConfig := (default (dict) .Values.githubServerTLS) }}
{{- if or .Values.template.spec.volumes (eq $containerMode.type "dind") (eq $containerMode.type "kubernetes") $tlsConfig.runnerMountPath }} {{- if or .Values.template.spec.volumes (eq $containerMode.type "dind") (eq $containerMode.type "kubernetes") (eq $containerMode.type "kubernetes-novolume") $tlsConfig.runnerMountPath }}
volumes: volumes:
{{- if $tlsConfig.runnerMountPath }} {{- if $tlsConfig.runnerMountPath }}
{{- include "gha-runner-scale-set.tls-volume" $tlsConfig | nindent 6 }} {{- include "gha-runner-scale-set.tls-volume" $tlsConfig | nindent 6 }}

View File

@@ -1,6 +1,6 @@
{{- $containerMode := .Values.containerMode }} {{- $containerMode := .Values.containerMode }}
{{- $hasCustomResourceMeta := (and .Values.resourceMeta .Values.resourceMeta.kubernetesModeRole) }} {{- $hasCustomResourceMeta := (and .Values.resourceMeta .Values.resourceMeta.kubernetesModeRole) }}
{{- if and (eq $containerMode.type "kubernetes") (not .Values.template.spec.serviceAccountName) }} {{- if and (or (eq $containerMode.type "kubernetes") (eq $containerMode.type "kubernetes-novolume")) (not .Values.template.spec.serviceAccountName) }}
# default permission for runner pod service account in kubernetes mode (container hook) # default permission for runner pod service account in kubernetes mode (container hook)
apiVersion: rbac.authorization.k8s.io/v1 apiVersion: rbac.authorization.k8s.io/v1
kind: Role kind: Role
@@ -38,9 +38,11 @@ rules:
- apiGroups: [""] - apiGroups: [""]
resources: ["pods/log"] resources: ["pods/log"]
verbs: ["get", "list", "watch",] verbs: ["get", "list", "watch",]
{{- if ne $containerMode.type "kubernetes-novolume" }}
- apiGroups: ["batch"] - apiGroups: ["batch"]
resources: ["jobs"] resources: ["jobs"]
verbs: ["get", "list", "create", "delete"] verbs: ["get", "list", "create", "delete"]
{{- end }}
- apiGroups: [""] - apiGroups: [""]
resources: ["secrets"] resources: ["secrets"]
verbs: ["get", "list", "create", "delete"] verbs: ["get", "list", "create", "delete"]

View File

@@ -1,6 +1,6 @@
{{- $containerMode := .Values.containerMode }} {{- $containerMode := .Values.containerMode }}
{{- $hasCustomResourceMeta := (and .Values.resourceMeta .Values.resourceMeta.kubernetesModeRoleBinding) }} {{- $hasCustomResourceMeta := (and .Values.resourceMeta .Values.resourceMeta.kubernetesModeRoleBinding) }}
{{- if and (eq $containerMode.type "kubernetes") (not .Values.template.spec.serviceAccountName) }} {{- if and (or (eq $containerMode.type "kubernetes") (eq $containerMode.type "kubernetes-novolume")) (not .Values.template.spec.serviceAccountName) }}
apiVersion: rbac.authorization.k8s.io/v1 apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding kind: RoleBinding
metadata: metadata:

View File

@@ -1,6 +1,6 @@
{{- $containerMode := .Values.containerMode }} {{- $containerMode := .Values.containerMode }}
{{- $hasCustomResourceMeta := (and .Values.resourceMeta .Values.resourceMeta.kubernetesModeServiceAccount) }} {{- $hasCustomResourceMeta := (and .Values.resourceMeta .Values.resourceMeta.kubernetesModeServiceAccount) }}
{{- if and (eq $containerMode.type "kubernetes") (not .Values.template.spec.serviceAccountName) }} {{- if and (or (eq $containerMode.type "kubernetes") (eq $containerMode.type "kubernetes-novolume")) (not .Values.template.spec.serviceAccountName) }}
apiVersion: v1 apiVersion: v1
kind: ServiceAccount kind: ServiceAccount
metadata: metadata:

View File

@@ -204,7 +204,6 @@ func TestTemplateRenderedSetServiceAccountToNoPermission(t *testing.T) {
func TestTemplateRenderedSetServiceAccountToKubeMode(t *testing.T) { func TestTemplateRenderedSetServiceAccountToKubeMode(t *testing.T) {
t.Parallel() t.Parallel()
// Path to the helm chart we will test // Path to the helm chart we will test
helmChartPath, err := filepath.Abs("../../gha-runner-scale-set") helmChartPath, err := filepath.Abs("../../gha-runner-scale-set")
require.NoError(t, err) require.NoError(t, err)
@@ -270,6 +269,72 @@ func TestTemplateRenderedSetServiceAccountToKubeMode(t *testing.T) {
assert.Equal(t, expectedServiceAccountName, ars.Annotations[actionsgithubcom.AnnotationKeyKubernetesModeServiceAccountName]) assert.Equal(t, expectedServiceAccountName, ars.Annotations[actionsgithubcom.AnnotationKeyKubernetesModeServiceAccountName])
} }
func TestTemplateRenderedSetServiceAccountToKubeNoVolumeMode(t *testing.T) {
t.Parallel()
// Path to the helm chart we will test
helmChartPath, err := filepath.Abs("../../gha-runner-scale-set")
require.NoError(t, err)
releaseName := "test-runners"
namespaceName := "test-" + strings.ToLower(random.UniqueId())
options := &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345",
"containerMode.type": "kubernetes-novolume",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/kube_mode_serviceaccount.yaml"})
var serviceAccount corev1.ServiceAccount
helm.UnmarshalK8SYaml(t, output, &serviceAccount)
assert.Equal(t, namespaceName, serviceAccount.Namespace)
assert.Equal(t, "test-runners-gha-rs-kube-mode", serviceAccount.Name)
assert.Equal(t, "actions.github.com/cleanup-protection", serviceAccount.Finalizers[0])
output = helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/kube_mode_role.yaml"})
var role rbacv1.Role
helm.UnmarshalK8SYaml(t, output, &role)
assert.Equal(t, namespaceName, role.Namespace)
assert.Equal(t, "test-runners-gha-rs-kube-mode", role.Name)
assert.Equal(t, "actions.github.com/cleanup-protection", role.Finalizers[0])
assert.Len(t, role.Rules, 4, "kube mode role should have 4 rules")
assert.Equal(t, "pods", role.Rules[0].Resources[0])
assert.Equal(t, "pods/exec", role.Rules[1].Resources[0])
assert.Equal(t, "pods/log", role.Rules[2].Resources[0])
assert.Equal(t, "secrets", role.Rules[3].Resources[0])
output = helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/kube_mode_role_binding.yaml"})
var roleBinding rbacv1.RoleBinding
helm.UnmarshalK8SYaml(t, output, &roleBinding)
assert.Equal(t, namespaceName, roleBinding.Namespace)
assert.Equal(t, "test-runners-gha-rs-kube-mode", roleBinding.Name)
assert.Len(t, roleBinding.Subjects, 1)
assert.Equal(t, "test-runners-gha-rs-kube-mode", roleBinding.Subjects[0].Name)
assert.Equal(t, namespaceName, roleBinding.Subjects[0].Namespace)
assert.Equal(t, "test-runners-gha-rs-kube-mode", roleBinding.RoleRef.Name)
assert.Equal(t, "Role", roleBinding.RoleRef.Kind)
assert.Equal(t, "actions.github.com/cleanup-protection", serviceAccount.Finalizers[0])
output = helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"})
var ars v1alpha1.AutoscalingRunnerSet
helm.UnmarshalK8SYaml(t, output, &ars)
expectedServiceAccountName := "test-runners-gha-rs-kube-mode"
assert.Equal(t, expectedServiceAccountName, ars.Spec.Template.Spec.ServiceAccountName)
assert.Equal(t, expectedServiceAccountName, ars.Annotations[actionsgithubcom.AnnotationKeyKubernetesModeServiceAccountName])
}
func TestTemplateRenderedUserProvideSetServiceAccount(t *testing.T) { func TestTemplateRenderedUserProvideSetServiceAccount(t *testing.T) {
t.Parallel() t.Parallel()
@@ -728,20 +793,20 @@ func TestTemplateRenderedAutoScalingRunnerSet_DinD_ExtraInitContainers(t *testin
var ars v1alpha1.AutoscalingRunnerSet var ars v1alpha1.AutoscalingRunnerSet
helm.UnmarshalK8SYaml(t, output, &ars) helm.UnmarshalK8SYaml(t, output, &ars)
assert.Len(t, ars.Spec.Template.Spec.InitContainers, 3, "InitContainers should be 3") assert.Len(t, ars.Spec.Template.Spec.InitContainers, 4, "InitContainers should be 4")
assert.Equal(t, "kube-init", ars.Spec.Template.Spec.InitContainers[1].Name, "InitContainers[1] Name should be kube-init") assert.Equal(t, "kube-init", ars.Spec.Template.Spec.InitContainers[2].Name, "InitContainers[1] Name should be kube-init")
assert.Equal(t, "runner-image:latest", ars.Spec.Template.Spec.InitContainers[1].Image, "InitContainers[1] Image should be runner-image:latest") assert.Equal(t, "runner-image:latest", ars.Spec.Template.Spec.InitContainers[2].Image, "InitContainers[1] Image should be runner-image:latest")
assert.Equal(t, "sudo", ars.Spec.Template.Spec.InitContainers[1].Command[0], "InitContainers[1] Command[0] should be sudo") assert.Equal(t, "sudo", ars.Spec.Template.Spec.InitContainers[2].Command[0], "InitContainers[1] Command[0] should be sudo")
assert.Equal(t, "chown", ars.Spec.Template.Spec.InitContainers[1].Command[1], "InitContainers[1] Command[1] should be chown") assert.Equal(t, "chown", ars.Spec.Template.Spec.InitContainers[2].Command[1], "InitContainers[1] Command[1] should be chown")
assert.Equal(t, "-R", ars.Spec.Template.Spec.InitContainers[1].Command[2], "InitContainers[1] Command[2] should be -R") assert.Equal(t, "-R", ars.Spec.Template.Spec.InitContainers[2].Command[2], "InitContainers[1] Command[2] should be -R")
assert.Equal(t, "1001:123", ars.Spec.Template.Spec.InitContainers[1].Command[3], "InitContainers[1] Command[3] should be 1001:123") assert.Equal(t, "1001:123", ars.Spec.Template.Spec.InitContainers[2].Command[3], "InitContainers[1] Command[3] should be 1001:123")
assert.Equal(t, "/home/runner/_work", ars.Spec.Template.Spec.InitContainers[1].Command[4], "InitContainers[1] Command[4] should be /home/runner/_work") assert.Equal(t, "/home/runner/_work", ars.Spec.Template.Spec.InitContainers[2].Command[4], "InitContainers[1] Command[4] should be /home/runner/_work")
assert.Equal(t, "work", ars.Spec.Template.Spec.InitContainers[1].VolumeMounts[0].Name, "InitContainers[1] VolumeMounts[0] Name should be work") assert.Equal(t, "work", ars.Spec.Template.Spec.InitContainers[2].VolumeMounts[0].Name, "InitContainers[1] VolumeMounts[0] Name should be work")
assert.Equal(t, "/home/runner/_work", ars.Spec.Template.Spec.InitContainers[1].VolumeMounts[0].MountPath, "InitContainers[1] VolumeMounts[0] MountPath should be /home/runner/_work") assert.Equal(t, "/home/runner/_work", ars.Spec.Template.Spec.InitContainers[2].VolumeMounts[0].MountPath, "InitContainers[1] VolumeMounts[0] MountPath should be /home/runner/_work")
assert.Equal(t, "ls", ars.Spec.Template.Spec.InitContainers[2].Name, "InitContainers[2] Name should be ls") assert.Equal(t, "ls", ars.Spec.Template.Spec.InitContainers[3].Name, "InitContainers[2] Name should be ls")
assert.Equal(t, "ubuntu:latest", ars.Spec.Template.Spec.InitContainers[2].Image, "InitContainers[2] Image should be ubuntu:latest") assert.Equal(t, "ubuntu:latest", ars.Spec.Template.Spec.InitContainers[3].Image, "InitContainers[2] Image should be ubuntu:latest")
assert.Equal(t, "ls", ars.Spec.Template.Spec.InitContainers[2].Command[0], "InitContainers[2] Command[0] should be ls") assert.Equal(t, "ls", ars.Spec.Template.Spec.InitContainers[3].Command[0], "InitContainers[2] Command[0] should be ls")
} }
func TestTemplateRenderedAutoScalingRunnerSet_DinD_ExtraVolumes(t *testing.T) { func TestTemplateRenderedAutoScalingRunnerSet_DinD_ExtraVolumes(t *testing.T) {
@@ -860,13 +925,26 @@ func TestTemplateRenderedAutoScalingRunnerSet_EnableDinD(t *testing.T) {
assert.NotNil(t, ars.Spec.Template.Spec, "Template.Spec should not be nil") assert.NotNil(t, ars.Spec.Template.Spec, "Template.Spec should not be nil")
assert.Len(t, ars.Spec.Template.Spec.InitContainers, 1, "Template.Spec should have 1 init container") assert.Len(t, ars.Spec.Template.Spec.InitContainers, 2, "Template.Spec should have 2 init container")
assert.Equal(t, "init-dind-externals", ars.Spec.Template.Spec.InitContainers[0].Name) assert.Equal(t, "init-dind-externals", ars.Spec.Template.Spec.InitContainers[0].Name)
assert.Equal(t, "ghcr.io/actions/actions-runner:latest", ars.Spec.Template.Spec.InitContainers[0].Image) assert.Equal(t, "ghcr.io/actions/actions-runner:latest", ars.Spec.Template.Spec.InitContainers[0].Image)
assert.Equal(t, "cp", ars.Spec.Template.Spec.InitContainers[0].Command[0]) assert.Equal(t, "cp", ars.Spec.Template.Spec.InitContainers[0].Command[0])
assert.Equal(t, "-r /home/runner/externals/. /home/runner/tmpDir/", strings.Join(ars.Spec.Template.Spec.InitContainers[0].Args, " ")) assert.Equal(t, "-r /home/runner/externals/. /home/runner/tmpDir/", strings.Join(ars.Spec.Template.Spec.InitContainers[0].Args, " "))
assert.Len(t, ars.Spec.Template.Spec.Containers, 2, "Template.Spec should have 2 container") assert.Equal(t, "dind", ars.Spec.Template.Spec.InitContainers[1].Name)
assert.Equal(t, "docker:dind", ars.Spec.Template.Spec.InitContainers[1].Image)
assert.True(t, *ars.Spec.Template.Spec.InitContainers[1].SecurityContext.Privileged)
assert.Len(t, ars.Spec.Template.Spec.InitContainers[1].VolumeMounts, 3, "The dind container should have 3 volume mounts, dind-sock, work and externals")
assert.Equal(t, "work", ars.Spec.Template.Spec.InitContainers[1].VolumeMounts[0].Name)
assert.Equal(t, "/home/runner/_work", ars.Spec.Template.Spec.InitContainers[1].VolumeMounts[0].MountPath)
assert.Equal(t, "dind-sock", ars.Spec.Template.Spec.InitContainers[1].VolumeMounts[1].Name)
assert.Equal(t, "/var/run", ars.Spec.Template.Spec.InitContainers[1].VolumeMounts[1].MountPath)
assert.Equal(t, "dind-externals", ars.Spec.Template.Spec.InitContainers[1].VolumeMounts[2].Name)
assert.Equal(t, "/home/runner/externals", ars.Spec.Template.Spec.InitContainers[1].VolumeMounts[2].MountPath)
assert.Len(t, ars.Spec.Template.Spec.Containers, 1, "Template.Spec should have 1 container")
assert.Equal(t, "runner", ars.Spec.Template.Spec.Containers[0].Name) assert.Equal(t, "runner", ars.Spec.Template.Spec.Containers[0].Name)
assert.Equal(t, "ghcr.io/actions/actions-runner:latest", ars.Spec.Template.Spec.Containers[0].Image) assert.Equal(t, "ghcr.io/actions/actions-runner:latest", ars.Spec.Template.Spec.Containers[0].Image)
assert.Len(t, ars.Spec.Template.Spec.Containers[0].Env, 2, "The runner container should have 2 env vars, DOCKER_HOST and RUNNER_WAIT_FOR_DOCKER_IN_SECONDS") assert.Len(t, ars.Spec.Template.Spec.Containers[0].Env, 2, "The runner container should have 2 env vars, DOCKER_HOST and RUNNER_WAIT_FOR_DOCKER_IN_SECONDS")
@@ -883,19 +961,6 @@ func TestTemplateRenderedAutoScalingRunnerSet_EnableDinD(t *testing.T) {
assert.Equal(t, "dind-sock", ars.Spec.Template.Spec.Containers[0].VolumeMounts[1].Name) assert.Equal(t, "dind-sock", ars.Spec.Template.Spec.Containers[0].VolumeMounts[1].Name)
assert.Equal(t, "/var/run", ars.Spec.Template.Spec.Containers[0].VolumeMounts[1].MountPath) assert.Equal(t, "/var/run", ars.Spec.Template.Spec.Containers[0].VolumeMounts[1].MountPath)
assert.Equal(t, "dind", ars.Spec.Template.Spec.Containers[1].Name)
assert.Equal(t, "docker:dind", ars.Spec.Template.Spec.Containers[1].Image)
assert.True(t, *ars.Spec.Template.Spec.Containers[1].SecurityContext.Privileged)
assert.Len(t, ars.Spec.Template.Spec.Containers[1].VolumeMounts, 3, "The dind container should have 3 volume mounts, dind-sock, work and externals")
assert.Equal(t, "work", ars.Spec.Template.Spec.Containers[1].VolumeMounts[0].Name)
assert.Equal(t, "/home/runner/_work", ars.Spec.Template.Spec.Containers[1].VolumeMounts[0].MountPath)
assert.Equal(t, "dind-sock", ars.Spec.Template.Spec.Containers[1].VolumeMounts[1].Name)
assert.Equal(t, "/var/run", ars.Spec.Template.Spec.Containers[1].VolumeMounts[1].MountPath)
assert.Equal(t, "dind-externals", ars.Spec.Template.Spec.Containers[1].VolumeMounts[2].Name)
assert.Equal(t, "/home/runner/externals", ars.Spec.Template.Spec.Containers[1].VolumeMounts[2].MountPath)
assert.Len(t, ars.Spec.Template.Spec.Volumes, 3, "Volumes should be 3") assert.Len(t, ars.Spec.Template.Spec.Volumes, 3, "Volumes should be 3")
assert.Equal(t, "dind-sock", ars.Spec.Template.Spec.Volumes[0].Name, "Volume name should be dind-sock") assert.Equal(t, "dind-sock", ars.Spec.Template.Spec.Volumes[0].Name, "Volume name should be dind-sock")
assert.Equal(t, "dind-externals", ars.Spec.Template.Spec.Volumes[1].Name, "Volume name should be dind-externals") assert.Equal(t, "dind-externals", ars.Spec.Template.Spec.Volumes[1].Name, "Volume name should be dind-externals")
@@ -961,6 +1026,65 @@ func TestTemplateRenderedAutoScalingRunnerSet_EnableKubernetesMode(t *testing.T)
assert.NotNil(t, ars.Spec.Template.Spec.Volumes[0].Ephemeral, "Template.Spec should have 1 ephemeral volume") assert.NotNil(t, ars.Spec.Template.Spec.Volumes[0].Ephemeral, "Template.Spec should have 1 ephemeral volume")
} }
func TestTemplateRenderedAutoScalingRunnerSet_EnableKubernetesModeNoVolume(t *testing.T) {
t.Parallel()
// Path to the helm chart we will test
helmChartPath, err := filepath.Abs("../../gha-runner-scale-set")
require.NoError(t, err)
releaseName := "test-runners"
namespaceName := "test-" + strings.ToLower(random.UniqueId())
options := &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345",
"containerMode.type": "kubernetes-novolume",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"})
var ars v1alpha1.AutoscalingRunnerSet
helm.UnmarshalK8SYaml(t, output, &ars)
assert.Equal(t, namespaceName, ars.Namespace)
assert.Equal(t, "test-runners", ars.Name)
assert.Equal(t, "test-runners", ars.Labels["app.kubernetes.io/name"])
assert.Equal(t, "test-runners", ars.Labels["app.kubernetes.io/instance"])
assert.Equal(t, "https://github.com/actions", ars.Spec.GitHubConfigUrl)
assert.Equal(t, "test-runners-gha-rs-github-secret", ars.Spec.GitHubConfigSecret)
assert.Empty(t, ars.Spec.RunnerGroup, "RunnerGroup should be empty")
assert.Nil(t, ars.Spec.MinRunners, "MinRunners should be nil")
assert.Nil(t, ars.Spec.MaxRunners, "MaxRunners should be nil")
assert.Nil(t, ars.Spec.Proxy, "Proxy should be nil")
assert.Nil(t, ars.Spec.GitHubServerTLS, "GitHubServerTLS should be nil")
assert.NotNil(t, ars.Spec.Template.Spec, "Template.Spec should not be nil")
assert.Len(t, ars.Spec.Template.Spec.Containers, 1, "Template.Spec should have 1 container")
assert.Equal(t, "runner", ars.Spec.Template.Spec.Containers[0].Name)
assert.Equal(t, "ghcr.io/actions/actions-runner:latest", ars.Spec.Template.Spec.Containers[0].Image)
require.Len(t, ars.Spec.Template.Spec.Containers[0].Env, 4, "The runner container should have 4 env vars")
assert.Equal(t, "ACTIONS_RUNNER_CONTAINER_HOOKS", ars.Spec.Template.Spec.Containers[0].Env[0].Name)
assert.Equal(t, "/home/runner/k8s-novolume/index.js", ars.Spec.Template.Spec.Containers[0].Env[0].Value)
assert.Equal(t, "ACTIONS_RUNNER_POD_NAME", ars.Spec.Template.Spec.Containers[0].Env[1].Name)
assert.Equal(t, "ACTIONS_RUNNER_REQUIRE_JOB_CONTAINER", ars.Spec.Template.Spec.Containers[0].Env[2].Name)
assert.Equal(t, "true", ars.Spec.Template.Spec.Containers[0].Env[2].Value)
assert.Equal(t, "ACTIONS_RUNNER_IMAGE", ars.Spec.Template.Spec.Containers[0].Env[3].Name)
assert.Equal(t, ars.Spec.Template.Spec.Containers[0].Image, ars.Spec.Template.Spec.Containers[0].Env[3].Value)
assert.Len(t, ars.Spec.Template.Spec.Volumes, 0, "Template.Spec should have 0 volumes")
}
func TestTemplateRenderedAutoscalingRunnerSet_ListenerPodTemplate(t *testing.T) { func TestTemplateRenderedAutoscalingRunnerSet_ListenerPodTemplate(t *testing.T) {
t.Parallel() t.Parallel()
@@ -1140,7 +1264,7 @@ func TestTemplateRenderedWithTLS(t *testing.T) {
} }
t.Run("providing githubServerTLS.runnerMountPath", func(t *testing.T) { t.Run("providing githubServerTLS.runnerMountPath", func(t *testing.T) {
t.Run("mode: default", func(t *testing.T) { t.Run("mode default", func(t *testing.T) {
options := &helm.Options{ options := &helm.Options{
Logger: logger.Discard, Logger: logger.Discard,
SetValues: map[string]string{ SetValues: map[string]string{
@@ -1158,7 +1282,7 @@ func TestTemplateRenderedWithTLS(t *testing.T) {
ars := render(t, options) ars := render(t, options)
require.NotNil(t, ars.Spec.GitHubServerTLS) require.NotNil(t, ars.Spec.GitHubServerTLS)
expected := &v1alpha1.GitHubServerTLSConfig{ expected := &v1alpha1.TLSConfig{
CertificateFrom: &v1alpha1.TLSCertificateSource{ CertificateFrom: &v1alpha1.TLSCertificateSource{
ConfigMapKeyRef: &corev1.ConfigMapKeySelector{ ConfigMapKeyRef: &corev1.ConfigMapKeySelector{
LocalObjectReference: corev1.LocalObjectReference{ LocalObjectReference: corev1.LocalObjectReference{
@@ -1178,7 +1302,7 @@ func TestTemplateRenderedWithTLS(t *testing.T) {
} }
} }
require.NotNil(t, volume) require.NotNil(t, volume)
assert.Equal(t, "certs-configmap", volume.ConfigMap.LocalObjectReference.Name) assert.Equal(t, "certs-configmap", volume.ConfigMap.Name)
assert.Equal(t, "cert.pem", volume.ConfigMap.Items[0].Key) assert.Equal(t, "cert.pem", volume.ConfigMap.Items[0].Key)
assert.Equal(t, "cert.pem", volume.ConfigMap.Items[0].Path) assert.Equal(t, "cert.pem", volume.ConfigMap.Items[0].Path)
@@ -1199,7 +1323,7 @@ func TestTemplateRenderedWithTLS(t *testing.T) {
}) })
}) })
t.Run("mode: dind", func(t *testing.T) { t.Run("mode dind", func(t *testing.T) {
options := &helm.Options{ options := &helm.Options{
Logger: logger.Discard, Logger: logger.Discard,
SetValues: map[string]string{ SetValues: map[string]string{
@@ -1218,7 +1342,7 @@ func TestTemplateRenderedWithTLS(t *testing.T) {
ars := render(t, options) ars := render(t, options)
require.NotNil(t, ars.Spec.GitHubServerTLS) require.NotNil(t, ars.Spec.GitHubServerTLS)
expected := &v1alpha1.GitHubServerTLSConfig{ expected := &v1alpha1.TLSConfig{
CertificateFrom: &v1alpha1.TLSCertificateSource{ CertificateFrom: &v1alpha1.TLSCertificateSource{
ConfigMapKeyRef: &corev1.ConfigMapKeySelector{ ConfigMapKeyRef: &corev1.ConfigMapKeySelector{
LocalObjectReference: corev1.LocalObjectReference{ LocalObjectReference: corev1.LocalObjectReference{
@@ -1238,7 +1362,7 @@ func TestTemplateRenderedWithTLS(t *testing.T) {
} }
} }
require.NotNil(t, volume) require.NotNil(t, volume)
assert.Equal(t, "certs-configmap", volume.ConfigMap.LocalObjectReference.Name) assert.Equal(t, "certs-configmap", volume.ConfigMap.Name)
assert.Equal(t, "cert.pem", volume.ConfigMap.Items[0].Key) assert.Equal(t, "cert.pem", volume.ConfigMap.Items[0].Key)
assert.Equal(t, "cert.pem", volume.ConfigMap.Items[0].Path) assert.Equal(t, "cert.pem", volume.ConfigMap.Items[0].Path)
@@ -1259,7 +1383,7 @@ func TestTemplateRenderedWithTLS(t *testing.T) {
}) })
}) })
t.Run("mode: kubernetes", func(t *testing.T) { t.Run("mode kubernetes", func(t *testing.T) {
options := &helm.Options{ options := &helm.Options{
Logger: logger.Discard, Logger: logger.Discard,
SetValues: map[string]string{ SetValues: map[string]string{
@@ -1278,7 +1402,7 @@ func TestTemplateRenderedWithTLS(t *testing.T) {
ars := render(t, options) ars := render(t, options)
require.NotNil(t, ars.Spec.GitHubServerTLS) require.NotNil(t, ars.Spec.GitHubServerTLS)
expected := &v1alpha1.GitHubServerTLSConfig{ expected := &v1alpha1.TLSConfig{
CertificateFrom: &v1alpha1.TLSCertificateSource{ CertificateFrom: &v1alpha1.TLSCertificateSource{
ConfigMapKeyRef: &corev1.ConfigMapKeySelector{ ConfigMapKeyRef: &corev1.ConfigMapKeySelector{
LocalObjectReference: corev1.LocalObjectReference{ LocalObjectReference: corev1.LocalObjectReference{
@@ -1298,7 +1422,67 @@ func TestTemplateRenderedWithTLS(t *testing.T) {
} }
} }
require.NotNil(t, volume) require.NotNil(t, volume)
assert.Equal(t, "certs-configmap", volume.ConfigMap.LocalObjectReference.Name) assert.Equal(t, "certs-configmap", volume.ConfigMap.Name)
assert.Equal(t, "cert.pem", volume.ConfigMap.Items[0].Key)
assert.Equal(t, "cert.pem", volume.ConfigMap.Items[0].Path)
assert.Contains(t, ars.Spec.Template.Spec.Containers[0].VolumeMounts, corev1.VolumeMount{
Name: "github-server-tls-cert",
MountPath: "/runner/mount/path/cert.pem",
SubPath: "cert.pem",
})
assert.Contains(t, ars.Spec.Template.Spec.Containers[0].Env, corev1.EnvVar{
Name: "NODE_EXTRA_CA_CERTS",
Value: "/runner/mount/path/cert.pem",
})
assert.Contains(t, ars.Spec.Template.Spec.Containers[0].Env, corev1.EnvVar{
Name: "RUNNER_UPDATE_CA_CERTS",
Value: "1",
})
})
t.Run("mode kubernetes-novolume", func(t *testing.T) {
options := &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret": "pre-defined-secrets",
"githubServerTLS.certificateFrom.configMapKeyRef.name": "certs-configmap",
"githubServerTLS.certificateFrom.configMapKeyRef.key": "cert.pem",
"githubServerTLS.runnerMountPath": "/runner/mount/path",
"containerMode.type": "kubernetes-novolume",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
ars := render(t, options)
require.NotNil(t, ars.Spec.GitHubServerTLS)
expected := &v1alpha1.TLSConfig{
CertificateFrom: &v1alpha1.TLSCertificateSource{
ConfigMapKeyRef: &corev1.ConfigMapKeySelector{
LocalObjectReference: corev1.LocalObjectReference{
Name: "certs-configmap",
},
Key: "cert.pem",
},
},
}
assert.Equal(t, expected, ars.Spec.GitHubServerTLS)
var volume *corev1.Volume
for _, v := range ars.Spec.Template.Spec.Volumes {
if v.Name == "github-server-tls-cert" {
volume = &v
break
}
}
require.NotNil(t, volume)
assert.Equal(t, "certs-configmap", volume.ConfigMap.Name)
assert.Equal(t, "cert.pem", volume.ConfigMap.Items[0].Key) assert.Equal(t, "cert.pem", volume.ConfigMap.Items[0].Key)
assert.Equal(t, "cert.pem", volume.ConfigMap.Items[0].Path) assert.Equal(t, "cert.pem", volume.ConfigMap.Items[0].Path)
@@ -1321,7 +1505,7 @@ func TestTemplateRenderedWithTLS(t *testing.T) {
}) })
t.Run("without providing githubServerTLS.runnerMountPath", func(t *testing.T) { t.Run("without providing githubServerTLS.runnerMountPath", func(t *testing.T) {
t.Run("mode: default", func(t *testing.T) { t.Run("mode default", func(t *testing.T) {
options := &helm.Options{ options := &helm.Options{
Logger: logger.Discard, Logger: logger.Discard,
SetValues: map[string]string{ SetValues: map[string]string{
@@ -1338,7 +1522,7 @@ func TestTemplateRenderedWithTLS(t *testing.T) {
ars := render(t, options) ars := render(t, options)
require.NotNil(t, ars.Spec.GitHubServerTLS) require.NotNil(t, ars.Spec.GitHubServerTLS)
expected := &v1alpha1.GitHubServerTLSConfig{ expected := &v1alpha1.TLSConfig{
CertificateFrom: &v1alpha1.TLSCertificateSource{ CertificateFrom: &v1alpha1.TLSCertificateSource{
ConfigMapKeyRef: &corev1.ConfigMapKeySelector{ ConfigMapKeyRef: &corev1.ConfigMapKeySelector{
LocalObjectReference: corev1.LocalObjectReference{ LocalObjectReference: corev1.LocalObjectReference{
@@ -1376,7 +1560,7 @@ func TestTemplateRenderedWithTLS(t *testing.T) {
}) })
}) })
t.Run("mode: dind", func(t *testing.T) { t.Run("mode dind", func(t *testing.T) {
options := &helm.Options{ options := &helm.Options{
Logger: logger.Discard, Logger: logger.Discard,
SetValues: map[string]string{ SetValues: map[string]string{
@@ -1394,7 +1578,7 @@ func TestTemplateRenderedWithTLS(t *testing.T) {
ars := render(t, options) ars := render(t, options)
require.NotNil(t, ars.Spec.GitHubServerTLS) require.NotNil(t, ars.Spec.GitHubServerTLS)
expected := &v1alpha1.GitHubServerTLSConfig{ expected := &v1alpha1.TLSConfig{
CertificateFrom: &v1alpha1.TLSCertificateSource{ CertificateFrom: &v1alpha1.TLSCertificateSource{
ConfigMapKeyRef: &corev1.ConfigMapKeySelector{ ConfigMapKeyRef: &corev1.ConfigMapKeySelector{
LocalObjectReference: corev1.LocalObjectReference{ LocalObjectReference: corev1.LocalObjectReference{
@@ -1432,7 +1616,7 @@ func TestTemplateRenderedWithTLS(t *testing.T) {
}) })
}) })
t.Run("mode: kubernetes", func(t *testing.T) { t.Run("mode kubernetes", func(t *testing.T) {
options := &helm.Options{ options := &helm.Options{
Logger: logger.Discard, Logger: logger.Discard,
SetValues: map[string]string{ SetValues: map[string]string{
@@ -1450,7 +1634,63 @@ func TestTemplateRenderedWithTLS(t *testing.T) {
ars := render(t, options) ars := render(t, options)
require.NotNil(t, ars.Spec.GitHubServerTLS) require.NotNil(t, ars.Spec.GitHubServerTLS)
expected := &v1alpha1.GitHubServerTLSConfig{ expected := &v1alpha1.TLSConfig{
CertificateFrom: &v1alpha1.TLSCertificateSource{
ConfigMapKeyRef: &corev1.ConfigMapKeySelector{
LocalObjectReference: corev1.LocalObjectReference{
Name: "certs-configmap",
},
Key: "cert.pem",
},
},
}
assert.Equal(t, expected, ars.Spec.GitHubServerTLS)
var volume *corev1.Volume
for _, v := range ars.Spec.Template.Spec.Volumes {
if v.Name == "github-server-tls-cert" {
volume = &v
break
}
}
assert.Nil(t, volume)
assert.NotContains(t, ars.Spec.Template.Spec.Containers[0].VolumeMounts, corev1.VolumeMount{
Name: "github-server-tls-cert",
MountPath: "/runner/mount/path/cert.pem",
SubPath: "cert.pem",
})
assert.NotContains(t, ars.Spec.Template.Spec.Containers[0].Env, corev1.EnvVar{
Name: "NODE_EXTRA_CA_CERTS",
Value: "/runner/mount/path/cert.pem",
})
assert.NotContains(t, ars.Spec.Template.Spec.Containers[0].Env, corev1.EnvVar{
Name: "RUNNER_UPDATE_CA_CERTS",
Value: "1",
})
})
t.Run("mode kubernetes-novolume", func(t *testing.T) {
options := &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret": "pre-defined-secrets",
"githubServerTLS.certificateFrom.configMapKeyRef.name": "certs-configmap",
"githubServerTLS.certificateFrom.configMapKeyRef.key": "cert.pem",
"containerMode.type": "kubernetes-novolume",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
ars := render(t, options)
require.NotNil(t, ars.Spec.GitHubServerTLS)
expected := &v1alpha1.TLSConfig{
CertificateFrom: &v1alpha1.TLSCertificateSource{ CertificateFrom: &v1alpha1.TLSCertificateSource{
ConfigMapKeyRef: &corev1.ConfigMapKeySelector{ ConfigMapKeyRef: &corev1.ConfigMapKeySelector{
LocalObjectReference: corev1.LocalObjectReference{ LocalObjectReference: corev1.LocalObjectReference{
@@ -1826,7 +2066,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_DinDMergePodSpec(t *testing.T) {
var ars v1alpha1.AutoscalingRunnerSet var ars v1alpha1.AutoscalingRunnerSet
helm.UnmarshalK8SYaml(t, output, &ars) helm.UnmarshalK8SYaml(t, output, &ars)
assert.Len(t, ars.Spec.Template.Spec.Containers, 2, "There should be 2 containers") assert.Len(t, ars.Spec.Template.Spec.Containers, 1, "There should be 1 containers")
assert.Equal(t, "runner", ars.Spec.Template.Spec.Containers[0].Name, "Container name should be runner") assert.Equal(t, "runner", ars.Spec.Template.Spec.Containers[0].Name, "Container name should be runner")
assert.Equal(t, "250m", ars.Spec.Template.Spec.Containers[0].Resources.Limits.Cpu().String(), "CPU Limit should be set") assert.Equal(t, "250m", ars.Spec.Template.Spec.Containers[0].Resources.Limits.Cpu().String(), "CPU Limit should be set")
assert.Equal(t, "64Mi", ars.Spec.Template.Spec.Containers[0].Resources.Limits.Memory().String(), "Memory Limit should be set") assert.Equal(t, "64Mi", ars.Spec.Template.Spec.Containers[0].Resources.Limits.Memory().String(), "Memory Limit should be set")
@@ -1951,40 +2191,44 @@ func TestTemplateRenderedAutoscalingRunnerSetAnnotation_GitHubSecret(t *testing.
func TestTemplateRenderedAutoscalingRunnerSetAnnotation_KubernetesModeCleanup(t *testing.T) { func TestTemplateRenderedAutoscalingRunnerSetAnnotation_KubernetesModeCleanup(t *testing.T) {
t.Parallel() t.Parallel()
// Path to the helm chart we will test for _, mode := range []string{"kubernetes", "kubernetes-novolume"} {
helmChartPath, err := filepath.Abs("../../gha-runner-scale-set") t.Run("containerMode "+mode, func(t *testing.T) {
require.NoError(t, err) // Path to the helm chart we will test
helmChartPath, err := filepath.Abs("../../gha-runner-scale-set")
require.NoError(t, err)
releaseName := "test-runners" releaseName := "test-runners"
namespaceName := "test-" + strings.ToLower(random.UniqueId()) namespaceName := "test-" + strings.ToLower(random.UniqueId())
options := &helm.Options{ options := &helm.Options{
Logger: logger.Discard, Logger: logger.Discard,
SetValues: map[string]string{ SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions", "githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345", "githubConfigSecret.github_token": "gh_token12345",
"controllerServiceAccount.name": "arc", "controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system", "controllerServiceAccount.namespace": "arc-system",
"containerMode.type": "kubernetes", "containerMode.type": mode,
}, },
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName), KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
} }
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"}) output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"})
var autoscalingRunnerSet v1alpha1.AutoscalingRunnerSet var autoscalingRunnerSet v1alpha1.AutoscalingRunnerSet
helm.UnmarshalK8SYaml(t, output, &autoscalingRunnerSet) helm.UnmarshalK8SYaml(t, output, &autoscalingRunnerSet)
annotationValues := map[string]string{ annotationValues := map[string]string{
actionsgithubcom.AnnotationKeyGitHubSecretName: "test-runners-gha-rs-github-secret", actionsgithubcom.AnnotationKeyGitHubSecretName: "test-runners-gha-rs-github-secret",
actionsgithubcom.AnnotationKeyManagerRoleName: "test-runners-gha-rs-manager", actionsgithubcom.AnnotationKeyManagerRoleName: "test-runners-gha-rs-manager",
actionsgithubcom.AnnotationKeyManagerRoleBindingName: "test-runners-gha-rs-manager", actionsgithubcom.AnnotationKeyManagerRoleBindingName: "test-runners-gha-rs-manager",
actionsgithubcom.AnnotationKeyKubernetesModeServiceAccountName: "test-runners-gha-rs-kube-mode", actionsgithubcom.AnnotationKeyKubernetesModeServiceAccountName: "test-runners-gha-rs-kube-mode",
actionsgithubcom.AnnotationKeyKubernetesModeRoleName: "test-runners-gha-rs-kube-mode", actionsgithubcom.AnnotationKeyKubernetesModeRoleName: "test-runners-gha-rs-kube-mode",
actionsgithubcom.AnnotationKeyKubernetesModeRoleBindingName: "test-runners-gha-rs-kube-mode", actionsgithubcom.AnnotationKeyKubernetesModeRoleBindingName: "test-runners-gha-rs-kube-mode",
} }
for annotation, value := range annotationValues { for annotation, value := range annotationValues {
assert.Equal(t, value, autoscalingRunnerSet.Annotations[annotation], fmt.Sprintf("Annotation %q does not match the expected value", annotation)) assert.Equal(t, value, autoscalingRunnerSet.Annotations[annotation], fmt.Sprintf("Annotation %q does not match the expected value", annotation))
}
})
} }
} }
@@ -2416,6 +2660,21 @@ func TestNamespaceOverride(t *testing.T) {
KubectlOptions: k8s.NewKubectlOptions("", "", releaseNamespace), KubectlOptions: k8s.NewKubectlOptions("", "", releaseNamespace),
}, },
}, },
"kube_novolume_mode_role": {
file: "kube_mode_role.yaml",
options: &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"namespaceOverride": namespaceOverride,
"containerMode.type": "kubernetes-novolume",
"controllerServiceAccount.name": "foo",
"controllerServiceAccount.namespace": "bar",
"githubConfigSecret.github_token": "gh_token12345",
"githubConfigUrl": "https://github.com",
},
KubectlOptions: k8s.NewKubectlOptions("", "", releaseNamespace),
},
},
"kube_mode_role_binding": { "kube_mode_role_binding": {
file: "kube_mode_role_binding.yaml", file: "kube_mode_role_binding.yaml",
options: &helm.Options{ options: &helm.Options{
@@ -2431,6 +2690,21 @@ func TestNamespaceOverride(t *testing.T) {
KubectlOptions: k8s.NewKubectlOptions("", "", releaseNamespace), KubectlOptions: k8s.NewKubectlOptions("", "", releaseNamespace),
}, },
}, },
"kube_novolume_mode_role_binding": {
file: "kube_mode_role_binding.yaml",
options: &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"namespaceOverride": namespaceOverride,
"containerMode.type": "kubernetes-novolume",
"controllerServiceAccount.name": "foo",
"controllerServiceAccount.namespace": "bar",
"githubConfigSecret.github_token": "gh_token12345",
"githubConfigUrl": "https://github.com",
},
KubectlOptions: k8s.NewKubectlOptions("", "", releaseNamespace),
},
},
"kube_mode_serviceaccount": { "kube_mode_serviceaccount": {
file: "kube_mode_serviceaccount.yaml", file: "kube_mode_serviceaccount.yaml",
options: &helm.Options{ options: &helm.Options{
@@ -2446,6 +2720,21 @@ func TestNamespaceOverride(t *testing.T) {
KubectlOptions: k8s.NewKubectlOptions("", "", releaseNamespace), KubectlOptions: k8s.NewKubectlOptions("", "", releaseNamespace),
}, },
}, },
"kube_novolume_mode_serviceaccount": {
file: "kube_mode_serviceaccount.yaml",
options: &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"namespaceOverride": namespaceOverride,
"containerMode.type": "kubernetes-novolume",
"controllerServiceAccount.name": "foo",
"controllerServiceAccount.namespace": "bar",
"githubConfigSecret.github_token": "gh_token12345",
"githubConfigUrl": "https://github.com",
},
KubectlOptions: k8s.NewKubectlOptions("", "", releaseNamespace),
},
},
} }
for name, tc := range tt { for name, tc := range tt {
@@ -2468,3 +2757,43 @@ func TestNamespaceOverride(t *testing.T) {
}) })
} }
} }
func TestAutoscalingRunnerSetCustomAnnotationsAndLabelsApplied(t *testing.T) {
t.Parallel()
// Path to the helm chart we will test
helmChartPath, err := filepath.Abs("../../gha-runner-scale-set")
require.NoError(t, err)
releaseName := "test-runners"
namespaceName := "test-" + strings.ToLower(random.UniqueId())
options := &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
"annotations.actions\\.github\\.com/vault": "azure_key_vault",
"annotations.actions\\.github\\.com/cleanup-manager-role-name": "not-propagated",
"labels.custom": "custom",
"labels.app\\.kubernetes\\.io/component": "not-propagated",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"})
var autoscalingRunnerSet v1alpha1.AutoscalingRunnerSet
helm.UnmarshalK8SYaml(t, output, &autoscalingRunnerSet)
vault := autoscalingRunnerSet.Annotations["actions.github.com/vault"]
assert.Equal(t, "azure_key_vault", vault)
custom := autoscalingRunnerSet.Labels["custom"]
assert.Equal(t, "custom", custom)
assert.NotEqual(t, "not-propagated", autoscalingRunnerSet.Annotations["actions.github.com/cleanup-manager-role-name"])
assert.NotEqual(t, "not-propagated", autoscalingRunnerSet.Labels["app.kubernetes.io/component"])
}

View File

@@ -1,12 +1,12 @@
## githubConfigUrl is the GitHub url for where you want to configure runners ## githubConfigUrl is the GitHub url for where you want to configure runners
## ex: https://github.com/myorg/myrepo or https://github.com/myorg ## ex: https://github.com/myorg/myrepo or https://github.com/myorg or https://github.com/enterprises/myenterprise
githubConfigUrl: "" githubConfigUrl: ""
## githubConfigSecret is the k8s secret information to use when authenticating via the GitHub API. ## githubConfigSecret is the k8s secret information to use when authenticating via the GitHub API.
## You can choose to supply: ## You can choose to supply:
## A) a PAT token, ## A) a PAT token,
## B) a GitHub App, or ## B) a GitHub App, or
## C) a pre-defined Kubernetes secret. ## C) a pre-defined secret.
## The syntax for each of these variations is documented below. ## The syntax for each of these variations is documented below.
## (Variation A) When using a PAT token, the syntax is as follows: ## (Variation A) When using a PAT token, the syntax is as follows:
githubConfigSecret: githubConfigSecret:
@@ -17,6 +17,7 @@ githubConfigSecret:
## (Variation B) When using a GitHub App, the syntax is as follows: ## (Variation B) When using a GitHub App, the syntax is as follows:
# githubConfigSecret: # githubConfigSecret:
# # NOTE: IDs MUST be strings, use quotes # # NOTE: IDs MUST be strings, use quotes
# # The github_app_id can be an app_id or the client_id
# github_app_id: "" # github_app_id: ""
# github_app_installation_id: "" # github_app_installation_id: ""
# github_app_private_key: | # github_app_private_key: |
@@ -27,8 +28,11 @@ githubConfigSecret:
# . # .
# private key line N # private key line N
# #
## (Variation C) When using a pre-defined Kubernetes secret in the same namespace that the gha-runner-scale-set is going to deploy, ## (Variation C) When using a pre-defined secret.
## the syntax is as follows: ## The secret can be pulled either directly from Kubernetes, or from the vault, depending on configuration.
## Kubernetes secret in the same namespace that the gha-runner-scale-set is going to deploy.
## On the other hand, if the vault is configured, secret name will be used to fetch the app configuration.
## The syntax is as follows:
# githubConfigSecret: pre-defined-secret # githubConfigSecret: pre-defined-secret
## Notes on using pre-defined Kubernetes secrets: ## Notes on using pre-defined Kubernetes secrets:
## You need to make sure your predefined secret has all the required secret data set properly. ## You need to make sure your predefined secret has all the required secret data set properly.
@@ -84,6 +88,26 @@ githubConfigSecret:
# key: ca.crt # key: ca.crt
# runnerMountPath: /usr/local/share/ca-certificates/ # runnerMountPath: /usr/local/share/ca-certificates/
# keyVault:
# Available values: "azure_key_vault"
# type: ""
# Configuration related to azure key vault
# azure_key_vault:
# url: ""
# client_id: ""
# tenant_id: ""
# certificate_path: ""
# proxy:
# http:
# url: http://proxy.com:1234
# credentialSecretRef: proxy-auth # a secret with `username` and `password` keys
# https:
# url: http://proxy.com:1234
# credentialSecretRef: proxy-auth # a secret with `username` and `password` keys
# noProxy:
# - example.com
# - example.org
## Container mode is an object that provides out-of-box configuration ## Container mode is an object that provides out-of-box configuration
## for dind and kubernetes mode. Template will be modified as documented under the ## for dind and kubernetes mode. Template will be modified as documented under the
## template object. ## template object.
@@ -91,7 +115,7 @@ githubConfigSecret:
## If any customization is required for dind or kubernetes mode, containerMode should remain ## If any customization is required for dind or kubernetes mode, containerMode should remain
## empty, and configuration should be applied to the template. ## empty, and configuration should be applied to the template.
# containerMode: # containerMode:
# type: "dind" ## type can be set to dind or kubernetes # type: "dind" ## type can be set to "dind", "kubernetes", or "kubernetes-novolume"
# ## the following is required when containerMode.type=kubernetes # ## the following is required when containerMode.type=kubernetes
# kubernetesModeWorkVolumeClaim: # kubernetesModeWorkVolumeClaim:
# accessModes: ["ReadWriteOnce"] # accessModes: ["ReadWriteOnce"]
@@ -130,7 +154,7 @@ githubConfigSecret:
# counters: # counters:
# gha_started_jobs_total: # gha_started_jobs_total:
# labels: # labels:
# ["repository", "organization", "enterprise", "job_name", "event_name"] # ["repository", "organization", "enterprise", "job_name", "event_name", "job_workflow_ref", "job_workflow_name", "job_workflow_target"]
# gha_completed_jobs_total: # gha_completed_jobs_total:
# labels: # labels:
# [ # [
@@ -140,6 +164,9 @@ githubConfigSecret:
# "job_name", # "job_name",
# "event_name", # "event_name",
# "job_result", # "job_result",
# "job_workflow_ref",
# "job_workflow_name",
# "job_workflow_target",
# ] # ]
# gauges: # gauges:
# gha_assigned_jobs: # gha_assigned_jobs:
@@ -161,7 +188,7 @@ githubConfigSecret:
# histograms: # histograms:
# gha_job_startup_duration_seconds: # gha_job_startup_duration_seconds:
# labels: # labels:
# ["repository", "organization", "enterprise", "job_name", "event_name"] # ["repository", "organization", "enterprise", "job_name", "event_name","job_workflow_ref", "job_workflow_name", "job_workflow_target"]
# buckets: # buckets:
# [ # [
# 0.01, # 0.01,
@@ -219,6 +246,9 @@ githubConfigSecret:
# "job_name", # "job_name",
# "event_name", # "event_name",
# "job_result", # "job_result",
# "job_workflow_ref",
# "job_workflow_name",
# "job_workflow_target"
# ] # ]
# buckets: # buckets:
# [ # [
@@ -283,18 +313,6 @@ template:
## volumeMounts: ## volumeMounts:
## - name: dind-externals ## - name: dind-externals
## mountPath: /home/runner/tmpDir ## mountPath: /home/runner/tmpDir
## containers:
## - name: runner
## image: ghcr.io/actions/actions-runner:latest
## command: ["/home/runner/run.sh"]
## env:
## - name: DOCKER_HOST
## value: unix:///var/run/docker.sock
## volumeMounts:
## - name: work
## mountPath: /home/runner/_work
## - name: dind-sock
## mountPath: /var/run
## - name: dind ## - name: dind
## image: docker:dind ## image: docker:dind
## args: ## args:
@@ -306,6 +324,15 @@ template:
## value: "123" ## value: "123"
## securityContext: ## securityContext:
## privileged: true ## privileged: true
## restartPolicy: Always
## startupProbe:
## exec:
## command:
## - docker
## - info
## initialDelaySeconds: 0
## failureThreshold: 24
## periodSeconds: 5
## volumeMounts: ## volumeMounts:
## - name: work ## - name: work
## mountPath: /home/runner/_work ## mountPath: /home/runner/_work
@@ -313,6 +340,20 @@ template:
## mountPath: /var/run ## mountPath: /var/run
## - name: dind-externals ## - name: dind-externals
## mountPath: /home/runner/externals ## mountPath: /home/runner/externals
## containers:
## - name: runner
## image: ghcr.io/actions/actions-runner:latest
## command: ["/home/runner/run.sh"]
## env:
## - name: DOCKER_HOST
## value: unix:///var/run/docker.sock
## - name: RUNNER_WAIT_FOR_DOCKER_IN_SECONDS
## value: "120"
## volumeMounts:
## - name: work
## mountPath: /home/runner/_work
## - name: dind-sock
## mountPath: /var/run
## volumes: ## volumes:
## - name: work ## - name: work
## emptyDir: {} ## emptyDir: {}
@@ -350,6 +391,25 @@ template:
## resources: ## resources:
## requests: ## requests:
## storage: 1Gi ## storage: 1Gi
######################################################################################################
## with containerMode.type=kubernetes-novolume, we will populate the template.spec with following pod spec
## template:
## spec:
## containers:
## - name: runner
## image: ghcr.io/actions/actions-runner:latest
## command: ["/home/runner/run.sh"]
## env:
## - name: ACTIONS_RUNNER_CONTAINER_HOOKS
## value: /home/runner/k8s-novolume/index.js
## - name: ACTIONS_RUNNER_POD_NAME
## valueFrom:
## fieldRef:
## fieldPath: metadata.name
## - name: ACTIONS_RUNNER_IMAGE
## value: ghcr.io/actions/actions-runner:latest # should match the runnerimage
## - name: ACTIONS_RUNNER_REQUIRE_JOB_CONTAINER
## value: "true"
spec: spec:
containers: containers:
- name: runner - name: runner

View File

@@ -17,7 +17,7 @@ import (
// App is responsible for initializing required components and running the app. // App is responsible for initializing required components and running the app.
type App struct { type App struct {
// configured fields // configured fields
config config.Config config *config.Config
logger logr.Logger logger logr.Logger
// initialized fields // initialized fields
@@ -38,8 +38,12 @@ type Worker interface {
} }
func New(config config.Config) (*App, error) { func New(config config.Config) (*App, error) {
if err := config.Validate(); err != nil {
return nil, fmt.Errorf("failed to validate config: %w", err)
}
app := &App{ app := &App{
config: config, config: &config,
} }
ghConfig, err := actions.ParseGitHubConfigFromURL(config.ConfigureUrl) ghConfig, err := actions.ParseGitHubConfigFromURL(config.ConfigureUrl)
@@ -69,8 +73,8 @@ func New(config config.Config) (*App, error) {
Repository: ghConfig.Repository, Repository: ghConfig.Repository,
ServerAddr: config.MetricsAddr, ServerAddr: config.MetricsAddr,
ServerEndpoint: config.MetricsEndpoint, ServerEndpoint: config.MetricsEndpoint,
Metrics: config.Metrics,
Logger: app.logger.WithName("metrics exporter"), Logger: app.logger.WithName("metrics exporter"),
Metrics: *config.Metrics,
}) })
} }

View File

@@ -1,6 +1,7 @@
package config package config
import ( import (
"context"
"crypto/x509" "crypto/x509"
"encoding/json" "encoding/json"
"fmt" "fmt"
@@ -9,19 +10,26 @@ import (
"os" "os"
"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1" "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1/appconfig"
"github.com/actions/actions-runner-controller/build" "github.com/actions/actions-runner-controller/build"
"github.com/actions/actions-runner-controller/github/actions" "github.com/actions/actions-runner-controller/github/actions"
"github.com/actions/actions-runner-controller/logging" "github.com/actions/actions-runner-controller/logging"
"github.com/actions/actions-runner-controller/vault"
"github.com/actions/actions-runner-controller/vault/azurekeyvault"
"github.com/go-logr/logr" "github.com/go-logr/logr"
"golang.org/x/net/http/httpproxy" "golang.org/x/net/http/httpproxy"
) )
type Config struct { type Config struct {
ConfigureUrl string `json:"configure_url"` ConfigureUrl string `json:"configure_url"`
AppID int64 `json:"app_id"` VaultType vault.VaultType `json:"vault_type"`
AppInstallationID int64 `json:"app_installation_id"` VaultLookupKey string `json:"vault_lookup_key"`
AppPrivateKey string `json:"app_private_key"` // If the VaultType is set to "azure_key_vault", this field must be populated.
Token string `json:"token"` AzureKeyVaultConfig *azurekeyvault.Config `json:"azure_key_vault,omitempty"`
// AppConfig contains the GitHub App configuration.
// It is initially set to nil if VaultType is set.
// Otherwise, it is populated with the GitHub App credentials from the GitHub secret.
*appconfig.AppConfig
EphemeralRunnerSetNamespace string `json:"ephemeral_runner_set_namespace"` EphemeralRunnerSetNamespace string `json:"ephemeral_runner_set_namespace"`
EphemeralRunnerSetName string `json:"ephemeral_runner_set_name"` EphemeralRunnerSetName string `json:"ephemeral_runner_set_name"`
MaxRunners int `json:"max_runners"` MaxRunners int `json:"max_runners"`
@@ -36,23 +44,58 @@ type Config struct {
Metrics *v1alpha1.MetricsConfig `json:"metrics"` Metrics *v1alpha1.MetricsConfig `json:"metrics"`
} }
func Read(path string) (Config, error) { func Read(ctx context.Context, configPath string) (*Config, error) {
f, err := os.Open(path) f, err := os.Open(configPath)
if err != nil { if err != nil {
return Config{}, err return nil, err
} }
defer f.Close() defer f.Close()
var config Config var config Config
if err := json.NewDecoder(f).Decode(&config); err != nil { if err := json.NewDecoder(f).Decode(&config); err != nil {
return Config{}, fmt.Errorf("failed to decode config: %w", err) return nil, fmt.Errorf("failed to decode config: %w", err)
} }
var vault vault.Vault
switch config.VaultType {
case "":
if err := config.Validate(); err != nil {
return nil, fmt.Errorf("failed to validate configuration: %v", err)
}
return &config, nil
case "azure_key_vault":
akv, err := azurekeyvault.New(*config.AzureKeyVaultConfig)
if err != nil {
return nil, fmt.Errorf("failed to create Azure Key Vault client: %w", err)
}
vault = akv
default:
return nil, fmt.Errorf("unsupported vault type: %s", config.VaultType)
}
appConfigRaw, err := vault.GetSecret(ctx, config.VaultLookupKey)
if err != nil {
return nil, fmt.Errorf("failed to get app config from vault: %w", err)
}
appConfig, err := appconfig.FromJSONString(appConfigRaw)
if err != nil {
return nil, fmt.Errorf("failed to read app config from string: %v", err)
}
config.AppConfig = appConfig
if err := config.Validate(); err != nil { if err := config.Validate(); err != nil {
return Config{}, fmt.Errorf("failed to validate config: %w", err) return nil, fmt.Errorf("config validation failed: %w", err)
} }
return config, nil if ctx.Err() != nil {
return nil, ctx.Err()
}
return &config, nil
} }
// Validate checks the configuration for errors. // Validate checks the configuration for errors.
@@ -62,26 +105,30 @@ func (c *Config) Validate() error {
} }
if len(c.EphemeralRunnerSetNamespace) == 0 || len(c.EphemeralRunnerSetName) == 0 { if len(c.EphemeralRunnerSetNamespace) == 0 || len(c.EphemeralRunnerSetName) == 0 {
return fmt.Errorf("EphemeralRunnerSetNamespace '%s' or EphemeralRunnerSetName '%s' is missing", c.EphemeralRunnerSetNamespace, c.EphemeralRunnerSetName) return fmt.Errorf("EphemeralRunnerSetNamespace %q or EphemeralRunnerSetName %q is missing", c.EphemeralRunnerSetNamespace, c.EphemeralRunnerSetName)
} }
if c.RunnerScaleSetId == 0 { if c.RunnerScaleSetId == 0 {
return fmt.Errorf("RunnerScaleSetId '%d' is missing", c.RunnerScaleSetId) return fmt.Errorf(`RunnerScaleSetId "%d" is missing`, c.RunnerScaleSetId)
} }
if c.MaxRunners < c.MinRunners { if c.MaxRunners < c.MinRunners {
return fmt.Errorf("MinRunners '%d' cannot be greater than MaxRunners '%d'", c.MinRunners, c.MaxRunners) return fmt.Errorf(`MinRunners "%d" cannot be greater than MaxRunners "%d"`, c.MinRunners, c.MaxRunners)
} }
hasToken := len(c.Token) > 0 if c.VaultType != "" {
hasPrivateKeyConfig := c.AppID > 0 && c.AppPrivateKey != "" if err := c.VaultType.Validate(); err != nil {
return fmt.Errorf("VaultType validation failed: %w", err)
if !hasToken && !hasPrivateKeyConfig { }
return fmt.Errorf("GitHub auth credential is missing, token length: '%d', appId: '%d', installationId: '%d', private key length: '%d", len(c.Token), c.AppID, c.AppInstallationID, len(c.AppPrivateKey)) if c.VaultLookupKey == "" {
return fmt.Errorf("VaultLookupKey is required when VaultType is set to %q", c.VaultType)
}
} }
if hasToken && hasPrivateKeyConfig { if c.VaultType == "" && c.VaultLookupKey == "" {
return fmt.Errorf("only one GitHub auth method supported at a time. Have both PAT and App auth: token length: '%d', appId: '%d', installationId: '%d', private key length: '%d", len(c.Token), c.AppID, c.AppInstallationID, len(c.AppPrivateKey)) if err := c.AppConfig.Validate(); err != nil {
return fmt.Errorf("AppConfig validation failed: %w", err)
}
} }
return nil return nil

View File

@@ -9,6 +9,7 @@ import (
"path/filepath" "path/filepath"
"testing" "testing"
"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1/appconfig"
"github.com/actions/actions-runner-controller/cmd/ghalistener/config" "github.com/actions/actions-runner-controller/cmd/ghalistener/config"
"github.com/actions/actions-runner-controller/github/actions" "github.com/actions/actions-runner-controller/github/actions"
"github.com/actions/actions-runner-controller/github/actions/testserver" "github.com/actions/actions-runner-controller/github/actions/testserver"
@@ -53,7 +54,9 @@ func TestCustomerServerRootCA(t *testing.T) {
config := config.Config{ config := config.Config{
ConfigureUrl: server.ConfigURLForOrg("myorg"), ConfigureUrl: server.ConfigURLForOrg("myorg"),
ServerRootCA: certsString, ServerRootCA: certsString,
Token: "token", AppConfig: &appconfig.AppConfig{
Token: "token",
},
} }
client, err := config.ActionsClient(logr.Discard()) client, err := config.ActionsClient(logr.Discard())
@@ -80,7 +83,9 @@ func TestProxySettings(t *testing.T) {
config := config.Config{ config := config.Config{
ConfigureUrl: "https://github.com/org/repo", ConfigureUrl: "https://github.com/org/repo",
Token: "token", AppConfig: &appconfig.AppConfig{
Token: "token",
},
} }
client, err := config.ActionsClient(logr.Discard()) client, err := config.ActionsClient(logr.Discard())
@@ -110,7 +115,9 @@ func TestProxySettings(t *testing.T) {
config := config.Config{ config := config.Config{
ConfigureUrl: "https://github.com/org/repo", ConfigureUrl: "https://github.com/org/repo",
Token: "token", AppConfig: &appconfig.AppConfig{
Token: "token",
},
} }
client, err := config.ActionsClient(logr.Discard(), actions.WithRetryMax(0)) client, err := config.ActionsClient(logr.Discard(), actions.WithRetryMax(0))
@@ -145,7 +152,9 @@ func TestProxySettings(t *testing.T) {
config := config.Config{ config := config.Config{
ConfigureUrl: "https://github.com/org/repo", ConfigureUrl: "https://github.com/org/repo",
Token: "token", AppConfig: &appconfig.AppConfig{
Token: "token",
},
} }
client, err := config.ActionsClient(logr.Discard()) client, err := config.ActionsClient(logr.Discard())

View File

@@ -1,92 +0,0 @@
package config
import (
"fmt"
"testing"
"github.com/stretchr/testify/assert"
)
func TestConfigValidationMinMax(t *testing.T) {
config := &Config{
ConfigureUrl: "github.com/some_org/some_repo",
EphemeralRunnerSetNamespace: "namespace",
EphemeralRunnerSetName: "deployment",
RunnerScaleSetId: 1,
MinRunners: 5,
MaxRunners: 2,
Token: "token",
}
err := config.Validate()
assert.ErrorContains(t, err, "MinRunners '5' cannot be greater than MaxRunners '2", "Expected error about MinRunners > MaxRunners")
}
func TestConfigValidationMissingToken(t *testing.T) {
config := &Config{
ConfigureUrl: "github.com/some_org/some_repo",
EphemeralRunnerSetNamespace: "namespace",
EphemeralRunnerSetName: "deployment",
RunnerScaleSetId: 1,
}
err := config.Validate()
expectedError := fmt.Sprintf("GitHub auth credential is missing, token length: '%d', appId: '%d', installationId: '%d', private key length: '%d", len(config.Token), config.AppID, config.AppInstallationID, len(config.AppPrivateKey))
assert.ErrorContains(t, err, expectedError, "Expected error about missing auth")
}
func TestConfigValidationAppKey(t *testing.T) {
config := &Config{
AppID: 1,
AppInstallationID: 10,
ConfigureUrl: "github.com/some_org/some_repo",
EphemeralRunnerSetNamespace: "namespace",
EphemeralRunnerSetName: "deployment",
RunnerScaleSetId: 1,
}
err := config.Validate()
expectedError := fmt.Sprintf("GitHub auth credential is missing, token length: '%d', appId: '%d', installationId: '%d', private key length: '%d", len(config.Token), config.AppID, config.AppInstallationID, len(config.AppPrivateKey))
assert.ErrorContains(t, err, expectedError, "Expected error about missing auth")
}
func TestConfigValidationOnlyOneTypeOfCredentials(t *testing.T) {
config := &Config{
AppID: 1,
AppInstallationID: 10,
AppPrivateKey: "asdf",
Token: "asdf",
ConfigureUrl: "github.com/some_org/some_repo",
EphemeralRunnerSetNamespace: "namespace",
EphemeralRunnerSetName: "deployment",
RunnerScaleSetId: 1,
}
err := config.Validate()
expectedError := fmt.Sprintf("only one GitHub auth method supported at a time. Have both PAT and App auth: token length: '%d', appId: '%d', installationId: '%d', private key length: '%d", len(config.Token), config.AppID, config.AppInstallationID, len(config.AppPrivateKey))
assert.ErrorContains(t, err, expectedError, "Expected error about missing auth")
}
func TestConfigValidation(t *testing.T) {
config := &Config{
ConfigureUrl: "https://github.com/actions",
EphemeralRunnerSetNamespace: "namespace",
EphemeralRunnerSetName: "deployment",
RunnerScaleSetId: 1,
MinRunners: 1,
MaxRunners: 5,
Token: "asdf",
}
err := config.Validate()
assert.NoError(t, err, "Expected no error")
}
func TestConfigValidationConfigUrl(t *testing.T) {
config := &Config{
EphemeralRunnerSetNamespace: "namespace",
EphemeralRunnerSetName: "deployment",
RunnerScaleSetId: 1,
}
err := config.Validate()
assert.ErrorContains(t, err, "GitHubConfigUrl is not provided", "Expected error about missing ConfigureUrl")
}

View File

@@ -0,0 +1,170 @@
package config
import (
"testing"
"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1/appconfig"
"github.com/actions/actions-runner-controller/vault"
"github.com/stretchr/testify/assert"
)
func TestConfigValidationMinMax(t *testing.T) {
config := &Config{
ConfigureUrl: "github.com/some_org/some_repo",
EphemeralRunnerSetNamespace: "namespace",
EphemeralRunnerSetName: "deployment",
RunnerScaleSetId: 1,
MinRunners: 5,
MaxRunners: 2,
AppConfig: &appconfig.AppConfig{
Token: "token",
},
}
err := config.Validate()
assert.ErrorContains(t, err, `MinRunners "5" cannot be greater than MaxRunners "2"`, "Expected error about MinRunners > MaxRunners")
}
func TestConfigValidationMissingToken(t *testing.T) {
config := &Config{
ConfigureUrl: "github.com/some_org/some_repo",
EphemeralRunnerSetNamespace: "namespace",
EphemeralRunnerSetName: "deployment",
RunnerScaleSetId: 1,
}
err := config.Validate()
expectedError := "AppConfig validation failed: missing app config"
assert.ErrorContains(t, err, expectedError, "Expected error about missing auth")
}
func TestConfigValidationAppKey(t *testing.T) {
t.Parallel()
t.Run("app id integer", func(t *testing.T) {
t.Parallel()
config := &Config{
AppConfig: &appconfig.AppConfig{
AppID: "1",
AppInstallationID: 10,
},
ConfigureUrl: "github.com/some_org/some_repo",
EphemeralRunnerSetNamespace: "namespace",
EphemeralRunnerSetName: "deployment",
RunnerScaleSetId: 1,
}
err := config.Validate()
expectedError := "AppConfig validation failed: no credentials provided: either a PAT or GitHub App credentials should be provided"
assert.ErrorContains(t, err, expectedError, "Expected error about missing auth")
})
t.Run("app id as client id", func(t *testing.T) {
t.Parallel()
config := &Config{
AppConfig: &appconfig.AppConfig{
AppID: "Iv23f8doAlphaNumer1c",
AppInstallationID: 10,
},
ConfigureUrl: "github.com/some_org/some_repo",
EphemeralRunnerSetNamespace: "namespace",
EphemeralRunnerSetName: "deployment",
RunnerScaleSetId: 1,
}
err := config.Validate()
expectedError := "AppConfig validation failed: no credentials provided: either a PAT or GitHub App credentials should be provided"
assert.ErrorContains(t, err, expectedError, "Expected error about missing auth")
})
}
func TestConfigValidationOnlyOneTypeOfCredentials(t *testing.T) {
config := &Config{
AppConfig: &appconfig.AppConfig{
AppID: "1",
AppInstallationID: 10,
AppPrivateKey: "asdf",
Token: "asdf",
},
ConfigureUrl: "github.com/some_org/some_repo",
EphemeralRunnerSetNamespace: "namespace",
EphemeralRunnerSetName: "deployment",
RunnerScaleSetId: 1,
}
err := config.Validate()
expectedError := "AppConfig validation failed: both PAT and GitHub App credentials provided. should only provide one"
assert.ErrorContains(t, err, expectedError, "Expected error about missing auth")
}
func TestConfigValidation(t *testing.T) {
config := &Config{
ConfigureUrl: "https://github.com/actions",
EphemeralRunnerSetNamespace: "namespace",
EphemeralRunnerSetName: "deployment",
RunnerScaleSetId: 1,
MinRunners: 1,
MaxRunners: 5,
AppConfig: &appconfig.AppConfig{
Token: "asdf",
},
}
err := config.Validate()
assert.NoError(t, err, "Expected no error")
}
func TestConfigValidationConfigUrl(t *testing.T) {
config := &Config{
EphemeralRunnerSetNamespace: "namespace",
EphemeralRunnerSetName: "deployment",
RunnerScaleSetId: 1,
}
err := config.Validate()
assert.ErrorContains(t, err, "GitHubConfigUrl is not provided", "Expected error about missing ConfigureUrl")
}
func TestConfigValidationWithVaultConfig(t *testing.T) {
t.Run("valid", func(t *testing.T) {
config := &Config{
ConfigureUrl: "https://github.com/actions",
EphemeralRunnerSetNamespace: "namespace",
EphemeralRunnerSetName: "deployment",
RunnerScaleSetId: 1,
MinRunners: 1,
MaxRunners: 5,
VaultType: vault.VaultTypeAzureKeyVault,
VaultLookupKey: "testkey",
}
err := config.Validate()
assert.NoError(t, err, "Expected no error for valid vault type")
})
t.Run("invalid vault type", func(t *testing.T) {
config := &Config{
ConfigureUrl: "https://github.com/actions",
EphemeralRunnerSetNamespace: "namespace",
EphemeralRunnerSetName: "deployment",
RunnerScaleSetId: 1,
MinRunners: 1,
MaxRunners: 5,
VaultType: vault.VaultType("invalid_vault_type"),
VaultLookupKey: "testkey",
}
err := config.Validate()
assert.ErrorContains(t, err, `unknown vault type: "invalid_vault_type"`, "Expected error for invalid vault type")
})
t.Run("vault type set without lookup key", func(t *testing.T) {
config := &Config{
ConfigureUrl: "https://github.com/actions",
EphemeralRunnerSetNamespace: "namespace",
EphemeralRunnerSetName: "deployment",
RunnerScaleSetId: 1,
MinRunners: 1,
MaxRunners: 5,
VaultType: vault.VaultTypeAzureKeyVault,
VaultLookupKey: "",
}
err := config.Validate()
assert.ErrorContains(t, err, `VaultLookupKey is required when VaultType is set to "azure_key_vault"`, "Expected error for vault type without lookup key")
})
}

View File

@@ -361,7 +361,7 @@ func (l *Listener) parseMessage(ctx context.Context, msg *actions.RunnerScaleSet
return nil, fmt.Errorf("failed to decode job available: %w", err) return nil, fmt.Errorf("failed to decode job available: %w", err)
} }
l.logger.Info("Job available message received", "jobId", jobAvailable.RunnerRequestId) l.logger.Info("Job available message received", "jobId", jobAvailable.JobID)
parsedMsg.jobsAvailable = append(parsedMsg.jobsAvailable, &jobAvailable) parsedMsg.jobsAvailable = append(parsedMsg.jobsAvailable, &jobAvailable)
case messageTypeJobAssigned: case messageTypeJobAssigned:
@@ -370,14 +370,14 @@ func (l *Listener) parseMessage(ctx context.Context, msg *actions.RunnerScaleSet
return nil, fmt.Errorf("failed to decode job assigned: %w", err) return nil, fmt.Errorf("failed to decode job assigned: %w", err)
} }
l.logger.Info("Job assigned message received", "jobId", jobAssigned.RunnerRequestId) l.logger.Info("Job assigned message received", "jobId", jobAssigned.JobID)
case messageTypeJobStarted: case messageTypeJobStarted:
var jobStarted actions.JobStarted var jobStarted actions.JobStarted
if err := json.Unmarshal(msg, &jobStarted); err != nil { if err := json.Unmarshal(msg, &jobStarted); err != nil {
return nil, fmt.Errorf("could not decode job started message. %w", err) return nil, fmt.Errorf("could not decode job started message. %w", err)
} }
l.logger.Info("Job started message received.", "RequestId", jobStarted.RunnerRequestId, "RunnerId", jobStarted.RunnerId) l.logger.Info("Job started message received.", "JobID", jobStarted.JobID, "RunnerId", jobStarted.RunnerID)
parsedMsg.jobsStarted = append(parsedMsg.jobsStarted, &jobStarted) parsedMsg.jobsStarted = append(parsedMsg.jobsStarted, &jobStarted)
case messageTypeJobCompleted: case messageTypeJobCompleted:
@@ -386,7 +386,13 @@ func (l *Listener) parseMessage(ctx context.Context, msg *actions.RunnerScaleSet
return nil, fmt.Errorf("failed to decode job completed: %w", err) return nil, fmt.Errorf("failed to decode job completed: %w", err)
} }
l.logger.Info("Job completed message received.", "RequestId", jobCompleted.RunnerRequestId, "Result", jobCompleted.Result, "RunnerId", jobCompleted.RunnerId, "RunnerName", jobCompleted.RunnerName) l.logger.Info(
"Job completed message received.",
"JobID", jobCompleted.JobID,
"Result", jobCompleted.Result,
"RunnerId", jobCompleted.RunnerId,
"RunnerName", jobCompleted.RunnerName,
)
parsedMsg.jobsCompleted = append(parsedMsg.jobsCompleted, &jobCompleted) parsedMsg.jobsCompleted = append(parsedMsg.jobsCompleted, &jobCompleted)
default: default:
@@ -400,7 +406,7 @@ func (l *Listener) parseMessage(ctx context.Context, msg *actions.RunnerScaleSet
func (l *Listener) acquireAvailableJobs(ctx context.Context, jobsAvailable []*actions.JobAvailable) ([]int64, error) { func (l *Listener) acquireAvailableJobs(ctx context.Context, jobsAvailable []*actions.JobAvailable) ([]int64, error) {
ids := make([]int64, 0, len(jobsAvailable)) ids := make([]int64, 0, len(jobsAvailable))
for _, job := range jobsAvailable { for _, job := range jobsAvailable {
ids = append(ids, job.RunnerRequestId) ids = append(ids, job.RunnerRequestID)
} }
l.logger.Info("Acquiring jobs", "count", len(ids), "requestIds", fmt.Sprint(ids)) l.logger.Info("Acquiring jobs", "count", len(ids), "requestIds", fmt.Sprint(ids))

View File

@@ -627,17 +627,17 @@ func TestListener_acquireAvailableJobs(t *testing.T) {
availableJobs := []*actions.JobAvailable{ availableJobs := []*actions.JobAvailable{
{ {
JobMessageBase: actions.JobMessageBase{ JobMessageBase: actions.JobMessageBase{
RunnerRequestId: 1, RunnerRequestID: 1,
}, },
}, },
{ {
JobMessageBase: actions.JobMessageBase{ JobMessageBase: actions.JobMessageBase{
RunnerRequestId: 2, RunnerRequestID: 2,
}, },
}, },
{ {
JobMessageBase: actions.JobMessageBase{ JobMessageBase: actions.JobMessageBase{
RunnerRequestId: 3, RunnerRequestID: 3,
}, },
}, },
} }
@@ -678,17 +678,17 @@ func TestListener_acquireAvailableJobs(t *testing.T) {
availableJobs := []*actions.JobAvailable{ availableJobs := []*actions.JobAvailable{
{ {
JobMessageBase: actions.JobMessageBase{ JobMessageBase: actions.JobMessageBase{
RunnerRequestId: 1, RunnerRequestID: 1,
}, },
}, },
{ {
JobMessageBase: actions.JobMessageBase{ JobMessageBase: actions.JobMessageBase{
RunnerRequestId: 2, RunnerRequestID: 2,
}, },
}, },
{ {
JobMessageBase: actions.JobMessageBase{ JobMessageBase: actions.JobMessageBase{
RunnerRequestId: 3, RunnerRequestID: 3,
}, },
}, },
} }
@@ -724,17 +724,17 @@ func TestListener_acquireAvailableJobs(t *testing.T) {
availableJobs := []*actions.JobAvailable{ availableJobs := []*actions.JobAvailable{
{ {
JobMessageBase: actions.JobMessageBase{ JobMessageBase: actions.JobMessageBase{
RunnerRequestId: 1, RunnerRequestID: 1,
}, },
}, },
{ {
JobMessageBase: actions.JobMessageBase{ JobMessageBase: actions.JobMessageBase{
RunnerRequestId: 2, RunnerRequestID: 2,
}, },
}, },
{ {
JobMessageBase: actions.JobMessageBase{ JobMessageBase: actions.JobMessageBase{
RunnerRequestId: 3, RunnerRequestID: 3,
}, },
}, },
} }
@@ -809,17 +809,17 @@ func TestListener_acquireAvailableJobs(t *testing.T) {
availableJobs := []*actions.JobAvailable{ availableJobs := []*actions.JobAvailable{
{ {
JobMessageBase: actions.JobMessageBase{ JobMessageBase: actions.JobMessageBase{
RunnerRequestId: 1, RunnerRequestID: 1,
}, },
}, },
{ {
JobMessageBase: actions.JobMessageBase{ JobMessageBase: actions.JobMessageBase{
RunnerRequestId: 2, RunnerRequestID: 2,
}, },
}, },
{ {
JobMessageBase: actions.JobMessageBase{ JobMessageBase: actions.JobMessageBase{
RunnerRequestId: 3, RunnerRequestID: 3,
}, },
}, },
} }
@@ -881,7 +881,7 @@ func TestListener_parseMessage(t *testing.T) {
JobMessageType: actions.JobMessageType{ JobMessageType: actions.JobMessageType{
MessageType: messageTypeJobAvailable, MessageType: messageTypeJobAvailable,
}, },
RunnerRequestId: 1, RunnerRequestID: 1,
}, },
}, },
{ {
@@ -890,7 +890,7 @@ func TestListener_parseMessage(t *testing.T) {
JobMessageType: actions.JobMessageType{ JobMessageType: actions.JobMessageType{
MessageType: messageTypeJobAvailable, MessageType: messageTypeJobAvailable,
}, },
RunnerRequestId: 2, RunnerRequestID: 2,
}, },
}, },
} }
@@ -904,7 +904,7 @@ func TestListener_parseMessage(t *testing.T) {
JobMessageType: actions.JobMessageType{ JobMessageType: actions.JobMessageType{
MessageType: messageTypeJobAssigned, MessageType: messageTypeJobAssigned,
}, },
RunnerRequestId: 3, RunnerRequestID: 3,
}, },
}, },
{ {
@@ -912,7 +912,7 @@ func TestListener_parseMessage(t *testing.T) {
JobMessageType: actions.JobMessageType{ JobMessageType: actions.JobMessageType{
MessageType: messageTypeJobAssigned, MessageType: messageTypeJobAssigned,
}, },
RunnerRequestId: 4, RunnerRequestID: 4,
}, },
}, },
} }
@@ -926,9 +926,9 @@ func TestListener_parseMessage(t *testing.T) {
JobMessageType: actions.JobMessageType{ JobMessageType: actions.JobMessageType{
MessageType: messageTypeJobStarted, MessageType: messageTypeJobStarted,
}, },
RunnerRequestId: 5, RunnerRequestID: 5,
}, },
RunnerId: 2, RunnerID: 2,
RunnerName: "runner2", RunnerName: "runner2",
}, },
} }
@@ -942,7 +942,7 @@ func TestListener_parseMessage(t *testing.T) {
JobMessageType: actions.JobMessageType{ JobMessageType: actions.JobMessageType{
MessageType: messageTypeJobCompleted, MessageType: messageTypeJobCompleted,
}, },
RunnerRequestId: 6, RunnerRequestID: 6,
}, },
Result: "success", Result: "success",
RunnerId: 1, RunnerId: 1,

View File

@@ -123,9 +123,9 @@ func TestHandleMessageMetrics(t *testing.T) {
JobMessageType: actions.JobMessageType{ JobMessageType: actions.JobMessageType{
MessageType: messageTypeJobStarted, MessageType: messageTypeJobStarted,
}, },
RunnerRequestId: 8, RunnerRequestID: 8,
}, },
RunnerId: 3, RunnerID: 3,
RunnerName: "runner3", RunnerName: "runner3",
}, },
} }
@@ -139,7 +139,7 @@ func TestHandleMessageMetrics(t *testing.T) {
JobMessageType: actions.JobMessageType{ JobMessageType: actions.JobMessageType{
MessageType: messageTypeJobCompleted, MessageType: messageTypeJobCompleted,
}, },
RunnerRequestId: 6, RunnerRequestID: 6,
}, },
Result: "success", Result: "success",
RunnerId: 1, RunnerId: 1,
@@ -150,7 +150,7 @@ func TestHandleMessageMetrics(t *testing.T) {
JobMessageType: actions.JobMessageType{ JobMessageType: actions.JobMessageType{
MessageType: messageTypeJobCompleted, MessageType: messageTypeJobCompleted,
}, },
RunnerRequestId: 7, RunnerRequestID: 7,
}, },
Result: "success", Result: "success",
RunnerId: 2, RunnerId: 2,

View File

@@ -13,26 +13,27 @@ import (
) )
func main() { func main() {
ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
defer stop()
configPath, ok := os.LookupEnv("LISTENER_CONFIG_PATH") configPath, ok := os.LookupEnv("LISTENER_CONFIG_PATH")
if !ok { if !ok {
fmt.Fprintf(os.Stderr, "Error: LISTENER_CONFIG_PATH environment variable is not set\n") fmt.Fprintf(os.Stderr, "Error: LISTENER_CONFIG_PATH environment variable is not set\n")
os.Exit(1) os.Exit(1)
} }
config, err := config.Read(configPath)
config, err := config.Read(ctx, configPath)
if err != nil { if err != nil {
log.Printf("Failed to read config: %v", err) log.Printf("Failed to read config: %v", err)
os.Exit(1) os.Exit(1)
} }
app, err := app.New(config) app, err := app.New(*config)
if err != nil { if err != nil {
log.Printf("Failed to initialize app: %v", err) log.Printf("Failed to initialize app: %v", err)
os.Exit(1) os.Exit(1)
} }
ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
defer stop()
if err := app.Run(ctx); err != nil { if err := app.Run(ctx); err != nil {
log.Printf("Application returned an error: %v", err) log.Printf("Application returned an error: %v", err)
os.Exit(1) os.Exit(1)

View File

@@ -21,6 +21,9 @@ const (
labelKeyOrganization = "organization" labelKeyOrganization = "organization"
labelKeyRepository = "repository" labelKeyRepository = "repository"
labelKeyJobName = "job_name" labelKeyJobName = "job_name"
labelKeyJobWorkflowRef = "job_workflow_ref"
labelKeyJobWorkflowName = "job_workflow_name"
labelKeyJobWorkflowTarget = "job_workflow_target"
labelKeyEventName = "event_name" labelKeyEventName = "event_name"
labelKeyJobResult = "job_result" labelKeyJobResult = "job_result"
) )
@@ -74,12 +77,16 @@ var metricsHelp = metricsHelpRegistry{
} }
func (e *exporter) jobLabels(jobBase *actions.JobMessageBase) prometheus.Labels { func (e *exporter) jobLabels(jobBase *actions.JobMessageBase) prometheus.Labels {
workflowRefInfo := ParseWorkflowRef(jobBase.JobWorkflowRef)
return prometheus.Labels{ return prometheus.Labels{
labelKeyEnterprise: e.scaleSetLabels[labelKeyEnterprise], labelKeyEnterprise: e.scaleSetLabels[labelKeyEnterprise],
labelKeyOrganization: jobBase.OwnerName, labelKeyOrganization: jobBase.OwnerName,
labelKeyRepository: jobBase.RepositoryName, labelKeyRepository: jobBase.RepositoryName,
labelKeyJobName: jobBase.JobDisplayName, labelKeyJobName: jobBase.JobDisplayName,
labelKeyEventName: jobBase.EventName, labelKeyJobWorkflowRef: jobBase.JobWorkflowRef,
labelKeyJobWorkflowName: workflowRefInfo.Name,
labelKeyJobWorkflowTarget: workflowRefInfo.Target,
labelKeyEventName: jobBase.EventName,
} }
} }
@@ -152,13 +159,148 @@ type ExporterConfig struct {
ServerAddr string ServerAddr string
ServerEndpoint string ServerEndpoint string
Logger logr.Logger Logger logr.Logger
Metrics v1alpha1.MetricsConfig Metrics *v1alpha1.MetricsConfig
}
var defaultMetrics = v1alpha1.MetricsConfig{
Counters: map[string]*v1alpha1.CounterMetric{
MetricStartedJobsTotal: {
Labels: []string{
labelKeyEnterprise,
labelKeyOrganization,
labelKeyRepository,
labelKeyJobName,
labelKeyEventName,
},
},
MetricCompletedJobsTotal: {
Labels: []string{
labelKeyEnterprise,
labelKeyOrganization,
labelKeyRepository,
labelKeyJobName,
labelKeyEventName,
labelKeyJobResult,
},
},
},
Gauges: map[string]*v1alpha1.GaugeMetric{
MetricAssignedJobs: {
Labels: []string{
labelKeyEnterprise,
labelKeyOrganization,
labelKeyRepository,
labelKeyRunnerScaleSetName,
labelKeyRunnerScaleSetNamespace,
},
},
MetricRunningJobs: {
Labels: []string{
labelKeyEnterprise,
labelKeyOrganization,
labelKeyRepository,
labelKeyRunnerScaleSetName,
labelKeyRunnerScaleSetNamespace,
},
},
MetricRegisteredRunners: {
Labels: []string{
labelKeyEnterprise,
labelKeyOrganization,
labelKeyRepository,
labelKeyRunnerScaleSetName,
labelKeyRunnerScaleSetNamespace,
},
},
MetricBusyRunners: {
Labels: []string{
labelKeyEnterprise,
labelKeyOrganization,
labelKeyRepository,
labelKeyRunnerScaleSetName,
labelKeyRunnerScaleSetNamespace,
},
},
MetricMinRunners: {
Labels: []string{
labelKeyEnterprise,
labelKeyOrganization,
labelKeyRepository,
labelKeyRunnerScaleSetName,
labelKeyRunnerScaleSetNamespace,
},
},
MetricMaxRunners: {
Labels: []string{
labelKeyEnterprise,
labelKeyOrganization,
labelKeyRepository,
labelKeyRunnerScaleSetName,
labelKeyRunnerScaleSetNamespace,
},
},
MetricDesiredRunners: {
Labels: []string{
labelKeyEnterprise,
labelKeyOrganization,
labelKeyRepository,
labelKeyRunnerScaleSetName,
labelKeyRunnerScaleSetNamespace,
},
},
MetricIdleRunners: {
Labels: []string{
labelKeyEnterprise,
labelKeyOrganization,
labelKeyRepository,
labelKeyRunnerScaleSetName,
labelKeyRunnerScaleSetNamespace,
},
},
},
Histograms: map[string]*v1alpha1.HistogramMetric{
MetricJobStartupDurationSeconds: {
Labels: []string{
labelKeyEnterprise,
labelKeyOrganization,
labelKeyRepository,
labelKeyJobName,
labelKeyEventName,
},
Buckets: defaultRuntimeBuckets,
},
MetricJobExecutionDurationSeconds: {
Labels: []string{
labelKeyEnterprise,
labelKeyOrganization,
labelKeyRepository,
labelKeyJobName,
labelKeyEventName,
labelKeyJobResult,
},
Buckets: defaultRuntimeBuckets,
},
},
}
func (e *ExporterConfig) defaults() {
if e.ServerAddr == "" {
e.ServerAddr = ":8080"
}
if e.ServerEndpoint == "" {
e.ServerEndpoint = "/metrics"
}
if e.Metrics == nil {
defaultMetrics := defaultMetrics
e.Metrics = &defaultMetrics
}
} }
func NewExporter(config ExporterConfig) ServerExporter { func NewExporter(config ExporterConfig) ServerExporter {
config.defaults()
reg := prometheus.NewRegistry() reg := prometheus.NewRegistry()
metrics := installMetrics(config.Metrics, reg, config.Logger) metrics := installMetrics(*config.Metrics, reg, config.Logger)
mux := http.NewServeMux() mux := http.NewServeMux()
mux.Handle( mux.Handle(
@@ -287,7 +429,7 @@ func (e *exporter) ListenAndServe(ctx context.Context) error {
} }
func (e *exporter) setGauge(name string, allLabels prometheus.Labels, val float64) { func (e *exporter) setGauge(name string, allLabels prometheus.Labels, val float64) {
m, ok := e.metrics.gauges[name] m, ok := e.gauges[name]
if !ok { if !ok {
return return
} }
@@ -299,7 +441,7 @@ func (e *exporter) setGauge(name string, allLabels prometheus.Labels, val float6
} }
func (e *exporter) incCounter(name string, allLabels prometheus.Labels) { func (e *exporter) incCounter(name string, allLabels prometheus.Labels) {
m, ok := e.metrics.counters[name] m, ok := e.counters[name]
if !ok { if !ok {
return return
} }
@@ -311,7 +453,7 @@ func (e *exporter) incCounter(name string, allLabels prometheus.Labels) {
} }
func (e *exporter) observeHistogram(name string, allLabels prometheus.Labels, val float64) { func (e *exporter) observeHistogram(name string, allLabels prometheus.Labels, val float64) {
m, ok := e.metrics.histograms[name] m, ok := e.histograms[name]
if !ok { if !ok {
return return
} }
@@ -331,7 +473,7 @@ func (e *exporter) PublishStatistics(stats *actions.RunnerScaleSetStatistic) {
e.setGauge(MetricAssignedJobs, e.scaleSetLabels, float64(stats.TotalAssignedJobs)) e.setGauge(MetricAssignedJobs, e.scaleSetLabels, float64(stats.TotalAssignedJobs))
e.setGauge(MetricRunningJobs, e.scaleSetLabels, float64(stats.TotalRunningJobs)) e.setGauge(MetricRunningJobs, e.scaleSetLabels, float64(stats.TotalRunningJobs))
e.setGauge(MetricRegisteredRunners, e.scaleSetLabels, float64(stats.TotalRegisteredRunners)) e.setGauge(MetricRegisteredRunners, e.scaleSetLabels, float64(stats.TotalRegisteredRunners))
e.setGauge(MetricBusyRunners, e.scaleSetLabels, float64(float64(stats.TotalRegisteredRunners))) e.setGauge(MetricBusyRunners, e.scaleSetLabels, float64(stats.TotalBusyRunners))
e.setGauge(MetricIdleRunners, e.scaleSetLabels, float64(stats.TotalIdleRunners)) e.setGauge(MetricIdleRunners, e.scaleSetLabels, float64(stats.TotalIdleRunners))
} }
@@ -339,7 +481,7 @@ func (e *exporter) PublishJobStarted(msg *actions.JobStarted) {
l := e.startedJobLabels(msg) l := e.startedJobLabels(msg)
e.incCounter(MetricStartedJobsTotal, l) e.incCounter(MetricStartedJobsTotal, l)
startupDuration := msg.JobMessageBase.RunnerAssignTime.Unix() - msg.JobMessageBase.ScaleSetAssignTime.Unix() startupDuration := msg.RunnerAssignTime.Unix() - msg.ScaleSetAssignTime.Unix()
e.observeHistogram(MetricJobStartupDurationSeconds, l, float64(startupDuration)) e.observeHistogram(MetricJobStartupDurationSeconds, l, float64(startupDuration))
} }
@@ -347,7 +489,7 @@ func (e *exporter) PublishJobCompleted(msg *actions.JobCompleted) {
l := e.completedJobLabels(msg) l := e.completedJobLabels(msg)
e.incCounter(MetricCompletedJobsTotal, l) e.incCounter(MetricCompletedJobsTotal, l)
executionDuration := msg.JobMessageBase.FinishTime.Unix() - msg.JobMessageBase.RunnerAssignTime.Unix() executionDuration := msg.FinishTime.Unix() - msg.RunnerAssignTime.Unix()
e.observeHistogram(MetricJobExecutionDurationSeconds, l, float64(executionDuration)) e.observeHistogram(MetricJobExecutionDurationSeconds, l, float64(executionDuration))
} }

View File

@@ -0,0 +1,99 @@
package metrics
import (
"testing"
"github.com/actions/actions-runner-controller/github/actions"
"github.com/prometheus/client_golang/prometheus"
"github.com/stretchr/testify/assert"
)
func TestMetricsWithWorkflowRefParsing(t *testing.T) {
// Create a test exporter
exporter := &exporter{
scaleSetLabels: prometheus.Labels{
labelKeyEnterprise: "test-enterprise",
labelKeyOrganization: "test-org",
labelKeyRepository: "test-repo",
labelKeyRunnerScaleSetName: "test-scale-set",
labelKeyRunnerScaleSetNamespace: "test-namespace",
},
}
tests := []struct {
name string
jobBase actions.JobMessageBase
wantName string
wantTarget string
}{
{
name: "main branch workflow",
jobBase: actions.JobMessageBase{
OwnerName: "actions",
RepositoryName: "runner",
JobDisplayName: "Build and Test",
JobWorkflowRef: "actions/runner/.github/workflows/build.yml@refs/heads/main",
EventName: "push",
},
wantName: "build",
wantTarget: "heads/main",
},
{
name: "feature branch workflow",
jobBase: actions.JobMessageBase{
OwnerName: "myorg",
RepositoryName: "myrepo",
JobDisplayName: "CI/CD Pipeline",
JobWorkflowRef: "myorg/myrepo/.github/workflows/ci-cd-pipeline.yml@refs/heads/feature/new-metrics",
EventName: "push",
},
wantName: "ci-cd-pipeline",
wantTarget: "heads/feature/new-metrics",
},
{
name: "pull request workflow",
jobBase: actions.JobMessageBase{
OwnerName: "actions",
RepositoryName: "runner",
JobDisplayName: "PR Checks",
JobWorkflowRef: "actions/runner/.github/workflows/pr-checks.yml@refs/pull/123/merge",
EventName: "pull_request",
},
wantName: "pr-checks",
wantTarget: "pull/123",
},
{
name: "tag workflow",
jobBase: actions.JobMessageBase{
OwnerName: "actions",
RepositoryName: "runner",
JobDisplayName: "Release",
JobWorkflowRef: "actions/runner/.github/workflows/release.yml@refs/tags/v1.2.3",
EventName: "release",
},
wantName: "release",
wantTarget: "tags/v1.2.3",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
labels := exporter.jobLabels(&tt.jobBase)
// Build expected labels
expectedLabels := prometheus.Labels{
labelKeyEnterprise: "test-enterprise",
labelKeyOrganization: tt.jobBase.OwnerName,
labelKeyRepository: tt.jobBase.RepositoryName,
labelKeyJobName: tt.jobBase.JobDisplayName,
labelKeyJobWorkflowRef: tt.jobBase.JobWorkflowRef,
labelKeyJobWorkflowName: tt.wantName,
labelKeyJobWorkflowTarget: tt.wantTarget,
labelKeyEventName: tt.jobBase.EventName,
}
// Assert all expected labels match
assert.Equal(t, expectedLabels, labels, "jobLabels() returned unexpected labels for %s", tt.name)
})
}
}

View File

@@ -7,6 +7,7 @@ import (
"github.com/go-logr/logr" "github.com/go-logr/logr"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
) )
func TestInstallMetrics(t *testing.T) { func TestInstallMetrics(t *testing.T) {
@@ -86,3 +87,179 @@ func TestInstallMetrics(t *testing.T) {
assert.Equal(t, duration.config.Labels, metricsConfig.Histograms[MetricJobStartupDurationSeconds].Labels) assert.Equal(t, duration.config.Labels, metricsConfig.Histograms[MetricJobStartupDurationSeconds].Labels)
assert.Equal(t, duration.config.Buckets, defaultRuntimeBuckets) assert.Equal(t, duration.config.Buckets, defaultRuntimeBuckets)
} }
func TestNewExporter(t *testing.T) {
t.Run("with defaults metrics applied", func(t *testing.T) {
config := ExporterConfig{
ScaleSetName: "test-scale-set",
ScaleSetNamespace: "test-namespace",
Enterprise: "",
Organization: "org",
Repository: "repo",
ServerAddr: ":6060",
ServerEndpoint: "/metrics",
Logger: logr.Discard(),
Metrics: nil, // when metrics is nil, all default metrics should be registered
}
exporter, ok := NewExporter(config).(*exporter)
require.True(t, ok, "expected exporter to be of type *exporter")
require.NotNil(t, exporter)
reg := prometheus.NewRegistry()
wantMetrics := installMetrics(defaultMetrics, reg, config.Logger)
assert.Equal(t, len(wantMetrics.counters), len(exporter.counters))
for k, v := range wantMetrics.counters {
assert.Contains(t, exporter.counters, k)
assert.Equal(t, v.config, exporter.counters[k].config)
}
assert.Equal(t, len(wantMetrics.gauges), len(exporter.gauges))
for k, v := range wantMetrics.gauges {
assert.Contains(t, exporter.gauges, k)
assert.Equal(t, v.config, exporter.gauges[k].config)
}
assert.Equal(t, len(wantMetrics.histograms), len(exporter.histograms))
for k, v := range wantMetrics.histograms {
assert.Contains(t, exporter.histograms, k)
assert.Equal(t, v.config, exporter.histograms[k].config)
}
require.NotNil(t, exporter.srv)
assert.Equal(t, config.ServerAddr, exporter.srv.Addr)
})
t.Run("with default server URL", func(t *testing.T) {
config := ExporterConfig{
ScaleSetName: "test-scale-set",
ScaleSetNamespace: "test-namespace",
Enterprise: "",
Organization: "org",
Repository: "repo",
ServerAddr: "", // empty ServerAddr should default to ":8080"
ServerEndpoint: "",
Logger: logr.Discard(),
Metrics: nil, // when metrics is nil, all default metrics should be registered
}
exporter, ok := NewExporter(config).(*exporter)
require.True(t, ok, "expected exporter to be of type *exporter")
require.NotNil(t, exporter)
reg := prometheus.NewRegistry()
wantMetrics := installMetrics(defaultMetrics, reg, config.Logger)
assert.Equal(t, len(wantMetrics.counters), len(exporter.counters))
for k, v := range wantMetrics.counters {
assert.Contains(t, exporter.counters, k)
assert.Equal(t, v.config, exporter.counters[k].config)
}
assert.Equal(t, len(wantMetrics.gauges), len(exporter.gauges))
for k, v := range wantMetrics.gauges {
assert.Contains(t, exporter.gauges, k)
assert.Equal(t, v.config, exporter.gauges[k].config)
}
assert.Equal(t, len(wantMetrics.histograms), len(exporter.histograms))
for k, v := range wantMetrics.histograms {
assert.Contains(t, exporter.histograms, k)
assert.Equal(t, v.config, exporter.histograms[k].config)
}
require.NotNil(t, exporter.srv)
assert.Equal(t, exporter.srv.Addr, ":8080")
})
t.Run("with metrics configured", func(t *testing.T) {
metricsConfig := v1alpha1.MetricsConfig{
Counters: map[string]*v1alpha1.CounterMetric{
MetricStartedJobsTotal: {
Labels: []string{labelKeyRepository},
},
},
Gauges: map[string]*v1alpha1.GaugeMetric{
MetricAssignedJobs: {
Labels: []string{labelKeyRepository},
},
},
Histograms: map[string]*v1alpha1.HistogramMetric{
MetricJobExecutionDurationSeconds: {
Labels: []string{labelKeyRepository},
Buckets: []float64{0.1, 1},
},
},
}
config := ExporterConfig{
ScaleSetName: "test-scale-set",
ScaleSetNamespace: "test-namespace",
Enterprise: "",
Organization: "org",
Repository: "repo",
ServerAddr: ":6060",
ServerEndpoint: "/metrics",
Logger: logr.Discard(),
Metrics: &metricsConfig,
}
exporter, ok := NewExporter(config).(*exporter)
require.True(t, ok, "expected exporter to be of type *exporter")
require.NotNil(t, exporter)
reg := prometheus.NewRegistry()
wantMetrics := installMetrics(metricsConfig, reg, config.Logger)
assert.Equal(t, len(wantMetrics.counters), len(exporter.counters))
for k, v := range wantMetrics.counters {
assert.Contains(t, exporter.counters, k)
assert.Equal(t, v.config, exporter.counters[k].config)
}
assert.Equal(t, len(wantMetrics.gauges), len(exporter.gauges))
for k, v := range wantMetrics.gauges {
assert.Contains(t, exporter.gauges, k)
assert.Equal(t, v.config, exporter.gauges[k].config)
}
assert.Equal(t, len(wantMetrics.histograms), len(exporter.histograms))
for k, v := range wantMetrics.histograms {
assert.Contains(t, exporter.histograms, k)
assert.Equal(t, v.config, exporter.histograms[k].config)
}
require.NotNil(t, exporter.srv)
assert.Equal(t, config.ServerAddr, exporter.srv.Addr)
})
}
// TestExporterConfigDefaults ensures that defaults() fills in the server
// address (":8080"), the server endpoint ("/metrics"), and the default
// metrics configuration when those fields are left unset.
func TestExporterConfigDefaults(t *testing.T) {
	// Zero-valued fields (Enterprise, ServerAddr, ServerEndpoint, Metrics)
	// are omitted on purpose: defaults() should populate them.
	got := ExporterConfig{
		ScaleSetName:      "test-scale-set",
		ScaleSetNamespace: "test-namespace",
		Organization:      "org",
		Repository:        "repo",
		Logger:            logr.Discard(),
	}
	got.defaults()

	want := ExporterConfig{
		ScaleSetName:      "test-scale-set",
		ScaleSetNamespace: "test-namespace",
		Enterprise:        "",
		Organization:      "org",
		Repository:        "repo",
		ServerAddr:        ":8080",         // default server address
		ServerEndpoint:    "/metrics",      // default server endpoint
		Logger:            logr.Discard(),
		Metrics:           &defaultMetrics, // nil Metrics defaults to all default metrics
	}
	assert.Equal(t, want, got)
}

View File

@@ -0,0 +1,78 @@
package metrics
import (
"path"
"strings"
)
// WorkflowRefInfo contains parsed information from a job_workflow_ref.
type WorkflowRefInfo struct {
	// Name is the workflow file name without its .yml/.yaml extension.
	Name string
	// Target is the target ref with its type prefix retained for clarity.
	// Examples:
	//   - heads/main                (branch)
	//   - heads/feature/new-feature (branch)
	//   - tags/v1.2.3               (tag)
	//   - pull/123                  (pull request)
	Target string
}

// ParseWorkflowRef parses a job_workflow_ref string to extract the workflow
// name and target ref.
//
// Format: {owner}/{repo}/.github/workflows/{workflow_file}@{ref}
// Example: mygithuborg/myrepo/.github/workflows/blank.yml@refs/heads/main
//
// The target field preserves type prefixes to differentiate between:
//   - Branch references: "heads/{branch}" (from refs/heads/{branch})
//   - Tag references:    "tags/{tag}"     (from refs/tags/{tag})
//   - Pull requests:     "pull/{number}"  (from refs/pull/{number}/merge)
//
// A zero WorkflowRefInfo is returned when workflowRef is empty or does not
// contain exactly one "@" separator. Target is left empty for refs that do
// not match one of the recognized prefixes.
func ParseWorkflowRef(workflowRef string) WorkflowRefInfo {
	var info WorkflowRefInfo

	// Split into path and ref at the "@" separator. Inputs with zero or
	// multiple separators do not match the documented format and are
	// rejected wholesale.
	workflowPath, ref, ok := strings.Cut(workflowRef, "@")
	if !ok || strings.Contains(ref, "@") {
		return info
	}

	// The path format is {owner}/{repo}/.github/workflows/{workflow_file};
	// the base element is the workflow file, minus its extension.
	workflowFile := path.Base(workflowPath)
	info.Name = strings.TrimSuffix(strings.TrimSuffix(workflowFile, ".yml"), ".yaml")

	// Extract target from ref based on type:
	//   Branch refs: refs/heads/{branch}
	//   Tag refs:    refs/tags/{tag}
	//   PR refs:     refs/pull/{number}/merge
	const (
		branchPrefix = "refs/heads/"
		tagPrefix    = "refs/tags/"
		prPrefix     = "refs/pull/"
	)
	switch {
	case strings.HasPrefix(ref, branchPrefix):
		// Keep "heads/" prefix to indicate branch.
		info.Target = "heads/" + strings.TrimPrefix(ref, branchPrefix)
	case strings.HasPrefix(ref, tagPrefix):
		// Keep "tags/" prefix to indicate tag.
		info.Target = "tags/" + strings.TrimPrefix(ref, tagPrefix)
	case strings.HasPrefix(ref, prPrefix):
		// Extract the PR number from refs/pull/{number}/merge, keeping the
		// "pull/" prefix to indicate a pull request. A bare refs/pull/{n}
		// without a trailing segment yields no target.
		prPart := strings.TrimPrefix(ref, prPrefix)
		if number, _, found := strings.Cut(prPart, "/"); found && number != "" {
			info.Target = "pull/" + number
		}
	}
	return info
}

View File

@@ -0,0 +1,82 @@
package metrics
import (
"testing"
"github.com/stretchr/testify/assert"
)
// TestParseWorkflowRef exercises ParseWorkflowRef across branch, tag, and
// pull-request refs as well as malformed inputs (missing or repeated "@"
// separators, bare pull refs without a trailing segment).
func TestParseWorkflowRef(t *testing.T) {
	tests := []struct {
		name        string
		workflowRef string
		wantName    string
		wantTarget  string
	}{
		{
			name:        "standard branch reference with yml",
			workflowRef: "actions-runner-controller-sandbox/mumoshu-orgrunner-test-01/.github/workflows/blank.yml@refs/heads/main",
			wantName:    "blank",
			wantTarget:  "heads/main",
		},
		{
			name:        "branch with special characters",
			workflowRef: "owner/repo/.github/workflows/ci-cd.yml@refs/heads/feature/new-feature",
			wantName:    "ci-cd",
			wantTarget:  "heads/feature/new-feature",
		},
		{
			name:        "yaml extension",
			workflowRef: "owner/repo/.github/workflows/deploy.yaml@refs/heads/develop",
			wantName:    "deploy",
			wantTarget:  "heads/develop",
		},
		{
			name:        "tag reference",
			workflowRef: "owner/repo/.github/workflows/release.yml@refs/tags/v1.0.0",
			wantName:    "release",
			wantTarget:  "tags/v1.0.0",
		},
		{
			name:        "pull request reference",
			workflowRef: "owner/repo/.github/workflows/test.yml@refs/pull/123/merge",
			wantName:    "test",
			wantTarget:  "pull/123",
		},
		{
			// A bare refs/pull/{number} without "/merge" carries no PR
			// number boundary; the name is still parsed but the target
			// stays empty.
			name:        "pull request reference without merge suffix",
			workflowRef: "owner/repo/.github/workflows/test.yml@refs/pull/456",
			wantName:    "test",
			wantTarget:  "",
		},
		{
			name:        "empty workflow ref",
			workflowRef: "",
			wantName:    "",
			wantTarget:  "",
		},
		{
			name:        "invalid format - no @ separator",
			workflowRef: "owner/repo/.github/workflows/test.yml",
			wantName:    "",
			wantTarget:  "",
		},
		{
			// More than one "@" does not match the documented format and
			// must be rejected wholesale.
			name:        "invalid format - multiple @ separators",
			workflowRef: "owner/repo/.github/workflows/test.yml@refs/heads/a@b",
			wantName:    "",
			wantTarget:  "",
		},
		{
			name:        "workflow with dots in name",
			workflowRef: "owner/repo/.github/workflows/build.test.yml@refs/heads/main",
			wantName:    "build.test",
			wantTarget:  "heads/main",
		},
		{
			name:        "workflow with hyphen and underscore",
			workflowRef: "owner/repo/.github/workflows/build-test_deploy.yml@refs/heads/main",
			wantName:    "build-test_deploy",
			wantTarget:  "heads/main",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got := ParseWorkflowRef(tt.workflowRef)
			expected := WorkflowRefInfo{
				Name:   tt.wantName,
				Target: tt.wantTarget,
			}
			assert.Equal(t, expected, got, "ParseWorkflowRef(%q) returned unexpected result", tt.workflowRef)
		})
	}
}

View File

@@ -100,10 +100,11 @@ func (w *Worker) HandleJobStarted(ctx context.Context, jobInfo *actions.JobStart
"runnerName", jobInfo.RunnerName, "runnerName", jobInfo.RunnerName,
"ownerName", jobInfo.OwnerName, "ownerName", jobInfo.OwnerName,
"repoName", jobInfo.RepositoryName, "repoName", jobInfo.RepositoryName,
"jobId", jobInfo.JobID,
"workflowRef", jobInfo.JobWorkflowRef, "workflowRef", jobInfo.JobWorkflowRef,
"workflowRunId", jobInfo.WorkflowRunId, "workflowRunId", jobInfo.WorkflowRunID,
"jobDisplayName", jobInfo.JobDisplayName, "jobDisplayName", jobInfo.JobDisplayName,
"requestId", jobInfo.RunnerRequestId) "requestId", jobInfo.RunnerRequestID)
original, err := json.Marshal(&v1alpha1.EphemeralRunner{}) original, err := json.Marshal(&v1alpha1.EphemeralRunner{})
if err != nil { if err != nil {
@@ -113,9 +114,10 @@ func (w *Worker) HandleJobStarted(ctx context.Context, jobInfo *actions.JobStart
patch, err := json.Marshal( patch, err := json.Marshal(
&v1alpha1.EphemeralRunner{ &v1alpha1.EphemeralRunner{
Status: v1alpha1.EphemeralRunnerStatus{ Status: v1alpha1.EphemeralRunnerStatus{
JobRequestId: jobInfo.RunnerRequestId, JobRequestId: jobInfo.RunnerRequestID,
JobRepositoryName: fmt.Sprintf("%s/%s", jobInfo.OwnerName, jobInfo.RepositoryName), JobRepositoryName: fmt.Sprintf("%s/%s", jobInfo.OwnerName, jobInfo.RepositoryName),
WorkflowRunId: jobInfo.WorkflowRunId, JobID: jobInfo.JobID,
WorkflowRunId: jobInfo.WorkflowRunID,
JobWorkflowRef: jobInfo.JobWorkflowRef, JobWorkflowRef: jobInfo.JobWorkflowRef,
JobDisplayName: jobInfo.JobDisplayName, JobDisplayName: jobInfo.JobDisplayName,
}, },

File diff suppressed because it is too large Load Diff

View File

@@ -15504,6 +15504,53 @@ spec:
- containers - containers
type: object type: object
type: object type: object
vaultConfig:
properties:
azureKeyVault:
properties:
certificatePath:
type: string
clientId:
type: string
tenantId:
type: string
url:
type: string
required:
- certificatePath
- clientId
- tenantId
- url
type: object
proxy:
properties:
http:
properties:
credentialSecretRef:
type: string
url:
description: Required
type: string
type: object
https:
properties:
credentialSecretRef:
type: string
url:
description: Required
type: string
type: object
noProxy:
items:
type: string
type: array
type: object
type:
description: |-
VaultType represents the type of vault that can be used in the application.
It is used to identify which vault integration should be used to resolve secrets.
type: string
type: object
type: object type: object
status: status:
description: AutoscalingRunnerSetStatus defines the observed state of AutoscalingRunnerSet description: AutoscalingRunnerSetStatus defines the observed state of AutoscalingRunnerSet
@@ -15524,4 +15571,3 @@ spec:
storage: true storage: true
subresources: subresources:
status: {} status: {}
preserveUnknownFields: false

View File

@@ -36,6 +36,9 @@ spec:
- jsonPath: .status.jobDisplayName - jsonPath: .status.jobDisplayName
name: JobDisplayName name: JobDisplayName
type: string type: string
- jsonPath: .status.jobId
name: JobId
type: string
- jsonPath: .status.message - jsonPath: .status.message
name: Message name: Message
type: string type: string
@@ -7784,6 +7787,53 @@ spec:
required: required:
- containers - containers
type: object type: object
vaultConfig:
properties:
azureKeyVault:
properties:
certificatePath:
type: string
clientId:
type: string
tenantId:
type: string
url:
type: string
required:
- certificatePath
- clientId
- tenantId
- url
type: object
proxy:
properties:
http:
properties:
credentialSecretRef:
type: string
url:
description: Required
type: string
type: object
https:
properties:
credentialSecretRef:
type: string
url:
description: Required
type: string
type: object
noProxy:
items:
type: string
type: array
type: object
type:
description: |-
VaultType represents the type of vault that can be used in the application.
It is used to identify which vault integration should be used to resolve secrets.
type: string
type: object
required: required:
- githubConfigSecret - githubConfigSecret
- githubConfigUrl - githubConfigUrl
@@ -7794,10 +7844,13 @@ spec:
properties: properties:
failures: failures:
additionalProperties: additionalProperties:
type: boolean format: date-time
type: string
type: object type: object
jobDisplayName: jobDisplayName:
type: string type: string
jobId:
type: string
jobRepositoryName: jobRepositoryName:
type: string type: string
jobRequestId: jobRequestId:
@@ -7826,8 +7879,6 @@ spec:
type: string type: string
runnerId: runnerId:
type: integer type: integer
runnerJITConfig:
type: string
runnerName: runnerName:
type: string type: string
workflowRunId: workflowRunId:
@@ -7839,4 +7890,3 @@ spec:
storage: true storage: true
subresources: subresources:
status: {} status: {}
preserveUnknownFields: false

View File

@@ -7778,6 +7778,53 @@ spec:
required: required:
- containers - containers
type: object type: object
vaultConfig:
properties:
azureKeyVault:
properties:
certificatePath:
type: string
clientId:
type: string
tenantId:
type: string
url:
type: string
required:
- certificatePath
- clientId
- tenantId
- url
type: object
proxy:
properties:
http:
properties:
credentialSecretRef:
type: string
url:
description: Required
type: string
type: object
https:
properties:
credentialSecretRef:
type: string
url:
description: Required
type: string
type: object
noProxy:
items:
type: string
type: array
type: object
type:
description: |-
VaultType represents the type of vault that can be used in the application.
It is used to identify which vault integration should be used to resolve secrets.
type: string
type: object
required: required:
- githubConfigSecret - githubConfigSecret
- githubConfigUrl - githubConfigUrl
@@ -7812,4 +7859,3 @@ spec:
storage: true storage: true
subresources: subresources:
status: {} status: {}
preserveUnknownFields: false

View File

@@ -12,306 +12,313 @@ spec:
listKind: HorizontalRunnerAutoscalerList listKind: HorizontalRunnerAutoscalerList
plural: horizontalrunnerautoscalers plural: horizontalrunnerautoscalers
shortNames: shortNames:
- hra - hra
singular: horizontalrunnerautoscaler singular: horizontalrunnerautoscaler
scope: Namespaced scope: Namespaced
versions: versions:
- additionalPrinterColumns: - additionalPrinterColumns:
- jsonPath: .spec.minReplicas - jsonPath: .spec.minReplicas
name: Min name: Min
type: number type: number
- jsonPath: .spec.maxReplicas - jsonPath: .spec.maxReplicas
name: Max name: Max
type: number type: number
- jsonPath: .status.desiredReplicas - jsonPath: .status.desiredReplicas
name: Desired name: Desired
type: number type: number
- jsonPath: .status.scheduledOverridesSummary - jsonPath: .status.scheduledOverridesSummary
name: Schedule name: Schedule
type: string type: string
name: v1alpha1 name: v1alpha1
schema: schema:
openAPIV3Schema: openAPIV3Schema:
description: HorizontalRunnerAutoscaler is the Schema for the horizontalrunnerautoscaler API description: HorizontalRunnerAutoscaler is the Schema for the horizontalrunnerautoscaler
properties: API
apiVersion: properties:
description: |- apiVersion:
APIVersion defines the versioned schema of this representation of an object. description: |-
Servers should convert recognized schemas to the latest internal value, and APIVersion defines the versioned schema of this representation of an object.
may reject unrecognized values. Servers should convert recognized schemas to the latest internal value, and
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources may reject unrecognized values.
type: string More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
kind: type: string
description: |- kind:
Kind is a string value representing the REST resource this object represents. description: |-
Servers may infer this from the endpoint the client submits requests to. Kind is a string value representing the REST resource this object represents.
Cannot be updated. Servers may infer this from the endpoint the client submits requests to.
In CamelCase. Cannot be updated.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds In CamelCase.
type: string More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
metadata: type: string
type: object metadata:
spec: type: object
description: HorizontalRunnerAutoscalerSpec defines the desired state of HorizontalRunnerAutoscaler spec:
properties: description: HorizontalRunnerAutoscalerSpec defines the desired state
capacityReservations: of HorizontalRunnerAutoscaler
items: properties:
description: |- capacityReservations:
CapacityReservation specifies the number of replicas temporarily added items:
to the scale target until ExpirationTime.
properties:
effectiveTime:
format: date-time
type: string
expirationTime:
format: date-time
type: string
name:
type: string
replicas:
type: integer
type: object
type: array
githubAPICredentialsFrom:
properties:
secretRef:
properties:
name:
type: string
required:
- name
type: object
type: object
maxReplicas:
description: MaxReplicas is the maximum number of replicas the deployment is allowed to scale
type: integer
metrics:
description: Metrics is the collection of various metric targets to calculate desired number of runners
items:
properties:
repositoryNames:
description: |-
RepositoryNames is the list of repository names to be used for calculating the metric.
For example, a repository name is the REPO part of `github.com/USER/REPO`.
items:
type: string
type: array
scaleDownAdjustment:
description: |-
ScaleDownAdjustment is the number of runners removed on scale-down.
You can only specify either ScaleDownFactor or ScaleDownAdjustment.
type: integer
scaleDownFactor:
description: |-
ScaleDownFactor is the multiplicative factor applied to the current number of runners used
to determine how many pods should be removed.
type: string
scaleDownThreshold:
description: |-
ScaleDownThreshold is the percentage of busy runners less than which will
trigger the hpa to scale the runners down.
type: string
scaleUpAdjustment:
description: |-
ScaleUpAdjustment is the number of runners added on scale-up.
You can only specify either ScaleUpFactor or ScaleUpAdjustment.
type: integer
scaleUpFactor:
description: |-
ScaleUpFactor is the multiplicative factor applied to the current number of runners used
to determine how many pods should be added.
type: string
scaleUpThreshold:
description: |-
ScaleUpThreshold is the percentage of busy runners greater than which will
trigger the hpa to scale runners up.
type: string
type:
description: |-
Type is the type of metric to be used for autoscaling.
It can be TotalNumberOfQueuedAndInProgressWorkflowRuns or PercentageRunnersBusy.
type: string
type: object
type: array
minReplicas:
description: MinReplicas is the minimum number of replicas the deployment is allowed to scale
type: integer
scaleDownDelaySecondsAfterScaleOut:
description: |- description: |-
ScaleDownDelaySecondsAfterScaleUp is the approximate delay for a scale down followed by a scale up CapacityReservation specifies the number of replicas temporarily added
Used to prevent flapping (down->up->down->... loop) to the scale target until ExpirationTime.
type: integer
scaleTargetRef:
description: ScaleTargetRef is the reference to scaled resource like RunnerDeployment
properties: properties:
kind: effectiveTime:
description: Kind is the type of resource being referenced format: date-time
enum: type: string
- RunnerDeployment expirationTime:
- RunnerSet format: date-time
type: string type: string
name: name:
description: Name is the name of resource being referenced
type: string type: string
replicas:
type: integer
type: object type: object
scaleUpTriggers: type: array
description: |- githubAPICredentialsFrom:
ScaleUpTriggers is an experimental feature to increase the desired replicas by 1 properties:
on each webhook requested received by the webhookBasedAutoscaler. secretRef:
This feature requires you to also enable and deploy the webhookBasedAutoscaler onto your cluster.
Note that the added runners remain until the next sync period at least,
and they may or may not be used by GitHub Actions depending on the timing.
They are intended to be used to gain "resource slack" immediately after you
receive a webhook from GitHub, so that you can loosely expect MinReplicas runners to be always available.
items:
properties: properties:
amount: name:
type: integer
duration:
type: string
githubEvent:
properties:
checkRun:
description: https://docs.github.com/en/actions/reference/events-that-trigger-workflows#check_run
properties:
names:
description: |-
Names is a list of GitHub Actions glob patterns.
Any check_run event whose name matches one of patterns in the list can trigger autoscaling.
Note that check_run name seem to equal to the job name you've defined in your actions workflow yaml file.
So it is very likely that you can utilize this to trigger depending on the job.
items:
type: string
type: array
repositories:
description: |-
Repositories is a list of GitHub repositories.
Any check_run event whose repository matches one of repositories in the list can trigger autoscaling.
items:
type: string
type: array
status:
type: string
types:
description: 'One of: created, rerequested, or completed'
items:
type: string
type: array
type: object
pullRequest:
description: https://docs.github.com/en/actions/reference/events-that-trigger-workflows#pull_request
properties:
branches:
items:
type: string
type: array
types:
items:
type: string
type: array
type: object
push:
description: |-
PushSpec is the condition for triggering scale-up on push event
Also see https://docs.github.com/en/actions/reference/events-that-trigger-workflows#push
type: object
workflowJob:
description: https://docs.github.com/en/developers/webhooks-and-events/webhooks/webhook-events-and-payloads#workflow_job
type: object
type: object
type: object
type: array
scheduledOverrides:
description: |-
ScheduledOverrides is the list of ScheduledOverride.
It can be used to override a few fields of HorizontalRunnerAutoscalerSpec on schedule.
The earlier a scheduled override is, the higher it is prioritized.
items:
description: |-
ScheduledOverride can be used to override a few fields of HorizontalRunnerAutoscalerSpec on schedule.
A schedule can optionally be recurring, so that the corresponding override happens every day, week, month, or year.
properties:
endTime:
description: EndTime is the time at which the first override ends.
format: date-time
type: string
minReplicas:
description: |-
MinReplicas is the number of runners while overriding.
If omitted, it doesn't override minReplicas.
minimum: 0
nullable: true
type: integer
recurrenceRule:
properties:
frequency:
description: |-
Frequency is the name of a predefined interval of each recurrence.
The valid values are "Daily", "Weekly", "Monthly", and "Yearly".
If empty, the corresponding override happens only once.
enum:
- Daily
- Weekly
- Monthly
- Yearly
type: string
untilTime:
description: |-
UntilTime is the time of the final recurrence.
If empty, the schedule recurs forever.
format: date-time
type: string
type: object
startTime:
description: StartTime is the time at which the first override starts.
format: date-time
type: string type: string
required: required:
- endTime - name
- startTime
type: object type: object
type: array type: object
type: object maxReplicas:
status: description: MaxReplicas is the maximum number of replicas the deployment
properties: is allowed to scale
cacheEntries: type: integer
items: metrics:
properties: description: Metrics is the collection of various metric targets to
expirationTime: calculate desired number of runners
format: date-time items:
properties:
repositoryNames:
description: |-
RepositoryNames is the list of repository names to be used for calculating the metric.
For example, a repository name is the REPO part of `github.com/USER/REPO`.
items:
type: string type: string
key: type: array
type: string scaleDownAdjustment:
value: description: |-
type: integer ScaleDownAdjustment is the number of runners removed on scale-down.
type: object You can only specify either ScaleDownFactor or ScaleDownAdjustment.
type: array type: integer
desiredReplicas: scaleDownFactor:
description: |-
ScaleDownFactor is the multiplicative factor applied to the current number of runners used
to determine how many pods should be removed.
type: string
scaleDownThreshold:
description: |-
ScaleDownThreshold is the percentage of busy runners less than which will
trigger the hpa to scale the runners down.
type: string
scaleUpAdjustment:
description: |-
ScaleUpAdjustment is the number of runners added on scale-up.
You can only specify either ScaleUpFactor or ScaleUpAdjustment.
type: integer
scaleUpFactor:
description: |-
ScaleUpFactor is the multiplicative factor applied to the current number of runners used
to determine how many pods should be added.
type: string
scaleUpThreshold:
description: |-
ScaleUpThreshold is the percentage of busy runners greater than which will
trigger the hpa to scale runners up.
type: string
type:
description: |-
Type is the type of metric to be used for autoscaling.
It can be TotalNumberOfQueuedAndInProgressWorkflowRuns or PercentageRunnersBusy.
type: string
type: object
type: array
minReplicas:
description: MinReplicas is the minimum number of replicas the deployment
is allowed to scale
type: integer
scaleDownDelaySecondsAfterScaleOut:
description: |-
ScaleDownDelaySecondsAfterScaleUp is the approximate delay for a scale down followed by a scale up
Used to prevent flapping (down->up->down->... loop)
type: integer
scaleTargetRef:
description: ScaleTargetRef is the reference to scaled resource like
RunnerDeployment
properties:
kind:
description: Kind is the type of resource being referenced
enum:
- RunnerDeployment
- RunnerSet
type: string
name:
description: Name is the name of resource being referenced
type: string
type: object
scaleUpTriggers:
description: |-
ScaleUpTriggers is an experimental feature to increase the desired replicas by 1
on each webhook requested received by the webhookBasedAutoscaler.
This feature requires you to also enable and deploy the webhookBasedAutoscaler onto your cluster.
Note that the added runners remain until the next sync period at least,
and they may or may not be used by GitHub Actions depending on the timing.
They are intended to be used to gain "resource slack" immediately after you
receive a webhook from GitHub, so that you can loosely expect MinReplicas runners to be always available.
items:
properties:
amount:
type: integer
duration:
type: string
githubEvent:
properties:
checkRun:
description: https://docs.github.com/en/actions/reference/events-that-trigger-workflows#check_run
properties:
names:
description: |-
Names is a list of GitHub Actions glob patterns.
Any check_run event whose name matches one of patterns in the list can trigger autoscaling.
Note that check_run name seem to equal to the job name you've defined in your actions workflow yaml file.
So it is very likely that you can utilize this to trigger depending on the job.
items:
type: string
type: array
repositories:
description: |-
Repositories is a list of GitHub repositories.
Any check_run event whose repository matches one of repositories in the list can trigger autoscaling.
items:
type: string
type: array
status:
type: string
types:
description: 'One of: created, rerequested, or completed'
items:
type: string
type: array
type: object
pullRequest:
description: https://docs.github.com/en/actions/reference/events-that-trigger-workflows#pull_request
properties:
branches:
items:
type: string
type: array
types:
items:
type: string
type: array
type: object
push:
description: |-
PushSpec is the condition for triggering scale-up on push event
Also see https://docs.github.com/en/actions/reference/events-that-trigger-workflows#push
type: object
workflowJob:
description: https://docs.github.com/en/developers/webhooks-and-events/webhooks/webhook-events-and-payloads#workflow_job
type: object
type: object
type: object
type: array
scheduledOverrides:
description: |-
ScheduledOverrides is the list of ScheduledOverride.
It can be used to override a few fields of HorizontalRunnerAutoscalerSpec on schedule.
The earlier a scheduled override is, the higher it is prioritized.
items:
description: |- description: |-
DesiredReplicas is the total number of desired, non-terminated and latest pods to be set for the primary RunnerSet ScheduledOverride can be used to override a few fields of HorizontalRunnerAutoscalerSpec on schedule.
This doesn't include outdated pods while upgrading the deployment and replacing the runnerset. A schedule can optionally be recurring, so that the corresponding override happens every day, week, month, or year.
type: integer properties:
lastSuccessfulScaleOutTime: endTime:
format: date-time description: EndTime is the time at which the first override
nullable: true ends.
type: string format: date-time
observedGeneration: type: string
description: |- minReplicas:
ObservedGeneration is the most recent generation observed for the target. It corresponds to e.g. description: |-
RunnerDeployment's generation, which is updated on mutation by the API Server. MinReplicas is the number of runners while overriding.
format: int64 If omitted, it doesn't override minReplicas.
type: integer minimum: 0
scheduledOverridesSummary: nullable: true
description: |- type: integer
ScheduledOverridesSummary is the summary of active and upcoming scheduled overrides to be shown in e.g. a column of a `kubectl get hra` output recurrenceRule:
for observability. properties:
type: string frequency:
type: object description: |-
type: object Frequency is the name of a predefined interval of each recurrence.
served: true The valid values are "Daily", "Weekly", "Monthly", and "Yearly".
storage: true If empty, the corresponding override happens only once.
subresources: enum:
status: {} - Daily
preserveUnknownFields: false - Weekly
- Monthly
- Yearly
type: string
untilTime:
description: |-
UntilTime is the time of the final recurrence.
If empty, the schedule recurs forever.
format: date-time
type: string
type: object
startTime:
description: StartTime is the time at which the first override
starts.
format: date-time
type: string
required:
- endTime
- startTime
type: object
type: array
type: object
status:
properties:
cacheEntries:
items:
properties:
expirationTime:
format: date-time
type: string
key:
type: string
value:
type: integer
type: object
type: array
desiredReplicas:
description: |-
DesiredReplicas is the total number of desired, non-terminated and latest pods to be set for the primary RunnerSet
This doesn't include outdated pods while upgrading the deployment and replacing the runnerset.
type: integer
lastSuccessfulScaleOutTime:
format: date-time
nullable: true
type: string
observedGeneration:
description: |-
ObservedGeneration is the most recent generation observed for the target. It corresponds to e.g.
RunnerDeployment's generation, which is updated on mutation by the API Server.
format: int64
type: integer
scheduledOverridesSummary:
description: |-
ScheduledOverridesSummary is the summary of active and upcoming scheduled overrides to be shown in e.g. a column of a `kubectl get hra` output
for observability.
type: string
type: object
type: object
served: true
storage: true
subresources:
status: {}

View File

@@ -9348,4 +9348,3 @@ spec:
storage: true storage: true
subresources: subresources:
status: {} status: {}
preserveUnknownFields: false

View File

@@ -9322,4 +9322,3 @@ spec:
storage: true storage: true
subresources: subresources:
status: {} status: {}
preserveUnknownFields: false

View File

@@ -9328,4 +9328,3 @@ spec:
storage: true storage: true
subresources: subresources:
status: {} status: {}
preserveUnknownFields: false

View File

@@ -8389,4 +8389,3 @@ spec:
storage: true storage: true
subresources: subresources:
status: {} status: {}
preserveUnknownFields: false

View File

@@ -32,6 +32,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/reconcile"
v1alpha1 "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1" v1alpha1 "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1/appconfig"
"github.com/actions/actions-runner-controller/controllers/actions.github.com/metrics" "github.com/actions/actions-runner-controller/controllers/actions.github.com/metrics"
"github.com/actions/actions-runner-controller/github/actions" "github.com/actions/actions-runner-controller/github/actions"
hash "github.com/actions/actions-runner-controller/hash" hash "github.com/actions/actions-runner-controller/hash"
@@ -77,7 +78,7 @@ func (r *AutoscalingListenerReconciler) Reconcile(ctx context.Context, req ctrl.
return ctrl.Result{}, client.IgnoreNotFound(err) return ctrl.Result{}, client.IgnoreNotFound(err)
} }
if !autoscalingListener.ObjectMeta.DeletionTimestamp.IsZero() { if !autoscalingListener.DeletionTimestamp.IsZero() {
if !controllerutil.ContainsFinalizer(autoscalingListener, autoscalingListenerFinalizerName) { if !controllerutil.ContainsFinalizer(autoscalingListener, autoscalingListenerFinalizerName) {
return ctrl.Result{}, nil return ctrl.Result{}, nil
} }
@@ -128,41 +129,24 @@ func (r *AutoscalingListenerReconciler) Reconcile(ctx context.Context, req ctrl.
return ctrl.Result{}, err return ctrl.Result{}, err
} }
// Check if the GitHub config secret exists appConfig, err := r.GetAppConfig(ctx, &autoscalingRunnerSet)
secret := new(corev1.Secret) if err != nil {
if err := r.Get(ctx, types.NamespacedName{Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace, Name: autoscalingListener.Spec.GitHubConfigSecret}, secret); err != nil { log.Error(
log.Error(err, "Failed to find GitHub config secret.", err,
"namespace", autoscalingListener.Spec.AutoscalingRunnerSetNamespace, "Failed to get app config for AutoscalingRunnerSet.",
"name", autoscalingListener.Spec.GitHubConfigSecret) "namespace",
autoscalingRunnerSet.Namespace,
"name",
autoscalingRunnerSet.GitHubConfigSecret,
)
return ctrl.Result{}, err return ctrl.Result{}, err
} }
// Create a mirror secret in the same namespace as the AutoscalingListener
mirrorSecret := new(corev1.Secret)
if err := r.Get(ctx, types.NamespacedName{Namespace: autoscalingListener.Namespace, Name: scaleSetListenerSecretMirrorName(autoscalingListener)}, mirrorSecret); err != nil {
if !kerrors.IsNotFound(err) {
log.Error(err, "Unable to get listener secret mirror", "namespace", autoscalingListener.Namespace, "name", scaleSetListenerSecretMirrorName(autoscalingListener))
return ctrl.Result{}, err
}
// Create a mirror secret for the listener pod in the Controller namespace for listener pod to use
log.Info("Creating a mirror listener secret for the listener pod")
return r.createSecretsForListener(ctx, autoscalingListener, secret, log)
}
// make sure the mirror secret is up to date
mirrorSecretDataHash := mirrorSecret.Labels["secret-data-hash"]
secretDataHash := hash.ComputeTemplateHash(secret.Data)
if mirrorSecretDataHash != secretDataHash {
log.Info("Updating mirror listener secret for the listener pod", "mirrorSecretDataHash", mirrorSecretDataHash, "secretDataHash", secretDataHash)
return r.updateSecretsForListener(ctx, secret, mirrorSecret, log)
}
// Make sure the runner scale set listener service account is created for the listener pod in the controller namespace // Make sure the runner scale set listener service account is created for the listener pod in the controller namespace
serviceAccount := new(corev1.ServiceAccount) serviceAccount := new(corev1.ServiceAccount)
if err := r.Get(ctx, types.NamespacedName{Namespace: autoscalingListener.Namespace, Name: scaleSetListenerServiceAccountName(autoscalingListener)}, serviceAccount); err != nil { if err := r.Get(ctx, types.NamespacedName{Namespace: autoscalingListener.Namespace, Name: autoscalingListener.Name}, serviceAccount); err != nil {
if !kerrors.IsNotFound(err) { if !kerrors.IsNotFound(err) {
log.Error(err, "Unable to get listener service accounts", "namespace", autoscalingListener.Namespace, "name", scaleSetListenerServiceAccountName(autoscalingListener)) log.Error(err, "Unable to get listener service accounts", "namespace", autoscalingListener.Namespace, "name", autoscalingListener.Name)
return ctrl.Result{}, err return ctrl.Result{}, err
} }
@@ -175,9 +159,9 @@ func (r *AutoscalingListenerReconciler) Reconcile(ctx context.Context, req ctrl.
// Make sure the runner scale set listener role is created in the AutoscalingRunnerSet namespace // Make sure the runner scale set listener role is created in the AutoscalingRunnerSet namespace
listenerRole := new(rbacv1.Role) listenerRole := new(rbacv1.Role)
if err := r.Get(ctx, types.NamespacedName{Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace, Name: scaleSetListenerRoleName(autoscalingListener)}, listenerRole); err != nil { if err := r.Get(ctx, types.NamespacedName{Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace, Name: autoscalingListener.Name}, listenerRole); err != nil {
if !kerrors.IsNotFound(err) { if !kerrors.IsNotFound(err) {
log.Error(err, "Unable to get listener role", "namespace", autoscalingListener.Spec.AutoscalingRunnerSetNamespace, "name", scaleSetListenerRoleName(autoscalingListener)) log.Error(err, "Unable to get listener role", "namespace", autoscalingListener.Spec.AutoscalingRunnerSetNamespace, "name", autoscalingListener.Name)
return ctrl.Result{}, err return ctrl.Result{}, err
} }
@@ -197,9 +181,9 @@ func (r *AutoscalingListenerReconciler) Reconcile(ctx context.Context, req ctrl.
// Make sure the runner scale set listener role binding is created // Make sure the runner scale set listener role binding is created
listenerRoleBinding := new(rbacv1.RoleBinding) listenerRoleBinding := new(rbacv1.RoleBinding)
if err := r.Get(ctx, types.NamespacedName{Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace, Name: scaleSetListenerRoleName(autoscalingListener)}, listenerRoleBinding); err != nil { if err := r.Get(ctx, types.NamespacedName{Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace, Name: autoscalingListener.Name}, listenerRoleBinding); err != nil {
if !kerrors.IsNotFound(err) { if !kerrors.IsNotFound(err) {
log.Error(err, "Unable to get listener role binding", "namespace", autoscalingListener.Spec.AutoscalingRunnerSetNamespace, "name", scaleSetListenerRoleName(autoscalingListener)) log.Error(err, "Unable to get listener role binding", "namespace", autoscalingListener.Spec.AutoscalingRunnerSetNamespace, "name", autoscalingListener.Name)
return ctrl.Result{}, err return ctrl.Result{}, err
} }
@@ -239,7 +223,7 @@ func (r *AutoscalingListenerReconciler) Reconcile(ctx context.Context, req ctrl.
// Create a listener pod in the controller namespace // Create a listener pod in the controller namespace
log.Info("Creating a listener pod") log.Info("Creating a listener pod")
return r.createListenerPod(ctx, &autoscalingRunnerSet, autoscalingListener, serviceAccount, mirrorSecret, log) return r.createListenerPod(ctx, &autoscalingRunnerSet, autoscalingListener, serviceAccount, appConfig, log)
} }
cs := listenerContainerStatus(listenerPod) cs := listenerContainerStatus(listenerPod)
@@ -260,6 +244,19 @@ func (r *AutoscalingListenerReconciler) Reconcile(ctx context.Context, req ctrl.
log.Error(err, "Unable to delete the listener pod", "namespace", listenerPod.Namespace, "name", listenerPod.Name) log.Error(err, "Unable to delete the listener pod", "namespace", listenerPod.Namespace, "name", listenerPod.Name)
return ctrl.Result{}, err return ctrl.Result{}, err
} }
// delete the listener config secret as well, so it gets recreated when the listener pod is recreated, with any new data if it exists
var configSecret corev1.Secret
err := r.Get(ctx, types.NamespacedName{Namespace: autoscalingListener.Namespace, Name: scaleSetListenerConfigName(autoscalingListener)}, &configSecret)
switch {
case err == nil && configSecret.DeletionTimestamp.IsZero():
log.Info("Deleting the listener config secret")
if err := r.Delete(ctx, &configSecret); err != nil {
return ctrl.Result{}, fmt.Errorf("failed to delete listener config secret: %w", err)
}
case !kerrors.IsNotFound(err):
return ctrl.Result{}, fmt.Errorf("failed to get the listener config secret: %w", err)
}
} }
return ctrl.Result{}, nil return ctrl.Result{}, nil
case cs.State.Running != nil: case cs.State.Running != nil:
@@ -281,7 +278,7 @@ func (r *AutoscalingListenerReconciler) cleanupResources(ctx context.Context, au
err = r.Get(ctx, types.NamespacedName{Name: autoscalingListener.Name, Namespace: autoscalingListener.Namespace}, listenerPod) err = r.Get(ctx, types.NamespacedName{Name: autoscalingListener.Name, Namespace: autoscalingListener.Namespace}, listenerPod)
switch { switch {
case err == nil: case err == nil:
if listenerPod.ObjectMeta.DeletionTimestamp.IsZero() { if listenerPod.DeletionTimestamp.IsZero() {
logger.Info("Deleting the listener pod") logger.Info("Deleting the listener pod")
if err := r.Delete(ctx, listenerPod); err != nil { if err := r.Delete(ctx, listenerPod); err != nil {
return false, fmt.Errorf("failed to delete listener pod: %w", err) return false, fmt.Errorf("failed to delete listener pod: %w", err)
@@ -299,7 +296,7 @@ func (r *AutoscalingListenerReconciler) cleanupResources(ctx context.Context, au
err = r.Get(ctx, types.NamespacedName{Namespace: autoscalingListener.Namespace, Name: scaleSetListenerConfigName(autoscalingListener)}, &secret) err = r.Get(ctx, types.NamespacedName{Namespace: autoscalingListener.Namespace, Name: scaleSetListenerConfigName(autoscalingListener)}, &secret)
switch { switch {
case err == nil: case err == nil:
if secret.ObjectMeta.DeletionTimestamp.IsZero() { if secret.DeletionTimestamp.IsZero() {
logger.Info("Deleting the listener config secret") logger.Info("Deleting the listener config secret")
if err := r.Delete(ctx, &secret); err != nil { if err := r.Delete(ctx, &secret); err != nil {
return false, fmt.Errorf("failed to delete listener config secret: %w", err) return false, fmt.Errorf("failed to delete listener config secret: %w", err)
@@ -316,7 +313,7 @@ func (r *AutoscalingListenerReconciler) cleanupResources(ctx context.Context, au
err = r.Get(ctx, types.NamespacedName{Name: proxyListenerSecretName(autoscalingListener), Namespace: autoscalingListener.Namespace}, proxySecret) err = r.Get(ctx, types.NamespacedName{Name: proxyListenerSecretName(autoscalingListener), Namespace: autoscalingListener.Namespace}, proxySecret)
switch { switch {
case err == nil: case err == nil:
if proxySecret.ObjectMeta.DeletionTimestamp.IsZero() { if proxySecret.DeletionTimestamp.IsZero() {
logger.Info("Deleting the listener proxy secret") logger.Info("Deleting the listener proxy secret")
if err := r.Delete(ctx, proxySecret); err != nil { if err := r.Delete(ctx, proxySecret); err != nil {
return false, fmt.Errorf("failed to delete listener proxy secret: %w", err) return false, fmt.Errorf("failed to delete listener proxy secret: %w", err)
@@ -330,10 +327,10 @@ func (r *AutoscalingListenerReconciler) cleanupResources(ctx context.Context, au
} }
listenerRoleBinding := new(rbacv1.RoleBinding) listenerRoleBinding := new(rbacv1.RoleBinding)
err = r.Get(ctx, types.NamespacedName{Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace, Name: scaleSetListenerRoleName(autoscalingListener)}, listenerRoleBinding) err = r.Get(ctx, types.NamespacedName{Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace, Name: autoscalingListener.Name}, listenerRoleBinding)
switch { switch {
case err == nil: case err == nil:
if listenerRoleBinding.ObjectMeta.DeletionTimestamp.IsZero() { if listenerRoleBinding.DeletionTimestamp.IsZero() {
logger.Info("Deleting the listener role binding") logger.Info("Deleting the listener role binding")
if err := r.Delete(ctx, listenerRoleBinding); err != nil { if err := r.Delete(ctx, listenerRoleBinding); err != nil {
return false, fmt.Errorf("failed to delete listener role binding: %w", err) return false, fmt.Errorf("failed to delete listener role binding: %w", err)
@@ -346,10 +343,10 @@ func (r *AutoscalingListenerReconciler) cleanupResources(ctx context.Context, au
logger.Info("Listener role binding is deleted") logger.Info("Listener role binding is deleted")
listenerRole := new(rbacv1.Role) listenerRole := new(rbacv1.Role)
err = r.Get(ctx, types.NamespacedName{Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace, Name: scaleSetListenerRoleName(autoscalingListener)}, listenerRole) err = r.Get(ctx, types.NamespacedName{Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace, Name: autoscalingListener.Name}, listenerRole)
switch { switch {
case err == nil: case err == nil:
if listenerRole.ObjectMeta.DeletionTimestamp.IsZero() { if listenerRole.DeletionTimestamp.IsZero() {
logger.Info("Deleting the listener role") logger.Info("Deleting the listener role")
if err := r.Delete(ctx, listenerRole); err != nil { if err := r.Delete(ctx, listenerRole); err != nil {
return false, fmt.Errorf("failed to delete listener role: %w", err) return false, fmt.Errorf("failed to delete listener role: %w", err)
@@ -363,10 +360,10 @@ func (r *AutoscalingListenerReconciler) cleanupResources(ctx context.Context, au
logger.Info("Cleaning up the listener service account") logger.Info("Cleaning up the listener service account")
listenerSa := new(corev1.ServiceAccount) listenerSa := new(corev1.ServiceAccount)
err = r.Get(ctx, types.NamespacedName{Name: scaleSetListenerServiceAccountName(autoscalingListener), Namespace: autoscalingListener.Namespace}, listenerSa) err = r.Get(ctx, types.NamespacedName{Name: autoscalingListener.Name, Namespace: autoscalingListener.Namespace}, listenerSa)
switch { switch {
case err == nil: case err == nil:
if listenerSa.ObjectMeta.DeletionTimestamp.IsZero() { if listenerSa.DeletionTimestamp.IsZero() {
logger.Info("Deleting the listener service account") logger.Info("Deleting the listener service account")
if err := r.Delete(ctx, listenerSa); err != nil { if err := r.Delete(ctx, listenerSa); err != nil {
return false, fmt.Errorf("failed to delete listener service account: %w", err) return false, fmt.Errorf("failed to delete listener service account: %w", err)
@@ -382,7 +379,7 @@ func (r *AutoscalingListenerReconciler) cleanupResources(ctx context.Context, au
} }
func (r *AutoscalingListenerReconciler) createServiceAccountForListener(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, logger logr.Logger) (ctrl.Result, error) { func (r *AutoscalingListenerReconciler) createServiceAccountForListener(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, logger logr.Logger) (ctrl.Result, error) {
newServiceAccount := r.ResourceBuilder.newScaleSetListenerServiceAccount(autoscalingListener) newServiceAccount := r.newScaleSetListenerServiceAccount(autoscalingListener)
if err := ctrl.SetControllerReference(autoscalingListener, newServiceAccount, r.Scheme); err != nil { if err := ctrl.SetControllerReference(autoscalingListener, newServiceAccount, r.Scheme); err != nil {
return ctrl.Result{}, err return ctrl.Result{}, err
@@ -398,7 +395,7 @@ func (r *AutoscalingListenerReconciler) createServiceAccountForListener(ctx cont
return ctrl.Result{}, nil return ctrl.Result{}, nil
} }
func (r *AutoscalingListenerReconciler) createListenerPod(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, autoscalingListener *v1alpha1.AutoscalingListener, serviceAccount *corev1.ServiceAccount, secret *corev1.Secret, logger logr.Logger) (ctrl.Result, error) { func (r *AutoscalingListenerReconciler) createListenerPod(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, autoscalingListener *v1alpha1.AutoscalingListener, serviceAccount *corev1.ServiceAccount, appConfig *appconfig.AppConfig, logger logr.Logger) (ctrl.Result, error) {
var envs []corev1.EnvVar var envs []corev1.EnvVar
if autoscalingListener.Spec.Proxy != nil { if autoscalingListener.Spec.Proxy != nil {
httpURL := corev1.EnvVar{ httpURL := corev1.EnvVar{
@@ -467,7 +464,7 @@ func (r *AutoscalingListenerReconciler) createListenerPod(ctx context.Context, a
logger.Info("Creating listener config secret") logger.Info("Creating listener config secret")
podConfig, err := r.ResourceBuilder.newScaleSetListenerConfig(autoscalingListener, secret, metricsConfig, cert) podConfig, err := r.newScaleSetListenerConfig(autoscalingListener, appConfig, metricsConfig, cert)
if err != nil { if err != nil {
logger.Error(err, "Failed to build listener config secret") logger.Error(err, "Failed to build listener config secret")
return ctrl.Result{}, err return ctrl.Result{}, err
@@ -486,7 +483,7 @@ func (r *AutoscalingListenerReconciler) createListenerPod(ctx context.Context, a
return ctrl.Result{Requeue: true}, nil return ctrl.Result{Requeue: true}, nil
} }
newPod, err := r.ResourceBuilder.newScaleSetListenerPod(autoscalingListener, &podConfig, serviceAccount, secret, metricsConfig, envs...) newPod, err := r.newScaleSetListenerPod(autoscalingListener, &podConfig, serviceAccount, metricsConfig, envs...)
if err != nil { if err != nil {
logger.Error(err, "Failed to build listener pod") logger.Error(err, "Failed to build listener pod")
return ctrl.Result{}, err return ctrl.Result{}, err
@@ -545,23 +542,6 @@ func (r *AutoscalingListenerReconciler) certificate(ctx context.Context, autosca
return certificate, nil return certificate, nil
} }
func (r *AutoscalingListenerReconciler) createSecretsForListener(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, secret *corev1.Secret, logger logr.Logger) (ctrl.Result, error) {
newListenerSecret := r.ResourceBuilder.newScaleSetListenerSecretMirror(autoscalingListener, secret)
if err := ctrl.SetControllerReference(autoscalingListener, newListenerSecret, r.Scheme); err != nil {
return ctrl.Result{}, err
}
logger.Info("Creating listener secret", "namespace", newListenerSecret.Namespace, "name", newListenerSecret.Name)
if err := r.Create(ctx, newListenerSecret); err != nil {
logger.Error(err, "Unable to create listener secret", "namespace", newListenerSecret.Namespace, "name", newListenerSecret.Name)
return ctrl.Result{}, err
}
logger.Info("Created listener secret", "namespace", newListenerSecret.Namespace, "name", newListenerSecret.Name)
return ctrl.Result{Requeue: true}, nil
}
func (r *AutoscalingListenerReconciler) createProxySecret(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, logger logr.Logger) (ctrl.Result, error) { func (r *AutoscalingListenerReconciler) createProxySecret(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, logger logr.Logger) (ctrl.Result, error) {
data, err := autoscalingListener.Spec.Proxy.ToSecretData(func(s string) (*corev1.Secret, error) { data, err := autoscalingListener.Spec.Proxy.ToSecretData(func(s string) (*corev1.Secret, error) {
var secret corev1.Secret var secret corev1.Secret
@@ -601,24 +581,8 @@ func (r *AutoscalingListenerReconciler) createProxySecret(ctx context.Context, a
return ctrl.Result{Requeue: true}, nil return ctrl.Result{Requeue: true}, nil
} }
func (r *AutoscalingListenerReconciler) updateSecretsForListener(ctx context.Context, secret *corev1.Secret, mirrorSecret *corev1.Secret, logger logr.Logger) (ctrl.Result, error) {
dataHash := hash.ComputeTemplateHash(secret.Data)
updatedMirrorSecret := mirrorSecret.DeepCopy()
updatedMirrorSecret.Labels["secret-data-hash"] = dataHash
updatedMirrorSecret.Data = secret.Data
logger.Info("Updating listener mirror secret", "namespace", updatedMirrorSecret.Namespace, "name", updatedMirrorSecret.Name, "hash", dataHash)
if err := r.Update(ctx, updatedMirrorSecret); err != nil {
logger.Error(err, "Unable to update listener mirror secret", "namespace", updatedMirrorSecret.Namespace, "name", updatedMirrorSecret.Name)
return ctrl.Result{}, err
}
logger.Info("Updated listener mirror secret", "namespace", updatedMirrorSecret.Namespace, "name", updatedMirrorSecret.Name, "hash", dataHash)
return ctrl.Result{Requeue: true}, nil
}
func (r *AutoscalingListenerReconciler) createRoleForListener(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, logger logr.Logger) (ctrl.Result, error) { func (r *AutoscalingListenerReconciler) createRoleForListener(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, logger logr.Logger) (ctrl.Result, error) {
newRole := r.ResourceBuilder.newScaleSetListenerRole(autoscalingListener) newRole := r.newScaleSetListenerRole(autoscalingListener)
logger.Info("Creating listener role", "namespace", newRole.Namespace, "name", newRole.Name, "rules", newRole.Rules) logger.Info("Creating listener role", "namespace", newRole.Namespace, "name", newRole.Name, "rules", newRole.Rules)
if err := r.Create(ctx, newRole); err != nil { if err := r.Create(ctx, newRole); err != nil {
@@ -646,7 +610,7 @@ func (r *AutoscalingListenerReconciler) updateRoleForListener(ctx context.Contex
} }
func (r *AutoscalingListenerReconciler) createRoleBindingForListener(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, listenerRole *rbacv1.Role, serviceAccount *corev1.ServiceAccount, logger logr.Logger) (ctrl.Result, error) { func (r *AutoscalingListenerReconciler) createRoleBindingForListener(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, listenerRole *rbacv1.Role, serviceAccount *corev1.ServiceAccount, logger logr.Logger) (ctrl.Result, error) {
newRoleBinding := r.ResourceBuilder.newScaleSetListenerRoleBinding(autoscalingListener, listenerRole, serviceAccount) newRoleBinding := r.newScaleSetListenerRoleBinding(autoscalingListener, listenerRole, serviceAccount)
logger.Info("Creating listener role binding", logger.Info("Creating listener role binding",
"namespace", newRoleBinding.Namespace, "namespace", newRoleBinding.Namespace,

View File

@@ -14,7 +14,8 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client"
logf "sigs.k8s.io/controller-runtime/pkg/log" logf "sigs.k8s.io/controller-runtime/pkg/log"
listenerconfig "github.com/actions/actions-runner-controller/cmd/ghalistener/config" ghalistenerconfig "github.com/actions/actions-runner-controller/cmd/ghalistener/config"
"github.com/actions/actions-runner-controller/github/actions/fake"
. "github.com/onsi/ginkgo/v2" . "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
kerrors "k8s.io/apimachinery/pkg/api/errors" kerrors "k8s.io/apimachinery/pkg/api/errors"
@@ -43,10 +44,17 @@ var _ = Describe("Test AutoScalingListener controller", func() {
autoscalingNS, mgr = createNamespace(GinkgoT(), k8sClient) autoscalingNS, mgr = createNamespace(GinkgoT(), k8sClient)
configSecret = createDefaultSecret(GinkgoT(), k8sClient, autoscalingNS.Name) configSecret = createDefaultSecret(GinkgoT(), k8sClient, autoscalingNS.Name)
secretResolver := NewSecretResolver(mgr.GetClient(), fake.NewMultiClient())
rb := ResourceBuilder{
SecretResolver: secretResolver,
}
controller := &AutoscalingListenerReconciler{ controller := &AutoscalingListenerReconciler{
Client: mgr.GetClient(), Client: mgr.GetClient(),
Scheme: mgr.GetScheme(), Scheme: mgr.GetScheme(),
Log: logf.Log, Log: logf.Log,
ResourceBuilder: rb,
} }
err := controller.SetupWithManager(mgr) err := controller.SetupWithManager(mgr)
Expect(err).NotTo(HaveOccurred(), "failed to setup controller") Expect(err).NotTo(HaveOccurred(), "failed to setup controller")
@@ -134,37 +142,25 @@ var _ = Describe("Test AutoScalingListener controller", func() {
autoscalingListenerTestTimeout, autoscalingListenerTestTimeout,
autoscalingListenerTestInterval).Should(BeEquivalentTo(autoscalingListenerFinalizerName), "AutoScalingListener should have a finalizer") autoscalingListenerTestInterval).Should(BeEquivalentTo(autoscalingListenerFinalizerName), "AutoScalingListener should have a finalizer")
// Check if secret is created
mirrorSecret := new(corev1.Secret)
Eventually(
func() (string, error) {
err := k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerSecretMirrorName(autoscalingListener), Namespace: autoscalingListener.Namespace}, mirrorSecret)
if err != nil {
return "", err
}
return string(mirrorSecret.Data["github_token"]), nil
},
autoscalingListenerTestTimeout,
autoscalingListenerTestInterval).Should(BeEquivalentTo(autoscalingListenerTestGitHubToken), "Mirror secret should be created")
// Check if service account is created // Check if service account is created
serviceAccount := new(corev1.ServiceAccount) serviceAccount := new(corev1.ServiceAccount)
Eventually( Eventually(
func() (string, error) { func() (string, error) {
err := k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerServiceAccountName(autoscalingListener), Namespace: autoscalingListener.Namespace}, serviceAccount) err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingListener.Name, Namespace: autoscalingListener.Namespace}, serviceAccount)
if err != nil { if err != nil {
return "", err return "", err
} }
return serviceAccount.Name, nil return serviceAccount.Name, nil
}, },
autoscalingListenerTestTimeout, autoscalingListenerTestTimeout,
autoscalingListenerTestInterval).Should(BeEquivalentTo(scaleSetListenerServiceAccountName(autoscalingListener)), "Service account should be created") autoscalingListenerTestInterval,
).Should(BeEquivalentTo(autoscalingListener.Name), "Service account should be created")
// Check if role is created // Check if role is created
role := new(rbacv1.Role) role := new(rbacv1.Role)
Eventually( Eventually(
func() ([]rbacv1.PolicyRule, error) { func() ([]rbacv1.PolicyRule, error) {
err := k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerRoleName(autoscalingListener), Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace}, role) err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingListener.Name, Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace}, role)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -178,7 +174,7 @@ var _ = Describe("Test AutoScalingListener controller", func() {
roleBinding := new(rbacv1.RoleBinding) roleBinding := new(rbacv1.RoleBinding)
Eventually( Eventually(
func() (string, error) { func() (string, error) {
err := k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerRoleName(autoscalingListener), Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace}, roleBinding) err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingListener.Name, Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace}, roleBinding)
if err != nil { if err != nil {
return "", err return "", err
} }
@@ -186,7 +182,7 @@ var _ = Describe("Test AutoScalingListener controller", func() {
return roleBinding.RoleRef.Name, nil return roleBinding.RoleRef.Name, nil
}, },
autoscalingListenerTestTimeout, autoscalingListenerTestTimeout,
autoscalingListenerTestInterval).Should(BeEquivalentTo(scaleSetListenerRoleName(autoscalingListener)), "Rolebinding should be created") autoscalingListenerTestInterval).Should(BeEquivalentTo(autoscalingListener.Name), "Rolebinding should be created")
// Check if pod is created // Check if pod is created
pod := new(corev1.Pod) pod := new(corev1.Pod)
@@ -248,7 +244,7 @@ var _ = Describe("Test AutoScalingListener controller", func() {
Eventually( Eventually(
func() bool { func() bool {
roleBinding := new(rbacv1.RoleBinding) roleBinding := new(rbacv1.RoleBinding)
err := k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerRoleName(autoscalingListener), Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace}, roleBinding) err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingListener.Name, Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace}, roleBinding)
return kerrors.IsNotFound(err) return kerrors.IsNotFound(err)
}, },
autoscalingListenerTestTimeout, autoscalingListenerTestTimeout,
@@ -259,7 +255,7 @@ var _ = Describe("Test AutoScalingListener controller", func() {
Eventually( Eventually(
func() bool { func() bool {
role := new(rbacv1.Role) role := new(rbacv1.Role)
err := k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerRoleName(autoscalingListener), Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace}, role) err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingListener.Name, Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace}, role)
return kerrors.IsNotFound(err) return kerrors.IsNotFound(err)
}, },
autoscalingListenerTestTimeout, autoscalingListenerTestTimeout,
@@ -340,7 +336,7 @@ var _ = Describe("Test AutoScalingListener controller", func() {
role := new(rbacv1.Role) role := new(rbacv1.Role)
Eventually( Eventually(
func() ([]rbacv1.PolicyRule, error) { func() ([]rbacv1.PolicyRule, error) {
err := k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerRoleName(autoscalingListener), Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace}, role) err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingListener.Name, Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace}, role)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -351,7 +347,7 @@ var _ = Describe("Test AutoScalingListener controller", func() {
autoscalingListenerTestInterval).Should(BeEquivalentTo(rulesForListenerRole([]string{updated.Spec.EphemeralRunnerSetName})), "Role should be updated") autoscalingListenerTestInterval).Should(BeEquivalentTo(rulesForListenerRole([]string{updated.Spec.EphemeralRunnerSetName})), "Role should be updated")
}) })
It("It should re-create pod whenever listener container is terminated", func() { It("It should re-create pod and config secret whenever listener container is terminated", func() {
// Waiting for the pod is created // Waiting for the pod is created
pod := new(corev1.Pod) pod := new(corev1.Pod)
Eventually( Eventually(
@@ -367,7 +363,18 @@ var _ = Describe("Test AutoScalingListener controller", func() {
autoscalingListenerTestInterval, autoscalingListenerTestInterval,
).Should(BeEquivalentTo(autoscalingListener.Name), "Pod should be created") ).Should(BeEquivalentTo(autoscalingListener.Name), "Pod should be created")
secret := new(corev1.Secret)
Eventually(
func() error {
return k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerConfigName(autoscalingListener), Namespace: autoscalingListener.Namespace}, secret)
},
autoscalingListenerTestTimeout,
autoscalingListenerTestInterval,
).Should(Succeed(), "Config secret should be created")
oldPodUID := string(pod.UID) oldPodUID := string(pod.UID)
oldSecretUID := string(secret.UID)
updated := pod.DeepCopy() updated := pod.DeepCopy()
updated.Status.ContainerStatuses = []corev1.ContainerStatus{ updated.Status.ContainerStatuses = []corev1.ContainerStatus{
{ {
@@ -396,75 +403,21 @@ var _ = Describe("Test AutoScalingListener controller", func() {
autoscalingListenerTestTimeout, autoscalingListenerTestTimeout,
autoscalingListenerTestInterval, autoscalingListenerTestInterval,
).ShouldNot(BeEquivalentTo(oldPodUID), "Pod should be re-created") ).ShouldNot(BeEquivalentTo(oldPodUID), "Pod should be re-created")
})
It("It should update mirror secrets to match secret used by AutoScalingRunnerSet", func() { // Check if config secret is re-created
// Waiting for the pod is created
pod := new(corev1.Pod)
Eventually( Eventually(
func() (string, error) { func() (string, error) {
err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingListener.Name, Namespace: autoscalingListener.Namespace}, pod) secret := new(corev1.Secret)
err := k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerConfigName(autoscalingListener), Namespace: autoscalingListener.Namespace}, secret)
if err != nil { if err != nil {
return "", err return "", err
} }
return pod.Name, nil return string(secret.UID), nil
}, },
autoscalingListenerTestTimeout, autoscalingListenerTestTimeout,
autoscalingListenerTestInterval).Should(BeEquivalentTo(autoscalingListener.Name), "Pod should be created") autoscalingListenerTestInterval,
).ShouldNot(BeEquivalentTo(oldSecretUID), "Config secret should be re-created")
// Update the secret
updatedSecret := configSecret.DeepCopy()
updatedSecret.Data["github_token"] = []byte(autoscalingListenerTestGitHubToken + "_updated")
err := k8sClient.Update(ctx, updatedSecret)
Expect(err).NotTo(HaveOccurred(), "failed to update test secret")
updatedPod := pod.DeepCopy()
// Ignore status running and consult the container state
updatedPod.Status.Phase = corev1.PodRunning
updatedPod.Status.ContainerStatuses = []corev1.ContainerStatus{
{
Name: autoscalingListenerContainerName,
State: corev1.ContainerState{
Terminated: &corev1.ContainerStateTerminated{
ExitCode: 1,
},
},
},
}
err = k8sClient.Status().Update(ctx, updatedPod)
Expect(err).NotTo(HaveOccurred(), "failed to update test pod to failed")
// Check if mirror secret is updated with right data
mirrorSecret := new(corev1.Secret)
Eventually(
func() (map[string][]byte, error) {
err := k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerSecretMirrorName(autoscalingListener), Namespace: autoscalingListener.Namespace}, mirrorSecret)
if err != nil {
return nil, err
}
return mirrorSecret.Data, nil
},
autoscalingListenerTestTimeout,
autoscalingListenerTestInterval).Should(BeEquivalentTo(updatedSecret.Data), "Mirror secret should be updated")
// Check if we re-created a new pod
Eventually(
func() error {
latestPod := new(corev1.Pod)
err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingListener.Name, Namespace: autoscalingListener.Namespace}, latestPod)
if err != nil {
return err
}
if latestPod.UID == pod.UID {
return fmt.Errorf("Pod should be recreated")
}
return nil
},
autoscalingListenerTestTimeout,
autoscalingListenerTestInterval).Should(Succeed(), "Pod should be recreated")
}) })
}) })
}) })
@@ -507,10 +460,17 @@ var _ = Describe("Test AutoScalingListener customization", func() {
autoscalingNS, mgr = createNamespace(GinkgoT(), k8sClient) autoscalingNS, mgr = createNamespace(GinkgoT(), k8sClient)
configSecret = createDefaultSecret(GinkgoT(), k8sClient, autoscalingNS.Name) configSecret = createDefaultSecret(GinkgoT(), k8sClient, autoscalingNS.Name)
secretResolver := NewSecretResolver(mgr.GetClient(), fake.NewMultiClient())
rb := ResourceBuilder{
SecretResolver: secretResolver,
}
controller := &AutoscalingListenerReconciler{ controller := &AutoscalingListenerReconciler{
Client: mgr.GetClient(), Client: mgr.GetClient(),
Scheme: mgr.GetScheme(), Scheme: mgr.GetScheme(),
Log: logf.Log, Log: logf.Log,
ResourceBuilder: rb,
} }
err := controller.SetupWithManager(mgr) err := controller.SetupWithManager(mgr)
Expect(err).NotTo(HaveOccurred(), "failed to setup controller") Expect(err).NotTo(HaveOccurred(), "failed to setup controller")
@@ -780,11 +740,17 @@ var _ = Describe("Test AutoScalingListener controller with proxy", func() {
ctx = context.Background() ctx = context.Background()
autoscalingNS, mgr = createNamespace(GinkgoT(), k8sClient) autoscalingNS, mgr = createNamespace(GinkgoT(), k8sClient)
configSecret = createDefaultSecret(GinkgoT(), k8sClient, autoscalingNS.Name) configSecret = createDefaultSecret(GinkgoT(), k8sClient, autoscalingNS.Name)
secretResolver := NewSecretResolver(mgr.GetClient(), fake.NewMultiClient())
rb := ResourceBuilder{
SecretResolver: secretResolver,
}
controller := &AutoscalingListenerReconciler{ controller := &AutoscalingListenerReconciler{
Client: mgr.GetClient(), Client: mgr.GetClient(),
Scheme: mgr.GetScheme(), Scheme: mgr.GetScheme(),
Log: logf.Log, Log: logf.Log,
ResourceBuilder: rb,
} }
err := controller.SetupWithManager(mgr) err := controller.SetupWithManager(mgr)
Expect(err).NotTo(HaveOccurred(), "failed to setup controller") Expect(err).NotTo(HaveOccurred(), "failed to setup controller")
@@ -977,10 +943,17 @@ var _ = Describe("Test AutoScalingListener controller with template modification
autoscalingNS, mgr = createNamespace(GinkgoT(), k8sClient) autoscalingNS, mgr = createNamespace(GinkgoT(), k8sClient)
configSecret = createDefaultSecret(GinkgoT(), k8sClient, autoscalingNS.Name) configSecret = createDefaultSecret(GinkgoT(), k8sClient, autoscalingNS.Name)
secretResolver := NewSecretResolver(mgr.GetClient(), fake.NewMultiClient())
rb := ResourceBuilder{
SecretResolver: secretResolver,
}
controller := &AutoscalingListenerReconciler{ controller := &AutoscalingListenerReconciler{
Client: mgr.GetClient(), Client: mgr.GetClient(),
Scheme: mgr.GetScheme(), Scheme: mgr.GetScheme(),
Log: logf.Log, Log: logf.Log,
ResourceBuilder: rb,
} }
err := controller.SetupWithManager(mgr) err := controller.SetupWithManager(mgr)
Expect(err).NotTo(HaveOccurred(), "failed to setup controller") Expect(err).NotTo(HaveOccurred(), "failed to setup controller")
@@ -1073,6 +1046,12 @@ var _ = Describe("Test GitHub Server TLS configuration", func() {
autoscalingNS, mgr = createNamespace(GinkgoT(), k8sClient) autoscalingNS, mgr = createNamespace(GinkgoT(), k8sClient)
configSecret = createDefaultSecret(GinkgoT(), k8sClient, autoscalingNS.Name) configSecret = createDefaultSecret(GinkgoT(), k8sClient, autoscalingNS.Name)
secretResolver := NewSecretResolver(mgr.GetClient(), fake.NewMultiClient())
rb := ResourceBuilder{
SecretResolver: secretResolver,
}
cert, err := os.ReadFile(filepath.Join( cert, err := os.ReadFile(filepath.Join(
"../../", "../../",
"github", "github",
@@ -1094,9 +1073,10 @@ var _ = Describe("Test GitHub Server TLS configuration", func() {
Expect(err).NotTo(HaveOccurred(), "failed to create configmap with root CAs") Expect(err).NotTo(HaveOccurred(), "failed to create configmap with root CAs")
controller := &AutoscalingListenerReconciler{ controller := &AutoscalingListenerReconciler{
Client: mgr.GetClient(), Client: mgr.GetClient(),
Scheme: mgr.GetScheme(), Scheme: mgr.GetScheme(),
Log: logf.Log, Log: logf.Log,
ResourceBuilder: rb,
} }
err = controller.SetupWithManager(mgr) err = controller.SetupWithManager(mgr)
Expect(err).NotTo(HaveOccurred(), "failed to setup controller") Expect(err).NotTo(HaveOccurred(), "failed to setup controller")
@@ -1111,7 +1091,7 @@ var _ = Describe("Test GitHub Server TLS configuration", func() {
Spec: v1alpha1.AutoscalingRunnerSetSpec{ Spec: v1alpha1.AutoscalingRunnerSetSpec{
GitHubConfigUrl: "https://github.com/owner/repo", GitHubConfigUrl: "https://github.com/owner/repo",
GitHubConfigSecret: configSecret.Name, GitHubConfigSecret: configSecret.Name,
GitHubServerTLS: &v1alpha1.GitHubServerTLSConfig{ GitHubServerTLS: &v1alpha1.TLSConfig{
CertificateFrom: &v1alpha1.TLSCertificateSource{ CertificateFrom: &v1alpha1.TLSCertificateSource{
ConfigMapKeyRef: &corev1.ConfigMapKeySelector{ ConfigMapKeyRef: &corev1.ConfigMapKeySelector{
LocalObjectReference: corev1.LocalObjectReference{ LocalObjectReference: corev1.LocalObjectReference{
@@ -1147,7 +1127,7 @@ var _ = Describe("Test GitHub Server TLS configuration", func() {
Spec: v1alpha1.AutoscalingListenerSpec{ Spec: v1alpha1.AutoscalingListenerSpec{
GitHubConfigUrl: "https://github.com/owner/repo", GitHubConfigUrl: "https://github.com/owner/repo",
GitHubConfigSecret: configSecret.Name, GitHubConfigSecret: configSecret.Name,
GitHubServerTLS: &v1alpha1.GitHubServerTLSConfig{ GitHubServerTLS: &v1alpha1.TLSConfig{
CertificateFrom: &v1alpha1.TLSCertificateSource{ CertificateFrom: &v1alpha1.TLSCertificateSource{
ConfigMapKeyRef: &corev1.ConfigMapKeySelector{ ConfigMapKeyRef: &corev1.ConfigMapKeySelector{
LocalObjectReference: corev1.LocalObjectReference{ LocalObjectReference: corev1.LocalObjectReference{
@@ -1191,7 +1171,7 @@ var _ = Describe("Test GitHub Server TLS configuration", func() {
g.Expect(config.Data["config.json"]).ToNot(BeEmpty(), "listener configuration file should not be empty") g.Expect(config.Data["config.json"]).ToNot(BeEmpty(), "listener configuration file should not be empty")
var listenerConfig listenerconfig.Config var listenerConfig ghalistenerconfig.Config
err = json.Unmarshal(config.Data["config.json"], &listenerConfig) err = json.Unmarshal(config.Data["config.json"], &listenerConfig)
g.Expect(err).NotTo(HaveOccurred(), "failed to parse listener configuration file") g.Expect(err).NotTo(HaveOccurred(), "failed to parse listener configuration file")

View File

@@ -99,7 +99,7 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl
return ctrl.Result{}, client.IgnoreNotFound(err) return ctrl.Result{}, client.IgnoreNotFound(err)
} }
if !autoscalingRunnerSet.ObjectMeta.DeletionTimestamp.IsZero() { if !autoscalingRunnerSet.DeletionTimestamp.IsZero() {
if !controllerutil.ContainsFinalizer(autoscalingRunnerSet, autoscalingRunnerSetFinalizerName) { if !controllerutil.ContainsFinalizer(autoscalingRunnerSet, autoscalingRunnerSetFinalizerName) {
return ctrl.Result{}, nil return ctrl.Result{}, nil
} }
@@ -151,7 +151,7 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl
return ctrl.Result{}, nil return ctrl.Result{}, nil
} }
if autoscalingRunnerSet.Labels[LabelKeyKubernetesVersion] != build.Version { if !v1alpha1.IsVersionAllowed(autoscalingRunnerSet.Labels[LabelKeyKubernetesVersion], build.Version) {
if err := r.Delete(ctx, autoscalingRunnerSet); err != nil { if err := r.Delete(ctx, autoscalingRunnerSet); err != nil {
log.Error(err, "Failed to delete autoscaling runner set on version mismatch", log.Error(err, "Failed to delete autoscaling runner set on version mismatch",
"buildVersion", build.Version, "buildVersion", build.Version,
@@ -207,14 +207,6 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl
return r.updateRunnerScaleSetName(ctx, autoscalingRunnerSet, log) return r.updateRunnerScaleSetName(ctx, autoscalingRunnerSet, log)
} }
secret := new(corev1.Secret)
if err := r.Get(ctx, types.NamespacedName{Namespace: autoscalingRunnerSet.Namespace, Name: autoscalingRunnerSet.Spec.GitHubConfigSecret}, secret); err != nil {
log.Error(err, "Failed to find GitHub config secret.",
"namespace", autoscalingRunnerSet.Namespace,
"name", autoscalingRunnerSet.Spec.GitHubConfigSecret)
return ctrl.Result{}, err
}
existingRunnerSets, err := r.listEphemeralRunnerSets(ctx, autoscalingRunnerSet) existingRunnerSets, err := r.listEphemeralRunnerSets(ctx, autoscalingRunnerSet)
if err != nil { if err != nil {
log.Error(err, "Failed to list existing ephemeral runner sets") log.Error(err, "Failed to list existing ephemeral runner sets")
@@ -332,7 +324,7 @@ func (r *AutoscalingRunnerSetReconciler) cleanupListener(ctx context.Context, au
err = r.Get(ctx, client.ObjectKey{Namespace: r.ControllerNamespace, Name: scaleSetListenerName(autoscalingRunnerSet)}, &listener) err = r.Get(ctx, client.ObjectKey{Namespace: r.ControllerNamespace, Name: scaleSetListenerName(autoscalingRunnerSet)}, &listener)
switch { switch {
case err == nil: case err == nil:
if listener.ObjectMeta.DeletionTimestamp.IsZero() { if listener.DeletionTimestamp.IsZero() {
logger.Info("Deleting the listener") logger.Info("Deleting the listener")
if err := r.Delete(ctx, &listener); err != nil { if err := r.Delete(ctx, &listener); err != nil {
return false, fmt.Errorf("failed to delete listener: %w", err) return false, fmt.Errorf("failed to delete listener: %w", err)
@@ -369,7 +361,7 @@ func (r *AutoscalingRunnerSetReconciler) deleteEphemeralRunnerSets(ctx context.C
for i := range oldRunnerSets { for i := range oldRunnerSets {
rs := &oldRunnerSets[i] rs := &oldRunnerSets[i]
// already deleted but contains finalizer so it still exists // already deleted but contains finalizer so it still exists
if !rs.ObjectMeta.DeletionTimestamp.IsZero() { if !rs.DeletionTimestamp.IsZero() {
logger.Info("Skip ephemeral runner set since it is already marked for deletion", "name", rs.Name) logger.Info("Skip ephemeral runner set since it is already marked for deletion", "name", rs.Name)
continue continue
} }
@@ -402,12 +394,12 @@ func (r *AutoscalingRunnerSetReconciler) removeFinalizersFromDependentResources(
func (r *AutoscalingRunnerSetReconciler) createRunnerScaleSet(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) (ctrl.Result, error) { func (r *AutoscalingRunnerSetReconciler) createRunnerScaleSet(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) (ctrl.Result, error) {
logger.Info("Creating a new runner scale set") logger.Info("Creating a new runner scale set")
actionsClient, err := r.actionsClientFor(ctx, autoscalingRunnerSet) actionsClient, err := r.GetActionsService(ctx, autoscalingRunnerSet)
if len(autoscalingRunnerSet.Spec.RunnerScaleSetName) == 0 { if len(autoscalingRunnerSet.Spec.RunnerScaleSetName) == 0 {
autoscalingRunnerSet.Spec.RunnerScaleSetName = autoscalingRunnerSet.Name autoscalingRunnerSet.Spec.RunnerScaleSetName = autoscalingRunnerSet.Name
} }
if err != nil { if err != nil {
logger.Error(err, "Failed to initialize Actions service client for creating a new runner scale set") logger.Error(err, "Failed to initialize Actions service client for creating a new runner scale set", "error", err.Error())
return ctrl.Result{}, err return ctrl.Result{}, err
} }
@@ -498,7 +490,7 @@ func (r *AutoscalingRunnerSetReconciler) updateRunnerScaleSetRunnerGroup(ctx con
return ctrl.Result{}, err return ctrl.Result{}, err
} }
actionsClient, err := r.actionsClientFor(ctx, autoscalingRunnerSet) actionsClient, err := r.GetActionsService(ctx, autoscalingRunnerSet)
if err != nil { if err != nil {
logger.Error(err, "Failed to initialize Actions service client for updating a existing runner scale set") logger.Error(err, "Failed to initialize Actions service client for updating a existing runner scale set")
return ctrl.Result{}, err return ctrl.Result{}, err
@@ -546,7 +538,7 @@ func (r *AutoscalingRunnerSetReconciler) updateRunnerScaleSetName(ctx context.Co
return ctrl.Result{}, nil return ctrl.Result{}, nil
} }
actionsClient, err := r.actionsClientFor(ctx, autoscalingRunnerSet) actionsClient, err := r.GetActionsService(ctx, autoscalingRunnerSet)
if err != nil { if err != nil {
logger.Error(err, "Failed to initialize Actions service client for updating a existing runner scale set") logger.Error(err, "Failed to initialize Actions service client for updating a existing runner scale set")
return ctrl.Result{}, err return ctrl.Result{}, err
@@ -597,7 +589,7 @@ func (r *AutoscalingRunnerSetReconciler) deleteRunnerScaleSet(ctx context.Contex
return nil return nil
} }
actionsClient, err := r.actionsClientFor(ctx, autoscalingRunnerSet) actionsClient, err := r.GetActionsService(ctx, autoscalingRunnerSet)
if err != nil { if err != nil {
logger.Error(err, "Failed to initialize Actions service client for updating a existing runner scale set") logger.Error(err, "Failed to initialize Actions service client for updating a existing runner scale set")
return err return err
@@ -622,7 +614,7 @@ func (r *AutoscalingRunnerSetReconciler) deleteRunnerScaleSet(ctx context.Contex
} }
func (r *AutoscalingRunnerSetReconciler) createEphemeralRunnerSet(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, log logr.Logger) (ctrl.Result, error) { func (r *AutoscalingRunnerSetReconciler) createEphemeralRunnerSet(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, log logr.Logger) (ctrl.Result, error) {
desiredRunnerSet, err := r.ResourceBuilder.newEphemeralRunnerSet(autoscalingRunnerSet) desiredRunnerSet, err := r.newEphemeralRunnerSet(autoscalingRunnerSet)
if err != nil { if err != nil {
log.Error(err, "Could not create EphemeralRunnerSet") log.Error(err, "Could not create EphemeralRunnerSet")
return ctrl.Result{}, err return ctrl.Result{}, err
@@ -651,7 +643,7 @@ func (r *AutoscalingRunnerSetReconciler) createAutoScalingListenerForRunnerSet(c
}) })
} }
autoscalingListener, err := r.ResourceBuilder.newAutoScalingListener(autoscalingRunnerSet, ephemeralRunnerSet, r.ControllerNamespace, r.DefaultRunnerScaleSetListenerImage, imagePullSecrets) autoscalingListener, err := r.newAutoScalingListener(autoscalingRunnerSet, ephemeralRunnerSet, r.ControllerNamespace, r.DefaultRunnerScaleSetListenerImage, imagePullSecrets)
if err != nil { if err != nil {
log.Error(err, "Could not create AutoscalingListener spec") log.Error(err, "Could not create AutoscalingListener spec")
return ctrl.Result{}, err return ctrl.Result{}, err
@@ -676,74 +668,6 @@ func (r *AutoscalingRunnerSetReconciler) listEphemeralRunnerSets(ctx context.Con
return &EphemeralRunnerSets{list: list}, nil return &EphemeralRunnerSets{list: list}, nil
} }
func (r *AutoscalingRunnerSetReconciler) actionsClientFor(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet) (actions.ActionsService, error) {
var configSecret corev1.Secret
if err := r.Get(ctx, types.NamespacedName{Namespace: autoscalingRunnerSet.Namespace, Name: autoscalingRunnerSet.Spec.GitHubConfigSecret}, &configSecret); err != nil {
return nil, fmt.Errorf("failed to find GitHub config secret: %w", err)
}
opts, err := r.actionsClientOptionsFor(ctx, autoscalingRunnerSet)
if err != nil {
return nil, fmt.Errorf("failed to get actions client options: %w", err)
}
return r.ActionsClient.GetClientFromSecret(
ctx,
autoscalingRunnerSet.Spec.GitHubConfigUrl,
autoscalingRunnerSet.Namespace,
configSecret.Data,
opts...,
)
}
func (r *AutoscalingRunnerSetReconciler) actionsClientOptionsFor(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet) ([]actions.ClientOption, error) {
var options []actions.ClientOption
if autoscalingRunnerSet.Spec.Proxy != nil {
proxyFunc, err := autoscalingRunnerSet.Spec.Proxy.ProxyFunc(func(s string) (*corev1.Secret, error) {
var secret corev1.Secret
err := r.Get(ctx, types.NamespacedName{Namespace: autoscalingRunnerSet.Namespace, Name: s}, &secret)
if err != nil {
return nil, fmt.Errorf("failed to get proxy secret %s: %w", s, err)
}
return &secret, nil
})
if err != nil {
return nil, fmt.Errorf("failed to get proxy func: %w", err)
}
options = append(options, actions.WithProxy(proxyFunc))
}
tlsConfig := autoscalingRunnerSet.Spec.GitHubServerTLS
if tlsConfig != nil {
pool, err := tlsConfig.ToCertPool(func(name, key string) ([]byte, error) {
var configmap corev1.ConfigMap
err := r.Get(
ctx,
types.NamespacedName{
Namespace: autoscalingRunnerSet.Namespace,
Name: name,
},
&configmap,
)
if err != nil {
return nil, fmt.Errorf("failed to get configmap %s: %w", name, err)
}
return []byte(configmap.Data[key]), nil
})
if err != nil {
return nil, fmt.Errorf("failed to get tls config: %w", err)
}
options = append(options, actions.WithRootCAs(pool))
}
return options, nil
}
// SetupWithManager sets up the controller with the Manager. // SetupWithManager sets up the controller with the Manager.
func (r *AutoscalingRunnerSetReconciler) SetupWithManager(mgr ctrl.Manager) error { func (r *AutoscalingRunnerSetReconciler) SetupWithManager(mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr). return ctrl.NewControllerManagedBy(mgr).

View File

@@ -70,7 +70,12 @@ var _ = Describe("Test AutoScalingRunnerSet controller", Ordered, func() {
Log: logf.Log, Log: logf.Log,
ControllerNamespace: autoscalingNS.Name, ControllerNamespace: autoscalingNS.Name,
DefaultRunnerScaleSetListenerImage: "ghcr.io/actions/arc", DefaultRunnerScaleSetListenerImage: "ghcr.io/actions/arc",
ActionsClient: fake.NewMultiClient(), ResourceBuilder: ResourceBuilder{
SecretResolver: &SecretResolver{
k8sClient: k8sClient,
multiClient: fake.NewMultiClient(),
},
},
} }
err := controller.SetupWithManager(mgr) err := controller.SetupWithManager(mgr)
Expect(err).NotTo(HaveOccurred(), "failed to setup controller") Expect(err).NotTo(HaveOccurred(), "failed to setup controller")
@@ -280,10 +285,10 @@ var _ = Describe("Test AutoScalingRunnerSet controller", Ordered, func() {
// This should trigger re-creation of EphemeralRunnerSet and Listener // This should trigger re-creation of EphemeralRunnerSet and Listener
patched := autoscalingRunnerSet.DeepCopy() patched := autoscalingRunnerSet.DeepCopy()
patched.Spec.Template.Spec.PriorityClassName = "test-priority-class" patched.Spec.Template.Spec.PriorityClassName = "test-priority-class"
if patched.ObjectMeta.Annotations == nil { if patched.Annotations == nil {
patched.ObjectMeta.Annotations = make(map[string]string) patched.Annotations = make(map[string]string)
} }
patched.ObjectMeta.Annotations[annotationKeyValuesHash] = "test-hash" patched.Annotations[annotationKeyValuesHash] = "test-hash"
err = k8sClient.Patch(ctx, patched, client.MergeFrom(autoscalingRunnerSet)) err = k8sClient.Patch(ctx, patched, client.MergeFrom(autoscalingRunnerSet))
Expect(err).NotTo(HaveOccurred(), "failed to patch AutoScalingRunnerSet") Expect(err).NotTo(HaveOccurred(), "failed to patch AutoScalingRunnerSet")
autoscalingRunnerSet = patched.DeepCopy() autoscalingRunnerSet = patched.DeepCopy()
@@ -383,7 +388,7 @@ var _ = Describe("Test AutoScalingRunnerSet controller", Ordered, func() {
Expect(err).NotTo(HaveOccurred(), "failed to get Listener") Expect(err).NotTo(HaveOccurred(), "failed to get Listener")
patched = autoscalingRunnerSet.DeepCopy() patched = autoscalingRunnerSet.DeepCopy()
patched.ObjectMeta.Annotations[annotationKeyValuesHash] = "hash-changes" patched.Annotations[annotationKeyValuesHash] = "hash-changes"
err = k8sClient.Patch(ctx, patched, client.MergeFrom(autoscalingRunnerSet)) err = k8sClient.Patch(ctx, patched, client.MergeFrom(autoscalingRunnerSet))
Expect(err).NotTo(HaveOccurred(), "failed to patch AutoScalingRunnerSet") Expect(err).NotTo(HaveOccurred(), "failed to patch AutoScalingRunnerSet")
@@ -546,10 +551,10 @@ var _ = Describe("Test AutoScalingRunnerSet controller", Ordered, func() {
// Patch the AutoScalingRunnerSet image which should trigger // Patch the AutoScalingRunnerSet image which should trigger
// the recreation of the Listener and EphemeralRunnerSet // the recreation of the Listener and EphemeralRunnerSet
patched := autoscalingRunnerSet.DeepCopy() patched := autoscalingRunnerSet.DeepCopy()
if patched.ObjectMeta.Annotations == nil { if patched.Annotations == nil {
patched.ObjectMeta.Annotations = make(map[string]string) patched.Annotations = make(map[string]string)
} }
patched.ObjectMeta.Annotations[annotationKeyValuesHash] = "testgroup2" patched.Annotations[annotationKeyValuesHash] = "testgroup2"
patched.Spec.Template.Spec = corev1.PodSpec{ patched.Spec.Template.Spec = corev1.PodSpec{
Containers: []corev1.Container{ Containers: []corev1.Container{
{ {
@@ -677,33 +682,40 @@ var _ = Describe("Test AutoScalingController updates", Ordered, func() {
autoscalingNS, mgr = createNamespace(GinkgoT(), k8sClient) autoscalingNS, mgr = createNamespace(GinkgoT(), k8sClient)
configSecret = createDefaultSecret(GinkgoT(), k8sClient, autoscalingNS.Name) configSecret = createDefaultSecret(GinkgoT(), k8sClient, autoscalingNS.Name)
multiClient := fake.NewMultiClient(
fake.WithDefaultClient(
fake.NewFakeClient(
fake.WithUpdateRunnerScaleSet(
&actions.RunnerScaleSet{
Id: 1,
Name: "testset_update",
RunnerGroupId: 1,
RunnerGroupName: "testgroup",
Labels: []actions.Label{{Type: "test", Name: "test"}},
RunnerSetting: actions.RunnerSetting{},
CreatedOn: time.Now(),
RunnerJitConfigUrl: "test.test.test",
Statistics: nil,
},
nil,
),
),
nil,
),
)
controller := &AutoscalingRunnerSetReconciler{ controller := &AutoscalingRunnerSetReconciler{
Client: mgr.GetClient(), Client: mgr.GetClient(),
Scheme: mgr.GetScheme(), Scheme: mgr.GetScheme(),
Log: logf.Log, Log: logf.Log,
ControllerNamespace: autoscalingNS.Name, ControllerNamespace: autoscalingNS.Name,
DefaultRunnerScaleSetListenerImage: "ghcr.io/actions/arc", DefaultRunnerScaleSetListenerImage: "ghcr.io/actions/arc",
ActionsClient: fake.NewMultiClient( ResourceBuilder: ResourceBuilder{
fake.WithDefaultClient( SecretResolver: &SecretResolver{
fake.NewFakeClient( k8sClient: k8sClient,
fake.WithUpdateRunnerScaleSet( multiClient: multiClient,
&actions.RunnerScaleSet{ },
Id: 1, },
Name: "testset_update",
RunnerGroupId: 1,
RunnerGroupName: "testgroup",
Labels: []actions.Label{{Type: "test", Name: "test"}},
RunnerSetting: actions.RunnerSetting{},
CreatedOn: time.Now(),
RunnerJitConfigUrl: "test.test.test",
Statistics: nil,
},
nil,
),
),
nil,
),
),
} }
err := controller.SetupWithManager(mgr) err := controller.SetupWithManager(mgr)
Expect(err).NotTo(HaveOccurred(), "failed to setup controller") Expect(err).NotTo(HaveOccurred(), "failed to setup controller")
@@ -818,7 +830,12 @@ var _ = Describe("Test AutoscalingController creation failures", Ordered, func()
Log: logf.Log, Log: logf.Log,
ControllerNamespace: autoscalingNS.Name, ControllerNamespace: autoscalingNS.Name,
DefaultRunnerScaleSetListenerImage: "ghcr.io/actions/arc", DefaultRunnerScaleSetListenerImage: "ghcr.io/actions/arc",
ActionsClient: fake.NewMultiClient(), ResourceBuilder: ResourceBuilder{
SecretResolver: &SecretResolver{
k8sClient: k8sClient,
multiClient: fake.NewMultiClient(),
},
},
} }
err := controller.SetupWithManager(mgr) err := controller.SetupWithManager(mgr)
Expect(err).NotTo(HaveOccurred(), "failed to setup controller") Expect(err).NotTo(HaveOccurred(), "failed to setup controller")
@@ -875,7 +892,7 @@ var _ = Describe("Test AutoscalingController creation failures", Ordered, func()
autoscalingRunnerSetTestInterval, autoscalingRunnerSetTestInterval,
).Should(BeEquivalentTo(autoscalingRunnerSetFinalizerName), "AutoScalingRunnerSet should have a finalizer") ).Should(BeEquivalentTo(autoscalingRunnerSetFinalizerName), "AutoScalingRunnerSet should have a finalizer")
ars.ObjectMeta.Annotations = make(map[string]string) ars.Annotations = make(map[string]string)
err = k8sClient.Update(ctx, ars) err = k8sClient.Update(ctx, ars)
Expect(err).NotTo(HaveOccurred(), "Update autoscaling runner set without annotation should be successful") Expect(err).NotTo(HaveOccurred(), "Update autoscaling runner set without annotation should be successful")
@@ -937,14 +954,19 @@ var _ = Describe("Test client optional configuration", Ordered, func() {
ctx = context.Background() ctx = context.Background()
autoscalingNS, mgr = createNamespace(GinkgoT(), k8sClient) autoscalingNS, mgr = createNamespace(GinkgoT(), k8sClient)
configSecret = createDefaultSecret(GinkgoT(), k8sClient, autoscalingNS.Name) configSecret = createDefaultSecret(GinkgoT(), k8sClient, autoscalingNS.Name)
multiClient := actions.NewMultiClient(logr.Discard())
controller = &AutoscalingRunnerSetReconciler{ controller = &AutoscalingRunnerSetReconciler{
Client: mgr.GetClient(), Client: mgr.GetClient(),
Scheme: mgr.GetScheme(), Scheme: mgr.GetScheme(),
Log: logf.Log, Log: logf.Log,
ControllerNamespace: autoscalingNS.Name, ControllerNamespace: autoscalingNS.Name,
DefaultRunnerScaleSetListenerImage: "ghcr.io/actions/arc", DefaultRunnerScaleSetListenerImage: "ghcr.io/actions/arc",
ActionsClient: actions.NewMultiClient(logr.Discard()), ResourceBuilder: ResourceBuilder{
SecretResolver: &SecretResolver{
k8sClient: k8sClient,
multiClient: multiClient,
},
},
} }
err := controller.SetupWithManager(mgr) err := controller.SetupWithManager(mgr)
@@ -1127,7 +1149,12 @@ var _ = Describe("Test client optional configuration", Ordered, func() {
Log: logf.Log, Log: logf.Log,
ControllerNamespace: autoscalingNS.Name, ControllerNamespace: autoscalingNS.Name,
DefaultRunnerScaleSetListenerImage: "ghcr.io/actions/arc", DefaultRunnerScaleSetListenerImage: "ghcr.io/actions/arc",
ActionsClient: fake.NewMultiClient(), ResourceBuilder: ResourceBuilder{
SecretResolver: &SecretResolver{
k8sClient: k8sClient,
multiClient: fake.NewMultiClient(),
},
},
} }
err = controller.SetupWithManager(mgr) err = controller.SetupWithManager(mgr)
Expect(err).NotTo(HaveOccurred(), "failed to setup controller") Expect(err).NotTo(HaveOccurred(), "failed to setup controller")
@@ -1136,7 +1163,10 @@ var _ = Describe("Test client optional configuration", Ordered, func() {
}) })
It("should be able to make requests to a server using root CAs", func() { It("should be able to make requests to a server using root CAs", func() {
controller.ActionsClient = actions.NewMultiClient(logr.Discard()) controller.SecretResolver = &SecretResolver{
k8sClient: k8sClient,
multiClient: actions.NewMultiClient(logr.Discard()),
}
certsFolder := filepath.Join( certsFolder := filepath.Join(
"../../", "../../",
@@ -1171,7 +1201,7 @@ var _ = Describe("Test client optional configuration", Ordered, func() {
Spec: v1alpha1.AutoscalingRunnerSetSpec{ Spec: v1alpha1.AutoscalingRunnerSetSpec{
GitHubConfigUrl: server.ConfigURLForOrg("my-org"), GitHubConfigUrl: server.ConfigURLForOrg("my-org"),
GitHubConfigSecret: configSecret.Name, GitHubConfigSecret: configSecret.Name,
GitHubServerTLS: &v1alpha1.GitHubServerTLSConfig{ GitHubServerTLS: &v1alpha1.TLSConfig{
CertificateFrom: &v1alpha1.TLSCertificateSource{ CertificateFrom: &v1alpha1.TLSCertificateSource{
ConfigMapKeyRef: &corev1.ConfigMapKeySelector{ ConfigMapKeyRef: &corev1.ConfigMapKeySelector{
LocalObjectReference: corev1.LocalObjectReference{ LocalObjectReference: corev1.LocalObjectReference{
@@ -1224,7 +1254,7 @@ var _ = Describe("Test client optional configuration", Ordered, func() {
Spec: v1alpha1.AutoscalingRunnerSetSpec{ Spec: v1alpha1.AutoscalingRunnerSetSpec{
GitHubConfigUrl: "https://github.com/owner/repo", GitHubConfigUrl: "https://github.com/owner/repo",
GitHubConfigSecret: configSecret.Name, GitHubConfigSecret: configSecret.Name,
GitHubServerTLS: &v1alpha1.GitHubServerTLSConfig{ GitHubServerTLS: &v1alpha1.TLSConfig{
CertificateFrom: &v1alpha1.TLSCertificateSource{ CertificateFrom: &v1alpha1.TLSCertificateSource{
ConfigMapKeyRef: &corev1.ConfigMapKeySelector{ ConfigMapKeyRef: &corev1.ConfigMapKeySelector{
LocalObjectReference: corev1.LocalObjectReference{ LocalObjectReference: corev1.LocalObjectReference{
@@ -1288,7 +1318,7 @@ var _ = Describe("Test client optional configuration", Ordered, func() {
Spec: v1alpha1.AutoscalingRunnerSetSpec{ Spec: v1alpha1.AutoscalingRunnerSetSpec{
GitHubConfigUrl: "https://github.com/owner/repo", GitHubConfigUrl: "https://github.com/owner/repo",
GitHubConfigSecret: configSecret.Name, GitHubConfigSecret: configSecret.Name,
GitHubServerTLS: &v1alpha1.GitHubServerTLSConfig{ GitHubServerTLS: &v1alpha1.TLSConfig{
CertificateFrom: &v1alpha1.TLSCertificateSource{ CertificateFrom: &v1alpha1.TLSCertificateSource{
ConfigMapKeyRef: &corev1.ConfigMapKeySelector{ ConfigMapKeyRef: &corev1.ConfigMapKeySelector{
LocalObjectReference: corev1.LocalObjectReference{ LocalObjectReference: corev1.LocalObjectReference{
@@ -1361,7 +1391,12 @@ var _ = Describe("Test external permissions cleanup", Ordered, func() {
Log: logf.Log, Log: logf.Log,
ControllerNamespace: autoscalingNS.Name, ControllerNamespace: autoscalingNS.Name,
DefaultRunnerScaleSetListenerImage: "ghcr.io/actions/arc", DefaultRunnerScaleSetListenerImage: "ghcr.io/actions/arc",
ActionsClient: fake.NewMultiClient(), ResourceBuilder: ResourceBuilder{
SecretResolver: &SecretResolver{
k8sClient: k8sClient,
multiClient: fake.NewMultiClient(),
},
},
} }
err := controller.SetupWithManager(mgr) err := controller.SetupWithManager(mgr)
Expect(err).NotTo(HaveOccurred(), "failed to setup controller") Expect(err).NotTo(HaveOccurred(), "failed to setup controller")
@@ -1519,7 +1554,12 @@ var _ = Describe("Test external permissions cleanup", Ordered, func() {
Log: logf.Log, Log: logf.Log,
ControllerNamespace: autoscalingNS.Name, ControllerNamespace: autoscalingNS.Name,
DefaultRunnerScaleSetListenerImage: "ghcr.io/actions/arc", DefaultRunnerScaleSetListenerImage: "ghcr.io/actions/arc",
ActionsClient: fake.NewMultiClient(), ResourceBuilder: ResourceBuilder{
SecretResolver: &SecretResolver{
k8sClient: k8sClient,
multiClient: fake.NewMultiClient(),
},
},
} }
err := controller.SetupWithManager(mgr) err := controller.SetupWithManager(mgr)
Expect(err).NotTo(HaveOccurred(), "failed to setup controller") Expect(err).NotTo(HaveOccurred(), "failed to setup controller")
@@ -1727,7 +1767,12 @@ var _ = Describe("Test resource version and build version mismatch", func() {
Log: logf.Log, Log: logf.Log,
ControllerNamespace: autoscalingNS.Name, ControllerNamespace: autoscalingNS.Name,
DefaultRunnerScaleSetListenerImage: "ghcr.io/actions/arc", DefaultRunnerScaleSetListenerImage: "ghcr.io/actions/arc",
ActionsClient: fake.NewMultiClient(), ResourceBuilder: ResourceBuilder{
SecretResolver: &SecretResolver{
k8sClient: k8sClient,
multiClient: fake.NewMultiClient(),
},
},
} }
err := controller.SetupWithManager(mgr) err := controller.SetupWithManager(mgr)
Expect(err).NotTo(HaveOccurred(), "failed to setup controller") Expect(err).NotTo(HaveOccurred(), "failed to setup controller")

View File

@@ -21,6 +21,7 @@ import (
"errors" "errors"
"fmt" "fmt"
"net/http" "net/http"
"strconv"
"time" "time"
"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1" "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
@@ -28,6 +29,7 @@ import (
"github.com/go-logr/logr" "github.com/go-logr/logr"
corev1 "k8s.io/api/core/v1" corev1 "k8s.io/api/core/v1"
kerrors "k8s.io/apimachinery/pkg/api/errors" kerrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/types"
ctrl "sigs.k8s.io/controller-runtime" ctrl "sigs.k8s.io/controller-runtime"
@@ -44,12 +46,24 @@ const (
// EphemeralRunnerReconciler reconciles a EphemeralRunner object // EphemeralRunnerReconciler reconciles a EphemeralRunner object
type EphemeralRunnerReconciler struct { type EphemeralRunnerReconciler struct {
client.Client client.Client
Log logr.Logger Log logr.Logger
Scheme *runtime.Scheme Scheme *runtime.Scheme
ActionsClient actions.MultiClient
ResourceBuilder ResourceBuilder
} }
// precompute backoff durations for failed ephemeral runners
// the len(failedRunnerBackoff) must be equal to maxFailures + 1
var failedRunnerBackoff = []time.Duration{
0,
5 * time.Second,
10 * time.Second,
20 * time.Second,
40 * time.Second,
80 * time.Second,
}
const maxFailures = 5
// +kubebuilder:rbac:groups=actions.github.com,resources=ephemeralrunners,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=actions.github.com,resources=ephemeralrunners,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=actions.github.com,resources=ephemeralrunners/status,verbs=get;update;patch // +kubebuilder:rbac:groups=actions.github.com,resources=ephemeralrunners/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=actions.github.com,resources=ephemeralrunners/finalizers,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=actions.github.com,resources=ephemeralrunners/finalizers,verbs=get;list;watch;create;update;patch;delete
@@ -70,7 +84,7 @@ func (r *EphemeralRunnerReconciler) Reconcile(ctx context.Context, req ctrl.Requ
return ctrl.Result{}, client.IgnoreNotFound(err) return ctrl.Result{}, client.IgnoreNotFound(err)
} }
if !ephemeralRunner.ObjectMeta.DeletionTimestamp.IsZero() { if !ephemeralRunner.DeletionTimestamp.IsZero() {
if !controllerutil.ContainsFinalizer(ephemeralRunner, ephemeralRunnerFinalizerName) { if !controllerutil.ContainsFinalizer(ephemeralRunner, ephemeralRunnerFinalizerName) {
return ctrl.Result{}, nil return ctrl.Result{}, nil
} }
@@ -166,69 +180,119 @@ func (r *EphemeralRunnerReconciler) Reconcile(ctx context.Context, req ctrl.Requ
return ctrl.Result{}, nil return ctrl.Result{}, nil
} }
if ephemeralRunner.Status.RunnerId == 0 {
log.Info("Creating new ephemeral runner registration and updating status with runner config")
if r, err := r.updateStatusWithRunnerConfig(ctx, ephemeralRunner, log); r != nil {
return *r, err
}
}
secret := new(corev1.Secret) secret := new(corev1.Secret)
if err := r.Get(ctx, req.NamespacedName, secret); err != nil { if err := r.Get(ctx, req.NamespacedName, secret); err != nil {
if !kerrors.IsNotFound(err) { if !kerrors.IsNotFound(err) {
log.Error(err, "Failed to fetch secret") log.Error(err, "Failed to fetch secret")
return ctrl.Result{}, err return ctrl.Result{}, err
} }
// create secret if not created
log.Info("Creating new ephemeral runner secret for jitconfig.")
if r, err := r.createSecret(ctx, ephemeralRunner, log); r != nil {
return *r, err
}
// Retry to get the secret that was just created. jitConfig, err := r.createRunnerJitConfig(ctx, ephemeralRunner, log)
// Otherwise, even though we want to continue to create the pod, switch {
// it fails due to the missing secret resulting in an invalid pod spec. case err == nil:
if err := r.Get(ctx, req.NamespacedName, secret); err != nil { // create secret if not created
log.Error(err, "Failed to fetch secret") log.Info("Creating new ephemeral runner secret for jitconfig.")
jitSecret, err := r.createSecret(ctx, ephemeralRunner, jitConfig, log)
if err != nil {
return ctrl.Result{}, fmt.Errorf("failed to create secret: %w", err)
}
log.Info("Created new ephemeral runner secret for jitconfig.")
secret = jitSecret
case errors.Is(err, retryableError):
log.Info("Encountered retryable error, requeueing", "error", err.Error())
return ctrl.Result{Requeue: true}, nil
case errors.Is(err, fatalError):
log.Info("JIT config cannot be created for this ephemeral runner, issuing delete", "error", err.Error())
if err := r.Delete(ctx, ephemeralRunner); err != nil {
return ctrl.Result{}, fmt.Errorf("failed to delete the ephemeral runner: %w", err)
}
log.Info("Request to delete ephemeral runner has been issued")
return ctrl.Result{}, nil
default:
log.Error(err, "Failed to create ephemeral runners secret", "error", err.Error())
return ctrl.Result{}, err return ctrl.Result{}, err
} }
} }
if ephemeralRunner.Status.RunnerId == 0 {
log.Info("Updating ephemeral runner status with runnerId and runnerName")
runnerID, err := strconv.Atoi(string(secret.Data["runnerId"]))
if err != nil {
log.Error(err, "Runner config secret is corrupted: missing runnerId")
log.Info("Deleting corrupted runner config secret")
if err := r.Delete(ctx, secret); err != nil {
return ctrl.Result{}, fmt.Errorf("failed to delete the corrupted runner config secret")
}
log.Info("Corrupted runner config secret has been deleted")
return ctrl.Result{Requeue: true}, nil
}
runnerName := string(secret.Data["runnerName"])
if err := patchSubResource(ctx, r.Status(), ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) {
obj.Status.RunnerId = runnerID
obj.Status.RunnerName = runnerName
}); err != nil {
return ctrl.Result{}, fmt.Errorf("failed to update runner status for RunnerId/RunnerName/RunnerJITConfig: %w", err)
}
ephemeralRunner.Status.RunnerId = runnerID
ephemeralRunner.Status.RunnerName = runnerName
log.Info("Updated ephemeral runner status with runnerId and runnerName")
}
if len(ephemeralRunner.Status.Failures) > maxFailures {
log.Info(fmt.Sprintf("EphemeralRunner has failed more than %d times. Deleting ephemeral runner so it can be re-created", maxFailures))
if err := r.Delete(ctx, ephemeralRunner); err != nil {
log.Error(fmt.Errorf("failed to delete ephemeral runner after %d failures: %w", maxFailures, err), "Failed to delete ephemeral runner")
return ctrl.Result{}, err
}
return ctrl.Result{}, nil
}
now := metav1.Now()
lastFailure := ephemeralRunner.Status.LastFailure()
backoffDuration := failedRunnerBackoff[len(ephemeralRunner.Status.Failures)]
nextReconciliation := lastFailure.Add(backoffDuration)
if !lastFailure.IsZero() && now.Before(&metav1.Time{Time: nextReconciliation}) {
requeueAfter := nextReconciliation.Sub(now.Time)
log.Info("Backing off the next reconciliation due to failure",
"lastFailure", lastFailure,
"nextReconciliation", nextReconciliation,
"requeueAfter", requeueAfter,
)
return ctrl.Result{
Requeue: true,
RequeueAfter: requeueAfter,
}, nil
}
pod := new(corev1.Pod) pod := new(corev1.Pod)
if err := r.Get(ctx, req.NamespacedName, pod); err != nil { if err := r.Get(ctx, req.NamespacedName, pod); err != nil {
switch { if !kerrors.IsNotFound(err) {
case !kerrors.IsNotFound(err):
log.Error(err, "Failed to fetch the pod") log.Error(err, "Failed to fetch the pod")
return ctrl.Result{}, err return ctrl.Result{}, err
}
log.Info("Ephemeral runner pod does not exist. Creating new ephemeral runner")
case len(ephemeralRunner.Status.Failures) > 5: result, err := r.createPod(ctx, ephemeralRunner, secret, log)
log.Info("EphemeralRunner has failed more than 5 times. Marking it as failed") switch {
errMessage := fmt.Sprintf("Pod has failed to start more than 5 times: %s", pod.Status.Message) case err == nil:
if err := r.markAsFailed(ctx, ephemeralRunner, errMessage, ReasonTooManyPodFailures, log); err != nil { return result, nil
case kerrors.IsAlreadyExists(err):
log.Info("Runner pod already exists. Waiting for the pod event to be received")
return ctrl.Result{Requeue: true, RequeueAfter: 5 * time.Second}, nil
case kerrors.IsInvalid(err) || kerrors.IsForbidden(err):
log.Error(err, "Failed to create a pod due to unrecoverable failure")
errMessage := fmt.Sprintf("Failed to create the pod: %v", err)
if err := r.markAsFailed(ctx, ephemeralRunner, errMessage, ReasonInvalidPodFailure, log); err != nil {
log.Error(err, "Failed to set ephemeral runner to phase Failed") log.Error(err, "Failed to set ephemeral runner to phase Failed")
return ctrl.Result{}, err return ctrl.Result{}, err
} }
return ctrl.Result{}, nil return ctrl.Result{}, nil
default: default:
// Pod was not found. Create if the pod has never been created log.Error(err, "Failed to create the pod")
log.Info("Creating new EphemeralRunner pod.") return ctrl.Result{}, err
result, err := r.createPod(ctx, ephemeralRunner, secret, log)
switch {
case err == nil:
return result, nil
case kerrors.IsInvalid(err) || kerrors.IsForbidden(err):
log.Error(err, "Failed to create a pod due to unrecoverable failure")
errMessage := fmt.Sprintf("Failed to create the pod: %v", err)
if err := r.markAsFailed(ctx, ephemeralRunner, errMessage, ReasonInvalidPodFailure, log); err != nil {
log.Error(err, "Failed to set ephemeral runner to phase Failed")
return ctrl.Result{}, err
}
return ctrl.Result{}, nil
default:
log.Error(err, "Failed to create the pod")
return ctrl.Result{}, err
}
} }
} }
@@ -262,34 +326,28 @@ func (r *EphemeralRunnerReconciler) Reconcile(ctx context.Context, req ctrl.Requ
case cs.State.Terminated.ExitCode != 0: // failed case cs.State.Terminated.ExitCode != 0: // failed
log.Info("Ephemeral runner container failed", "exitCode", cs.State.Terminated.ExitCode) log.Info("Ephemeral runner container failed", "exitCode", cs.State.Terminated.ExitCode)
if ephemeralRunner.HasJob() {
log.Error(
errors.New("ephemeral runner has a job assigned, but the pod has failed"),
"Ephemeral runner either has faulty entrypoint or something external killing the runner",
)
log.Info("Deleting the ephemeral runner that has a job assigned but the pod has failed")
if err := r.Delete(ctx, ephemeralRunner); err != nil {
log.Error(err, "Failed to delete the ephemeral runner that has a job assigned but the pod has failed")
return ctrl.Result{}, err
}
return ctrl.Result{}, nil
}
if err := r.deletePodAsFailed(ctx, ephemeralRunner, pod, log); err != nil { if err := r.deletePodAsFailed(ctx, ephemeralRunner, pod, log); err != nil {
log.Error(err, "Failed to delete runner pod on failure") log.Error(err, "Failed to delete runner pod on failure")
return ctrl.Result{}, err return ctrl.Result{}, err
} }
return ctrl.Result{}, nil return ctrl.Result{}, nil
default: default: // succeeded
// pod succeeded. We double-check with the service if the runner exists. log.Info("Ephemeral runner has finished successfully, deleting ephemeral runner", "exitCode", cs.State.Terminated.ExitCode)
// The reason is that image can potentially finish with status 0, but not pick up the job. if err := r.Delete(ctx, ephemeralRunner); err != nil {
existsInService, err := r.runnerRegisteredWithService(ctx, ephemeralRunner.DeepCopy(), log) log.Error(err, "Failed to delete ephemeral runner after successful completion")
if err != nil {
log.Error(err, "Failed to check if runner is registered with the service")
return ctrl.Result{}, err
}
if !existsInService {
// the runner does not exist in the service, so it must be done
log.Info("Ephemeral runner has finished since it does not exist in the service anymore")
if err := r.markAsFinished(ctx, ephemeralRunner, log); err != nil {
log.Error(err, "Failed to mark ephemeral runner as finished")
return ctrl.Result{}, err
}
return ctrl.Result{}, nil
}
// The runner still exists. This can happen if the pod exited with 0 but fails to start
log.Info("Ephemeral runner pod has finished, but the runner still exists in the service. Deleting the pod to restart it.")
if err := r.deletePodAsFailed(ctx, ephemeralRunner, pod, log); err != nil {
log.Error(err, "failed to delete a pod that still exists in the service")
return ctrl.Result{}, err return ctrl.Result{}, err
} }
return ctrl.Result{}, nil return ctrl.Result{}, nil
@@ -319,7 +377,7 @@ func (r *EphemeralRunnerReconciler) cleanupResources(ctx context.Context, epheme
err := r.Get(ctx, types.NamespacedName{Namespace: ephemeralRunner.Namespace, Name: ephemeralRunner.Name}, pod) err := r.Get(ctx, types.NamespacedName{Namespace: ephemeralRunner.Namespace, Name: ephemeralRunner.Name}, pod)
switch { switch {
case err == nil: case err == nil:
if pod.ObjectMeta.DeletionTimestamp.IsZero() { if pod.DeletionTimestamp.IsZero() {
log.Info("Deleting the runner pod") log.Info("Deleting the runner pod")
if err := r.Delete(ctx, pod); err != nil && !kerrors.IsNotFound(err) { if err := r.Delete(ctx, pod); err != nil && !kerrors.IsNotFound(err) {
return fmt.Errorf("failed to delete pod: %w", err) return fmt.Errorf("failed to delete pod: %w", err)
@@ -339,7 +397,7 @@ func (r *EphemeralRunnerReconciler) cleanupResources(ctx context.Context, epheme
err = r.Get(ctx, types.NamespacedName{Namespace: ephemeralRunner.Namespace, Name: ephemeralRunner.Name}, secret) err = r.Get(ctx, types.NamespacedName{Namespace: ephemeralRunner.Namespace, Name: ephemeralRunner.Name}, secret)
switch { switch {
case err == nil: case err == nil:
if secret.ObjectMeta.DeletionTimestamp.IsZero() { if secret.DeletionTimestamp.IsZero() {
log.Info("Deleting the jitconfig secret") log.Info("Deleting the jitconfig secret")
if err := r.Delete(ctx, secret); err != nil && !kerrors.IsNotFound(err) { if err := r.Delete(ctx, secret); err != nil && !kerrors.IsNotFound(err) {
return fmt.Errorf("failed to delete secret: %w", err) return fmt.Errorf("failed to delete secret: %w", err)
@@ -393,7 +451,7 @@ func (r *EphemeralRunnerReconciler) cleanupRunnerLinkedPods(ctx context.Context,
var errs []error var errs []error
for i := range runnerLinkedPodList.Items { for i := range runnerLinkedPodList.Items {
linkedPod := &runnerLinkedPodList.Items[i] linkedPod := &runnerLinkedPodList.Items[i]
if !linkedPod.ObjectMeta.DeletionTimestamp.IsZero() { if !linkedPod.DeletionTimestamp.IsZero() {
continue continue
} }
@@ -409,7 +467,7 @@ func (r *EphemeralRunnerReconciler) cleanupRunnerLinkedPods(ctx context.Context,
func (r *EphemeralRunnerReconciler) cleanupRunnerLinkedSecrets(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) error { func (r *EphemeralRunnerReconciler) cleanupRunnerLinkedSecrets(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) error {
runnerLinkedLabels := client.MatchingLabels( runnerLinkedLabels := client.MatchingLabels(
map[string]string{ map[string]string{
"runner-pod": ephemeralRunner.ObjectMeta.Name, "runner-pod": ephemeralRunner.Name,
}, },
) )
var runnerLinkedSecretList corev1.SecretList var runnerLinkedSecretList corev1.SecretList
@@ -427,7 +485,7 @@ func (r *EphemeralRunnerReconciler) cleanupRunnerLinkedSecrets(ctx context.Conte
var errs []error var errs []error
for i := range runnerLinkedSecretList.Items { for i := range runnerLinkedSecretList.Items {
s := &runnerLinkedSecretList.Items[i] s := &runnerLinkedSecretList.Items[i]
if !s.ObjectMeta.DeletionTimestamp.IsZero() { if !s.DeletionTimestamp.IsZero() {
continue continue
} }
@@ -459,22 +517,10 @@ func (r *EphemeralRunnerReconciler) markAsFailed(ctx context.Context, ephemeralR
return nil return nil
} }
func (r *EphemeralRunnerReconciler) markAsFinished(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) error {
log.Info("Updating ephemeral runner status to Finished")
if err := patchSubResource(ctx, r.Status(), ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) {
obj.Status.Phase = corev1.PodSucceeded
}); err != nil {
return fmt.Errorf("failed to update ephemeral runner with status finished: %w", err)
}
log.Info("EphemeralRunner status is marked as Finished")
return nil
}
// deletePodAsFailed is responsible for deleting the pod and updating the .Status.Failures for tracking failure count. // deletePodAsFailed is responsible for deleting the pod and updating the .Status.Failures for tracking failure count.
// It should not be responsible for setting the status to Failed. // It should not be responsible for setting the status to Failed.
func (r *EphemeralRunnerReconciler) deletePodAsFailed(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, pod *corev1.Pod, log logr.Logger) error { func (r *EphemeralRunnerReconciler) deletePodAsFailed(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, pod *corev1.Pod, log logr.Logger) error {
if pod.ObjectMeta.DeletionTimestamp.IsZero() { if pod.DeletionTimestamp.IsZero() {
log.Info("Deleting the ephemeral runner pod", "podId", pod.UID) log.Info("Deleting the ephemeral runner pod", "podId", pod.UID)
if err := r.Delete(ctx, pod); err != nil && !kerrors.IsNotFound(err) { if err := r.Delete(ctx, pod); err != nil && !kerrors.IsNotFound(err) {
return fmt.Errorf("failed to delete pod with status failed: %w", err) return fmt.Errorf("failed to delete pod with status failed: %w", err)
@@ -484,9 +530,9 @@ func (r *EphemeralRunnerReconciler) deletePodAsFailed(ctx context.Context, ephem
log.Info("Updating ephemeral runner status to track the failure count") log.Info("Updating ephemeral runner status to track the failure count")
if err := patchSubResource(ctx, r.Status(), ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) { if err := patchSubResource(ctx, r.Status(), ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) {
if obj.Status.Failures == nil { if obj.Status.Failures == nil {
obj.Status.Failures = make(map[string]bool) obj.Status.Failures = make(map[string]metav1.Time)
} }
obj.Status.Failures[string(pod.UID)] = true obj.Status.Failures[string(pod.UID)] = metav1.Now()
obj.Status.Ready = false obj.Status.Ready = false
obj.Status.Reason = pod.Status.Reason obj.Status.Reason = pod.Status.Reason
obj.Status.Message = pod.Status.Message obj.Status.Message = pod.Status.Message
@@ -498,14 +544,12 @@ func (r *EphemeralRunnerReconciler) deletePodAsFailed(ctx context.Context, ephem
return nil return nil
} }
// updateStatusWithRunnerConfig fetches runtime configuration needed by the runner func (r *EphemeralRunnerReconciler) createRunnerJitConfig(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) (*actions.RunnerScaleSetJitRunnerConfig, error) {
// This method should always set .status.runnerId and .status.runnerJITConfig
func (r *EphemeralRunnerReconciler) updateStatusWithRunnerConfig(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) (*ctrl.Result, error) {
// Runner is not registered with the service. We need to register it first // Runner is not registered with the service. We need to register it first
log.Info("Creating ephemeral runner JIT config") log.Info("Creating ephemeral runner JIT config")
actionsClient, err := r.actionsClientFor(ctx, ephemeralRunner) actionsClient, err := r.GetActionsService(ctx, ephemeralRunner)
if err != nil { if err != nil {
return &ctrl.Result{}, fmt.Errorf("failed to get actions client for generating JIT config: %w", err) return nil, fmt.Errorf("failed to get actions client for generating JIT config: %w", err)
} }
jitSettings := &actions.RunnerScaleSetJitRunnerSetting{ jitSettings := &actions.RunnerScaleSetJitRunnerSetting{
@@ -520,74 +564,52 @@ func (r *EphemeralRunnerReconciler) updateStatusWithRunnerConfig(ctx context.Con
} }
jitConfig, err := actionsClient.GenerateJitRunnerConfig(ctx, jitSettings, ephemeralRunner.Spec.RunnerScaleSetId) jitConfig, err := actionsClient.GenerateJitRunnerConfig(ctx, jitSettings, ephemeralRunner.Spec.RunnerScaleSetId)
if err == nil { // if NO error
log.Info("Created ephemeral runner JIT config", "runnerId", jitConfig.Runner.Id)
return jitConfig, nil
}
actionsError := &actions.ActionsError{}
if !errors.As(err, &actionsError) {
return nil, fmt.Errorf("failed to generate JIT config with generic error: %w", err)
}
if actionsError.StatusCode != http.StatusConflict ||
!actionsError.IsException("AgentExistsException") {
return nil, fmt.Errorf("failed to generate JIT config with Actions service error: %w", err)
}
// If the runner with the name we want already exists it means:
// - We might have a name collision.
// - Our previous reconciliation loop failed to update the
// status with the runnerId and runnerJITConfig after the `GenerateJitRunnerConfig`
// created the runner registration on the service.
// We will try to get the runner and see if it's belong to this AutoScalingRunnerSet,
// if so, we can simply delete the runner registration and create a new one.
log.Info("Getting runner jit config failed with conflict error, trying to get the runner by name", "runnerName", ephemeralRunner.Name)
existingRunner, err := actionsClient.GetRunnerByName(ctx, ephemeralRunner.Name)
if err != nil { if err != nil {
actionsError := &actions.ActionsError{} return nil, fmt.Errorf("failed to get runner by name: %w", err)
if !errors.As(err, &actionsError) { }
return &ctrl.Result{}, fmt.Errorf("failed to generate JIT config with generic error: %w", err)
}
if actionsError.StatusCode != http.StatusConflict || if existingRunner == nil {
!actionsError.IsException("AgentExistsException") { log.Info("Runner with the same name does not exist anymore, re-queuing the reconciliation")
return &ctrl.Result{}, fmt.Errorf("failed to generate JIT config with Actions service error: %w", err) return nil, fmt.Errorf("%w: runner existed, retry configuration", retryableError)
} }
// If the runner with the name we want already exists it means: log.Info("Found the runner with the same name", "runnerId", existingRunner.Id, "runnerScaleSetId", existingRunner.RunnerScaleSetId)
// - We might have a name collision. if existingRunner.RunnerScaleSetId == ephemeralRunner.Spec.RunnerScaleSetId {
// - Our previous reconciliation loop failed to update the log.Info("Removing the runner with the same name")
// status with the runnerId and runnerJITConfig after the `GenerateJitRunnerConfig` err := actionsClient.RemoveRunner(ctx, int64(existingRunner.Id))
// created the runner registration on the service.
// We will try to get the runner and see if it's belong to this AutoScalingRunnerSet,
// if so, we can simply delete the runner registration and create a new one.
log.Info("Getting runner jit config failed with conflict error, trying to get the runner by name", "runnerName", ephemeralRunner.Name)
existingRunner, err := actionsClient.GetRunnerByName(ctx, ephemeralRunner.Name)
if err != nil { if err != nil {
return &ctrl.Result{}, fmt.Errorf("failed to get runner by name: %w", err) return nil, fmt.Errorf("failed to remove runner from the service: %w", err)
} }
if existingRunner == nil { log.Info("Removed the runner with the same name, re-queuing the reconciliation")
log.Info("Runner with the same name does not exist, re-queuing the reconciliation") return nil, fmt.Errorf("%w: runner existed belonging to the scale set, retry configuration", retryableError)
return &ctrl.Result{Requeue: true}, nil
}
log.Info("Found the runner with the same name", "runnerId", existingRunner.Id, "runnerScaleSetId", existingRunner.RunnerScaleSetId)
if existingRunner.RunnerScaleSetId == ephemeralRunner.Spec.RunnerScaleSetId {
log.Info("Removing the runner with the same name")
err := actionsClient.RemoveRunner(ctx, int64(existingRunner.Id))
if err != nil {
return &ctrl.Result{}, fmt.Errorf("failed to remove runner from the service: %w", err)
}
log.Info("Removed the runner with the same name, re-queuing the reconciliation")
return &ctrl.Result{Requeue: true}, nil
}
// TODO: Do we want to mark the ephemeral runner as failed, and let EphemeralRunnerSet to clean it up, so we can recover from this situation?
// The situation is that the EphemeralRunner's name is already used by something else to register a runner, and we can't take the control back.
return &ctrl.Result{}, fmt.Errorf("runner with the same name but doesn't belong to this RunnerScaleSet: %w", err)
}
log.Info("Created ephemeral runner JIT config", "runnerId", jitConfig.Runner.Id)
log.Info("Updating ephemeral runner status with runnerId and runnerJITConfig")
err = patchSubResource(ctx, r.Status(), ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) {
obj.Status.RunnerId = jitConfig.Runner.Id
obj.Status.RunnerName = jitConfig.Runner.Name
obj.Status.RunnerJITConfig = jitConfig.EncodedJITConfig
})
if err != nil {
return &ctrl.Result{}, fmt.Errorf("failed to update runner status for RunnerId/RunnerName/RunnerJITConfig: %w", err)
} }
// We want to continue without a requeue for faster pod creation. return nil, fmt.Errorf("%w: runner with the same name but doesn't belong to this RunnerScaleSet: %w", fatalError, err)
//
// To do so, we update the status in-place, so that both continuing the loop and
// and requeuing and skipping updateStatusWithRunnerConfig in the next loop, will
// have the same effect.
ephemeralRunner.Status.RunnerId = jitConfig.Runner.Id
ephemeralRunner.Status.RunnerName = jitConfig.Runner.Name
ephemeralRunner.Status.RunnerJITConfig = jitConfig.EncodedJITConfig
log.Info("Updated ephemeral runner status with runnerId and runnerJITConfig")
return nil, nil
} }
func (r *EphemeralRunnerReconciler) createPod(ctx context.Context, runner *v1alpha1.EphemeralRunner, secret *corev1.Secret, log logr.Logger) (ctrl.Result, error) { func (r *EphemeralRunnerReconciler) createPod(ctx context.Context, runner *v1alpha1.EphemeralRunner, secret *corev1.Secret, log logr.Logger) (ctrl.Result, error) {
@@ -640,7 +662,7 @@ func (r *EphemeralRunnerReconciler) createPod(ctx context.Context, runner *v1alp
} }
log.Info("Creating new pod for ephemeral runner") log.Info("Creating new pod for ephemeral runner")
newPod := r.ResourceBuilder.newEphemeralRunnerPod(ctx, runner, secret, envs...) newPod := r.newEphemeralRunnerPod(ctx, runner, secret, envs...)
if err := ctrl.SetControllerReference(runner, newPod, r.Scheme); err != nil { if err := ctrl.SetControllerReference(runner, newPod, r.Scheme); err != nil {
log.Error(err, "Failed to set controller reference to a new pod") log.Error(err, "Failed to set controller reference to a new pod")
@@ -663,21 +685,21 @@ func (r *EphemeralRunnerReconciler) createPod(ctx context.Context, runner *v1alp
return ctrl.Result{}, nil return ctrl.Result{}, nil
} }
func (r *EphemeralRunnerReconciler) createSecret(ctx context.Context, runner *v1alpha1.EphemeralRunner, log logr.Logger) (*ctrl.Result, error) { func (r *EphemeralRunnerReconciler) createSecret(ctx context.Context, runner *v1alpha1.EphemeralRunner, jitConfig *actions.RunnerScaleSetJitRunnerConfig, log logr.Logger) (*corev1.Secret, error) {
log.Info("Creating new secret for ephemeral runner") log.Info("Creating new secret for ephemeral runner")
jitSecret := r.ResourceBuilder.newEphemeralRunnerJitSecret(runner) jitSecret := r.newEphemeralRunnerJitSecret(runner, jitConfig)
if err := ctrl.SetControllerReference(runner, jitSecret, r.Scheme); err != nil { if err := ctrl.SetControllerReference(runner, jitSecret, r.Scheme); err != nil {
return &ctrl.Result{}, fmt.Errorf("failed to set controller reference: %w", err) return nil, fmt.Errorf("failed to set controller reference: %w", err)
} }
log.Info("Created new secret spec for ephemeral runner") log.Info("Created new secret spec for ephemeral runner")
if err := r.Create(ctx, jitSecret); err != nil { if err := r.Create(ctx, jitSecret); err != nil {
return &ctrl.Result{}, fmt.Errorf("failed to create jit secret: %w", err) return nil, fmt.Errorf("failed to create jit secret: %w", err)
} }
log.Info("Created ephemeral runner secret", "secretName", jitSecret.Name) log.Info("Created ephemeral runner secret", "secretName", jitSecret.Name)
return nil, nil return jitSecret, nil
} }
// updateRunStatusFromPod is responsible for updating non-exiting statuses. // updateRunStatusFromPod is responsible for updating non-exiting statuses.
@@ -727,104 +749,8 @@ func (r *EphemeralRunnerReconciler) updateRunStatusFromPod(ctx context.Context,
return nil return nil
} }
func (r *EphemeralRunnerReconciler) actionsClientFor(ctx context.Context, runner *v1alpha1.EphemeralRunner) (actions.ActionsService, error) {
secret := new(corev1.Secret)
if err := r.Get(ctx, types.NamespacedName{Namespace: runner.Namespace, Name: runner.Spec.GitHubConfigSecret}, secret); err != nil {
return nil, fmt.Errorf("failed to get secret: %w", err)
}
opts, err := r.actionsClientOptionsFor(ctx, runner)
if err != nil {
return nil, fmt.Errorf("failed to get actions client options: %w", err)
}
return r.ActionsClient.GetClientFromSecret(
ctx,
runner.Spec.GitHubConfigUrl,
runner.Namespace,
secret.Data,
opts...,
)
}
func (r *EphemeralRunnerReconciler) actionsClientOptionsFor(ctx context.Context, runner *v1alpha1.EphemeralRunner) ([]actions.ClientOption, error) {
var opts []actions.ClientOption
if runner.Spec.Proxy != nil {
proxyFunc, err := runner.Spec.Proxy.ProxyFunc(func(s string) (*corev1.Secret, error) {
var secret corev1.Secret
err := r.Get(ctx, types.NamespacedName{Namespace: runner.Namespace, Name: s}, &secret)
if err != nil {
return nil, fmt.Errorf("failed to get proxy secret %s: %w", s, err)
}
return &secret, nil
})
if err != nil {
return nil, fmt.Errorf("failed to get proxy func: %w", err)
}
opts = append(opts, actions.WithProxy(proxyFunc))
}
tlsConfig := runner.Spec.GitHubServerTLS
if tlsConfig != nil {
pool, err := tlsConfig.ToCertPool(func(name, key string) ([]byte, error) {
var configmap corev1.ConfigMap
err := r.Get(
ctx,
types.NamespacedName{
Namespace: runner.Namespace,
Name: name,
},
&configmap,
)
if err != nil {
return nil, fmt.Errorf("failed to get configmap %s: %w", name, err)
}
return []byte(configmap.Data[key]), nil
})
if err != nil {
return nil, fmt.Errorf("failed to get tls config: %w", err)
}
opts = append(opts, actions.WithRootCAs(pool))
}
return opts, nil
}
// runnerRegisteredWithService checks if the runner is still registered with
// the GitHub Actions service.
//
// Returns found=false and err=nil when the service reports HTTP 404 with an
// AgentNotFoundException, meaning the ephemeral runner no longer exists in
// GitHub and should be deleted. Any other service error is returned to the
// caller.
//
// Note: the receiver is a pointer for consistency with every other method on
// EphemeralRunnerReconciler; the previous value receiver copied the whole
// reconciler (including its embedded client) on each call.
func (r *EphemeralRunnerReconciler) runnerRegisteredWithService(ctx context.Context, runner *v1alpha1.EphemeralRunner, log logr.Logger) (found bool, err error) {
	actionsClient, err := r.actionsClientFor(ctx, runner)
	if err != nil {
		return false, fmt.Errorf("failed to get Actions client for ScaleSet: %w", err)
	}

	log.Info("Checking if runner exists in GitHub service", "runnerId", runner.Status.RunnerId)
	_, err = actionsClient.GetRunner(ctx, int64(runner.Status.RunnerId))
	if err != nil {
		actionsError := &actions.ActionsError{}
		if !errors.As(err, &actionsError) {
			return false, err
		}

		// Only a 404 carrying AgentNotFoundException means the runner is
		// gone; anything else is a genuine lookup failure.
		if actionsError.StatusCode != http.StatusNotFound ||
			!actionsError.IsException("AgentNotFoundException") {
			return false, fmt.Errorf("failed to check if runner exists in GitHub service: %w", err)
		}

		log.Info("Runner does not exist in GitHub service", "runnerId", runner.Status.RunnerId)
		return false, nil
	}

	log.Info("Runner exists in GitHub service", "runnerId", runner.Status.RunnerId)
	return true, nil
}
func (r *EphemeralRunnerReconciler) deleteRunnerFromService(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) error { func (r *EphemeralRunnerReconciler) deleteRunnerFromService(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) error {
client, err := r.actionsClientFor(ctx, ephemeralRunner) client, err := r.GetActionsService(ctx, ephemeralRunner)
if err != nil { if err != nil {
return fmt.Errorf("failed to get actions client for runner: %w", err) return fmt.Errorf("failed to get actions client for runner: %w", err)
} }

View File

@@ -30,7 +30,7 @@ import (
const ( const (
ephemeralRunnerTimeout = time.Second * 20 ephemeralRunnerTimeout = time.Second * 20
ephemeralRunnerInterval = time.Millisecond * 250 ephemeralRunnerInterval = time.Millisecond * 10
runnerImage = "ghcr.io/actions/actions-runner:latest" runnerImage = "ghcr.io/actions/actions-runner:latest"
) )
@@ -107,10 +107,15 @@ var _ = Describe("EphemeralRunner", func() {
configSecret = createDefaultSecret(GinkgoT(), k8sClient, autoscalingNS.Name) configSecret = createDefaultSecret(GinkgoT(), k8sClient, autoscalingNS.Name)
controller = &EphemeralRunnerReconciler{ controller = &EphemeralRunnerReconciler{
Client: mgr.GetClient(), Client: mgr.GetClient(),
Scheme: mgr.GetScheme(), Scheme: mgr.GetScheme(),
Log: logf.Log, Log: logf.Log,
ActionsClient: fake.NewMultiClient(), ResourceBuilder: ResourceBuilder{
SecretResolver: &SecretResolver{
k8sClient: mgr.GetClient(),
multiClient: fake.NewMultiClient(),
},
},
} }
err := controller.SetupWithManager(mgr) err := controller.SetupWithManager(mgr)
@@ -171,7 +176,7 @@ var _ = Describe("EphemeralRunner", func() {
).Should(BeEquivalentTo(ephemeralRunner.Name)) ).Should(BeEquivalentTo(ephemeralRunner.Name))
}) })
It("It should re-create pod on failure", func() { It("It should re-create pod on failure and no job assigned", func() {
pod := new(corev1.Pod) pod := new(corev1.Pod)
Eventually(func() (bool, error) { Eventually(func() (bool, error) {
if err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, pod); err != nil { if err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, pod); err != nil {
@@ -195,6 +200,67 @@ var _ = Describe("EphemeralRunner", func() {
).Should(BeEquivalentTo(true)) ).Should(BeEquivalentTo(true))
}) })
It("It should delete ephemeral runner on failure and job assigned", func() {
er := new(v1alpha1.EphemeralRunner)
// Check if finalizer is added
Eventually(
func() error {
err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, er)
return err
},
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
).Should(Succeed(), "failed to get ephemeral runner")
// update job id to simulate job assigned
er.Status.JobID = "1"
err := k8sClient.Status().Update(ctx, er)
Expect(err).To(BeNil(), "failed to update ephemeral runner status")
er = new(v1alpha1.EphemeralRunner)
Eventually(
func() (string, error) {
err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, er)
if err != nil {
return "", err
}
return er.Status.JobID, nil
},
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
).Should(BeEquivalentTo("1"))
pod := new(corev1.Pod)
Eventually(func() (bool, error) {
if err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, pod); err != nil {
return false, err
}
return true, nil
}).Should(BeEquivalentTo(true))
// delete pod to simulate failure
pod.Status.ContainerStatuses = append(pod.Status.ContainerStatuses, corev1.ContainerStatus{
Name: v1alpha1.EphemeralRunnerContainerName,
State: corev1.ContainerState{
Terminated: &corev1.ContainerStateTerminated{
ExitCode: 1,
},
},
})
err = k8sClient.Status().Update(ctx, pod)
Expect(err).To(BeNil(), "Failed to update pod status")
er = new(v1alpha1.EphemeralRunner)
Eventually(
func() bool {
err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, er)
return kerrors.IsNotFound(err)
},
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
).Should(BeTrue(), "Ephemeral runner should eventually be deleted")
})
It("It should failed if a pod template is invalid", func() { It("It should failed if a pod template is invalid", func() {
invalideEphemeralRunner := newExampleRunner("invalid-ephemeral-runner", autoscalingNS.Name, configSecret.Name) invalideEphemeralRunner := newExampleRunner("invalid-ephemeral-runner", autoscalingNS.Name, configSecret.Name)
invalideEphemeralRunner.Spec.Spec.PriorityClassName = "notexist" invalideEphemeralRunner.Spec.Spec.PriorityClassName = "notexist"
@@ -203,13 +269,22 @@ var _ = Describe("EphemeralRunner", func() {
Expect(err).To(BeNil()) Expect(err).To(BeNil())
updated := new(v1alpha1.EphemeralRunner) updated := new(v1alpha1.EphemeralRunner)
Eventually(func() (corev1.PodPhase, error) { Eventually(
err := k8sClient.Get(ctx, client.ObjectKey{Name: invalideEphemeralRunner.Name, Namespace: invalideEphemeralRunner.Namespace}, updated) func() (corev1.PodPhase, error) {
if err != nil { err := k8sClient.Get(
return "", nil ctx,
} client.ObjectKey{Name: invalideEphemeralRunner.Name, Namespace: invalideEphemeralRunner.Namespace},
return updated.Status.Phase, nil updated,
}, ephemeralRunnerTimeout, ephemeralRunnerInterval).Should(BeEquivalentTo(corev1.PodFailed)) )
if err != nil {
return "", nil
}
return updated.Status.Phase, nil
},
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
).Should(BeEquivalentTo(corev1.PodFailed))
Expect(updated.Status.Reason).Should(Equal("InvalidPod")) Expect(updated.Status.Reason).Should(Equal("InvalidPod"))
Expect(updated.Status.Message).Should(Equal("Failed to create the pod: pods \"invalid-ephemeral-runner\" is forbidden: no PriorityClass with name notexist was found")) Expect(updated.Status.Message).Should(Equal("Failed to create the pod: pods \"invalid-ephemeral-runner\" is forbidden: no PriorityClass with name notexist was found"))
}) })
@@ -528,44 +603,26 @@ var _ = Describe("EphemeralRunner", func() {
).Should(BeEquivalentTo("")) ).Should(BeEquivalentTo(""))
}) })
It("It should not re-create pod indefinitely", func() { It("It should eventually delete ephemeral runner after consecutive failures", func() {
updated := new(v1alpha1.EphemeralRunner) updated := new(v1alpha1.EphemeralRunner)
pod := new(corev1.Pod)
Eventually( Eventually(
func() (bool, error) { func() error {
err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, updated) return k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, updated)
if err != nil {
return false, err
}
err = k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, pod)
if err != nil {
if kerrors.IsNotFound(err) && len(updated.Status.Failures) > 5 {
return true, nil
}
return false, err
}
pod.Status.ContainerStatuses = append(pod.Status.ContainerStatuses, corev1.ContainerStatus{
Name: v1alpha1.EphemeralRunnerContainerName,
State: corev1.ContainerState{
Terminated: &corev1.ContainerStateTerminated{
ExitCode: 1,
},
},
})
err = k8sClient.Status().Update(ctx, pod)
Expect(err).To(BeNil(), "Failed to update pod status")
return false, fmt.Errorf("pod haven't failed for 5 times.")
}, },
ephemeralRunnerTimeout, ephemeralRunnerTimeout,
ephemeralRunnerInterval, ephemeralRunnerInterval,
).Should(BeEquivalentTo(true), "we should stop creating pod after 5 failures") ).Should(Succeed(), "failed to get ephemeral runner")
failEphemeralRunnerPod := func() *corev1.Pod {
pod := new(corev1.Pod)
Eventually(
func() error {
return k8sClient.Get(ctx, client.ObjectKey{Name: updated.Name, Namespace: updated.Namespace}, pod)
},
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
).Should(Succeed(), "failed to get ephemeral runner pod")
// In case we still have pod created due to controller-runtime cache delay, mark the container as exited
err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, pod)
if err == nil {
pod.Status.ContainerStatuses = append(pod.Status.ContainerStatuses, corev1.ContainerStatus{ pod.Status.ContainerStatuses = append(pod.Status.ContainerStatuses, corev1.ContainerStatus{
Name: v1alpha1.EphemeralRunnerContainerName, Name: v1alpha1.EphemeralRunnerContainerName,
State: corev1.ContainerState{ State: corev1.ContainerState{
@@ -576,25 +633,70 @@ var _ = Describe("EphemeralRunner", func() {
}) })
err := k8sClient.Status().Update(ctx, pod) err := k8sClient.Status().Update(ctx, pod)
Expect(err).To(BeNil(), "Failed to update pod status") Expect(err).To(BeNil(), "Failed to update pod status")
return pod
} }
// EphemeralRunner should failed with reason TooManyPodFailures for i := range 5 {
Eventually(func() (string, error) { pod := failEphemeralRunnerPod()
err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, updated)
if err != nil {
return "", err
}
return updated.Status.Reason, nil
}, ephemeralRunnerTimeout, ephemeralRunnerInterval).Should(BeEquivalentTo("TooManyPodFailures"), "Reason should be TooManyPodFailures")
// EphemeralRunner should not have any pod Eventually(
Eventually(func() (bool, error) { func() (int, error) {
err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, pod) err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, updated)
if err == nil { if err != nil {
return false, nil return 0, err
} }
return kerrors.IsNotFound(err), nil return len(updated.Status.Failures), nil
}, ephemeralRunnerTimeout, ephemeralRunnerInterval).Should(BeEquivalentTo(true)) },
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
).Should(BeEquivalentTo(i + 1))
Eventually(
func() error {
nextPod := new(corev1.Pod)
err := k8sClient.Get(ctx, client.ObjectKey{Name: pod.Name, Namespace: pod.Namespace}, nextPod)
if err != nil {
return err
}
if nextPod.UID != pod.UID {
return nil
}
return fmt.Errorf("pod not recreated")
},
).WithTimeout(20*time.Second).WithPolling(10*time.Millisecond).Should(Succeed(), "pod should be recreated")
Eventually(
func() (bool, error) {
pod := new(corev1.Pod)
err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, pod)
if err != nil {
return false, err
}
for _, cs := range pod.Status.ContainerStatuses {
if cs.Name == v1alpha1.EphemeralRunnerContainerName {
return cs.State.Terminated == nil, nil
}
}
return true, nil
},
).WithTimeout(20*time.Second).WithPolling(10*time.Millisecond).Should(BeEquivalentTo(true), "pod should be terminated")
}
failEphemeralRunnerPod()
Eventually(
func() (bool, error) {
err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, updated)
if kerrors.IsNotFound(err) {
return true, nil
}
return false, err
},
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
).Should(BeTrue(), "Ephemeral runner should eventually be deleted")
}) })
It("It should re-create pod on eviction", func() { It("It should re-create pod on eviction", func() {
@@ -643,53 +745,6 @@ var _ = Describe("EphemeralRunner", func() {
).Should(BeEquivalentTo(true)) ).Should(BeEquivalentTo(true))
}) })
It("It should re-create pod on exit status 0, but runner exists within the service", func() {
pod := new(corev1.Pod)
Eventually(
func() (bool, error) {
if err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, pod); err != nil {
return false, err
}
return true, nil
},
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
).Should(BeEquivalentTo(true))
pod.Status.ContainerStatuses = append(pod.Status.ContainerStatuses, corev1.ContainerStatus{
Name: v1alpha1.EphemeralRunnerContainerName,
State: corev1.ContainerState{
Terminated: &corev1.ContainerStateTerminated{
ExitCode: 0,
},
},
})
err := k8sClient.Status().Update(ctx, pod)
Expect(err).To(BeNil(), "failed to update pod status")
updated := new(v1alpha1.EphemeralRunner)
Eventually(func() (bool, error) {
err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, updated)
if err != nil {
return false, err
}
return len(updated.Status.Failures) == 1, nil
}, ephemeralRunnerTimeout, ephemeralRunnerInterval).Should(BeEquivalentTo(true))
// should re-create after failure
Eventually(
func() (bool, error) {
pod := new(corev1.Pod)
if err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, pod); err != nil {
return false, err
}
return true, nil
},
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
).Should(BeEquivalentTo(true))
})
It("It should not set the phase to succeeded without pod termination status", func() { It("It should not set the phase to succeeded without pod termination status", func() {
pod := new(corev1.Pod) pod := new(corev1.Pod)
Eventually( Eventually(
@@ -762,22 +817,27 @@ var _ = Describe("EphemeralRunner", func() {
Client: mgr.GetClient(), Client: mgr.GetClient(),
Scheme: mgr.GetScheme(), Scheme: mgr.GetScheme(),
Log: logf.Log, Log: logf.Log,
ActionsClient: fake.NewMultiClient( ResourceBuilder: ResourceBuilder{
fake.WithDefaultClient( SecretResolver: &SecretResolver{
fake.NewFakeClient( k8sClient: mgr.GetClient(),
fake.WithGetRunner( multiClient: fake.NewMultiClient(
fake.WithDefaultClient(
fake.NewFakeClient(
fake.WithGetRunner(
nil,
&actions.ActionsError{
StatusCode: http.StatusNotFound,
Err: &actions.ActionsExceptionError{
ExceptionName: "AgentNotFoundException",
},
},
),
),
nil, nil,
&actions.ActionsError{
StatusCode: http.StatusNotFound,
Err: &actions.ActionsExceptionError{
ExceptionName: "AgentNotFoundException",
},
},
), ),
), ),
nil, },
), },
),
} }
err := controller.SetupWithManager(mgr) err := controller.SetupWithManager(mgr)
Expect(err).To(BeNil(), "failed to setup controller") Expect(err).To(BeNil(), "failed to setup controller")
@@ -785,7 +845,7 @@ var _ = Describe("EphemeralRunner", func() {
startManagers(GinkgoT(), mgr) startManagers(GinkgoT(), mgr)
}) })
It("It should set the Phase to Succeeded", func() { It("It should delete EphemeralRunner when pod exits successfully", func() {
ephemeralRunner := newExampleRunner("test-runner", autoscalingNS.Name, configSecret.Name) ephemeralRunner := newExampleRunner("test-runner", autoscalingNS.Name, configSecret.Name)
err := k8sClient.Create(ctx, ephemeralRunner) err := k8sClient.Create(ctx, ephemeralRunner)
@@ -811,13 +871,18 @@ var _ = Describe("EphemeralRunner", func() {
Expect(err).To(BeNil(), "failed to update pod status") Expect(err).To(BeNil(), "failed to update pod status")
updated := new(v1alpha1.EphemeralRunner) updated := new(v1alpha1.EphemeralRunner)
Eventually(func() (corev1.PodPhase, error) { Eventually(
err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, updated) func() bool {
if err != nil { err := k8sClient.Get(
return "", nil ctx,
} client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace},
return updated.Status.Phase, nil updated,
}, ephemeralRunnerTimeout, ephemeralRunnerInterval).Should(BeEquivalentTo(corev1.PodSucceeded)) )
return kerrors.IsNotFound(err)
},
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
).Should(BeTrue())
}) })
}) })
@@ -834,10 +899,15 @@ var _ = Describe("EphemeralRunner", func() {
configSecret = createDefaultSecret(GinkgoT(), k8sClient, autoScalingNS.Name) configSecret = createDefaultSecret(GinkgoT(), k8sClient, autoScalingNS.Name)
controller = &EphemeralRunnerReconciler{ controller = &EphemeralRunnerReconciler{
Client: mgr.GetClient(), Client: mgr.GetClient(),
Scheme: mgr.GetScheme(), Scheme: mgr.GetScheme(),
Log: logf.Log, Log: logf.Log,
ActionsClient: fake.NewMultiClient(), ResourceBuilder: ResourceBuilder{
SecretResolver: &SecretResolver{
k8sClient: mgr.GetClient(),
multiClient: fake.NewMultiClient(),
},
},
} }
err := controller.SetupWithManager(mgr) err := controller.SetupWithManager(mgr)
Expect(err).To(BeNil(), "failed to setup controller") Expect(err).To(BeNil(), "failed to setup controller")
@@ -847,7 +917,12 @@ var _ = Describe("EphemeralRunner", func() {
It("uses an actions client with proxy transport", func() { It("uses an actions client with proxy transport", func() {
// Use an actual client // Use an actual client
controller.ActionsClient = actions.NewMultiClient(logr.Discard()) controller.ResourceBuilder = ResourceBuilder{
SecretResolver: &SecretResolver{
k8sClient: mgr.GetClient(),
multiClient: actions.NewMultiClient(logr.Discard()),
},
}
proxySuccessfulllyCalled := false proxySuccessfulllyCalled := false
proxy := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { proxy := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
@@ -998,10 +1073,15 @@ var _ = Describe("EphemeralRunner", func() {
Expect(err).NotTo(HaveOccurred(), "failed to create configmap with root CAs") Expect(err).NotTo(HaveOccurred(), "failed to create configmap with root CAs")
controller = &EphemeralRunnerReconciler{ controller = &EphemeralRunnerReconciler{
Client: mgr.GetClient(), Client: mgr.GetClient(),
Scheme: mgr.GetScheme(), Scheme: mgr.GetScheme(),
Log: logf.Log, Log: logf.Log,
ActionsClient: fake.NewMultiClient(), ResourceBuilder: ResourceBuilder{
SecretResolver: &SecretResolver{
k8sClient: mgr.GetClient(),
multiClient: fake.NewMultiClient(),
},
},
} }
err = controller.SetupWithManager(mgr) err = controller.SetupWithManager(mgr)
@@ -1032,11 +1112,16 @@ var _ = Describe("EphemeralRunner", func() {
server.StartTLS() server.StartTLS()
// Use an actual client // Use an actual client
controller.ActionsClient = actions.NewMultiClient(logr.Discard()) controller.ResourceBuilder = ResourceBuilder{
SecretResolver: &SecretResolver{
k8sClient: mgr.GetClient(),
multiClient: actions.NewMultiClient(logr.Discard()),
},
}
ephemeralRunner := newExampleRunner("test-runner", autoScalingNS.Name, configSecret.Name) ephemeralRunner := newExampleRunner("test-runner", autoScalingNS.Name, configSecret.Name)
ephemeralRunner.Spec.GitHubConfigUrl = server.ConfigURLForOrg("my-org") ephemeralRunner.Spec.GitHubConfigUrl = server.ConfigURLForOrg("my-org")
ephemeralRunner.Spec.GitHubServerTLS = &v1alpha1.GitHubServerTLSConfig{ ephemeralRunner.Spec.GitHubServerTLS = &v1alpha1.TLSConfig{
CertificateFrom: &v1alpha1.TLSCertificateSource{ CertificateFrom: &v1alpha1.TLSCertificateSource{
ConfigMapKeyRef: &corev1.ConfigMapKeySelector{ ConfigMapKeyRef: &corev1.ConfigMapKeySelector{
LocalObjectReference: corev1.LocalObjectReference{ LocalObjectReference: corev1.LocalObjectReference{

View File

@@ -83,7 +83,7 @@ func (r *EphemeralRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl.R
} }
// Requested deletion does not need reconciled. // Requested deletion does not need reconciled.
if !ephemeralRunnerSet.ObjectMeta.DeletionTimestamp.IsZero() { if !ephemeralRunnerSet.DeletionTimestamp.IsZero() {
if !controllerutil.ContainsFinalizer(ephemeralRunnerSet, ephemeralRunnerSetFinalizerName) { if !controllerutil.ContainsFinalizer(ephemeralRunnerSet, ephemeralRunnerSetFinalizerName) {
return ctrl.Result{}, nil return ctrl.Result{}, nil
} }
@@ -331,7 +331,7 @@ func (r *EphemeralRunnerSetReconciler) cleanUpEphemeralRunners(ctx context.Conte
return false, nil return false, nil
} }
actionsClient, err := r.actionsClientFor(ctx, ephemeralRunnerSet) actionsClient, err := r.GetActionsService(ctx, ephemeralRunnerSet)
if err != nil { if err != nil {
return false, err return false, err
} }
@@ -360,7 +360,7 @@ func (r *EphemeralRunnerSetReconciler) createEphemeralRunners(ctx context.Contex
// Track multiple errors at once and return the bundle. // Track multiple errors at once and return the bundle.
errs := make([]error, 0) errs := make([]error, 0)
for i := 0; i < count; i++ { for i := 0; i < count; i++ {
ephemeralRunner := r.ResourceBuilder.newEphemeralRunner(runnerSet) ephemeralRunner := r.newEphemeralRunner(runnerSet)
if runnerSet.Spec.EphemeralRunnerSpec.Proxy != nil { if runnerSet.Spec.EphemeralRunnerSpec.Proxy != nil {
ephemeralRunner.Spec.ProxySecretRef = proxyEphemeralRunnerSetSecretName(runnerSet) ephemeralRunner.Spec.ProxySecretRef = proxyEphemeralRunnerSetSecretName(runnerSet)
} }
@@ -439,7 +439,7 @@ func (r *EphemeralRunnerSetReconciler) deleteIdleEphemeralRunners(ctx context.Co
log.Info("No pending or running ephemeral runners running at this time for scale down") log.Info("No pending or running ephemeral runners running at this time for scale down")
return nil return nil
} }
actionsClient, err := r.actionsClientFor(ctx, ephemeralRunnerSet) actionsClient, err := r.GetActionsService(ctx, ephemeralRunnerSet)
if err != nil { if err != nil {
return fmt.Errorf("failed to create actions client for ephemeral runner replica set: %w", err) return fmt.Errorf("failed to create actions client for ephemeral runner replica set: %w", err)
} }
@@ -453,8 +453,13 @@ func (r *EphemeralRunnerSetReconciler) deleteIdleEphemeralRunners(ctx context.Co
continue continue
} }
if !isDone && ephemeralRunner.Status.JobRequestId > 0 { if !isDone && ephemeralRunner.HasJob() {
log.Info("Skipping ephemeral runner since it is running a job", "name", ephemeralRunner.Name, "jobRequestId", ephemeralRunner.Status.JobRequestId) log.Info(
"Skipping ephemeral runner since it is running a job",
"name", ephemeralRunner.Name,
"workflowRunId", ephemeralRunner.Status.WorkflowRunId,
"jobId", ephemeralRunner.Status.JobID,
)
continue continue
} }
@@ -502,73 +507,6 @@ func (r *EphemeralRunnerSetReconciler) deleteEphemeralRunnerWithActionsClient(ct
return true, nil return true, nil
} }
func (r *EphemeralRunnerSetReconciler) actionsClientFor(ctx context.Context, rs *v1alpha1.EphemeralRunnerSet) (actions.ActionsService, error) {
secret := new(corev1.Secret)
if err := r.Get(ctx, types.NamespacedName{Namespace: rs.Namespace, Name: rs.Spec.EphemeralRunnerSpec.GitHubConfigSecret}, secret); err != nil {
return nil, fmt.Errorf("failed to get secret: %w", err)
}
opts, err := r.actionsClientOptionsFor(ctx, rs)
if err != nil {
return nil, fmt.Errorf("failed to get actions client options: %w", err)
}
return r.ActionsClient.GetClientFromSecret(
ctx,
rs.Spec.EphemeralRunnerSpec.GitHubConfigUrl,
rs.Namespace,
secret.Data,
opts...,
)
}
func (r *EphemeralRunnerSetReconciler) actionsClientOptionsFor(ctx context.Context, rs *v1alpha1.EphemeralRunnerSet) ([]actions.ClientOption, error) {
var opts []actions.ClientOption
if rs.Spec.EphemeralRunnerSpec.Proxy != nil {
proxyFunc, err := rs.Spec.EphemeralRunnerSpec.Proxy.ProxyFunc(func(s string) (*corev1.Secret, error) {
var secret corev1.Secret
err := r.Get(ctx, types.NamespacedName{Namespace: rs.Namespace, Name: s}, &secret)
if err != nil {
return nil, fmt.Errorf("failed to get secret %s: %w", s, err)
}
return &secret, nil
})
if err != nil {
return nil, fmt.Errorf("failed to get proxy func: %w", err)
}
opts = append(opts, actions.WithProxy(proxyFunc))
}
tlsConfig := rs.Spec.EphemeralRunnerSpec.GitHubServerTLS
if tlsConfig != nil {
pool, err := tlsConfig.ToCertPool(func(name, key string) ([]byte, error) {
var configmap corev1.ConfigMap
err := r.Get(
ctx,
types.NamespacedName{
Namespace: rs.Namespace,
Name: name,
},
&configmap,
)
if err != nil {
return nil, fmt.Errorf("failed to get configmap %s: %w", name, err)
}
return []byte(configmap.Data[key]), nil
})
if err != nil {
return nil, fmt.Errorf("failed to get tls config: %w", err)
}
opts = append(opts, actions.WithRootCAs(pool))
}
return opts, nil
}
// SetupWithManager sets up the controller with the Manager. // SetupWithManager sets up the controller with the Manager.
func (r *EphemeralRunnerSetReconciler) SetupWithManager(mgr ctrl.Manager) error { func (r *EphemeralRunnerSetReconciler) SetupWithManager(mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr). return ctrl.NewControllerManagedBy(mgr).
@@ -641,7 +579,7 @@ func newEphemeralRunnerState(ephemeralRunnerList *v1alpha1.EphemeralRunnerList)
if err == nil && patchID > ephemeralRunnerState.latestPatchID { if err == nil && patchID > ephemeralRunnerState.latestPatchID {
ephemeralRunnerState.latestPatchID = patchID ephemeralRunnerState.latestPatchID = patchID
} }
if !r.ObjectMeta.DeletionTimestamp.IsZero() { if !r.DeletionTimestamp.IsZero() {
ephemeralRunnerState.deleting = append(ephemeralRunnerState.deleting, r) ephemeralRunnerState.deleting = append(ephemeralRunnerState.deleting, r)
continue continue
} }

View File

@@ -10,6 +10,7 @@ import (
"os" "os"
"path/filepath" "path/filepath"
"strings" "strings"
"testing"
"time" "time"
corev1 "k8s.io/api/core/v1" corev1 "k8s.io/api/core/v1"
@@ -21,6 +22,7 @@ import (
"github.com/go-logr/logr" "github.com/go-logr/logr"
. "github.com/onsi/ginkgo/v2" . "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
"github.com/stretchr/testify/require"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1" "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
@@ -35,6 +37,10 @@ const (
ephemeralRunnerSetTestGitHubToken = "gh_token" ephemeralRunnerSetTestGitHubToken = "gh_token"
) )
func TestPrecomputedConstants(t *testing.T) {
require.Equal(t, len(failedRunnerBackoff), maxFailures+1)
}
var _ = Describe("Test EphemeralRunnerSet controller", func() { var _ = Describe("Test EphemeralRunnerSet controller", func() {
var ctx context.Context var ctx context.Context
var mgr ctrl.Manager var mgr ctrl.Manager
@@ -48,10 +54,15 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
configSecret = createDefaultSecret(GinkgoT(), k8sClient, autoscalingNS.Name) configSecret = createDefaultSecret(GinkgoT(), k8sClient, autoscalingNS.Name)
controller := &EphemeralRunnerSetReconciler{ controller := &EphemeralRunnerSetReconciler{
Client: mgr.GetClient(), Client: mgr.GetClient(),
Scheme: mgr.GetScheme(), Scheme: mgr.GetScheme(),
Log: logf.Log, Log: logf.Log,
ActionsClient: fake.NewMultiClient(), ResourceBuilder: ResourceBuilder{
SecretResolver: &SecretResolver{
k8sClient: mgr.GetClient(),
multiClient: fake.NewMultiClient(),
},
},
} }
err := controller.SetupWithManager(mgr) err := controller.SetupWithManager(mgr)
Expect(err).NotTo(HaveOccurred(), "failed to setup controller") Expect(err).NotTo(HaveOccurred(), "failed to setup controller")
@@ -1098,10 +1109,15 @@ var _ = Describe("Test EphemeralRunnerSet controller with proxy settings", func(
configSecret = createDefaultSecret(GinkgoT(), k8sClient, autoscalingNS.Name) configSecret = createDefaultSecret(GinkgoT(), k8sClient, autoscalingNS.Name)
controller := &EphemeralRunnerSetReconciler{ controller := &EphemeralRunnerSetReconciler{
Client: mgr.GetClient(), Client: mgr.GetClient(),
Scheme: mgr.GetScheme(), Scheme: mgr.GetScheme(),
Log: logf.Log, Log: logf.Log,
ActionsClient: actions.NewMultiClient(logr.Discard()), ResourceBuilder: ResourceBuilder{
SecretResolver: &SecretResolver{
k8sClient: mgr.GetClient(),
multiClient: actions.NewMultiClient(logr.Discard()),
},
},
} }
err := controller.SetupWithManager(mgr) err := controller.SetupWithManager(mgr)
Expect(err).NotTo(HaveOccurred(), "failed to setup controller") Expect(err).NotTo(HaveOccurred(), "failed to setup controller")
@@ -1397,10 +1413,15 @@ var _ = Describe("Test EphemeralRunnerSet controller with custom root CA", func(
Expect(err).NotTo(HaveOccurred(), "failed to create configmap with root CAs") Expect(err).NotTo(HaveOccurred(), "failed to create configmap with root CAs")
controller := &EphemeralRunnerSetReconciler{ controller := &EphemeralRunnerSetReconciler{
Client: mgr.GetClient(), Client: mgr.GetClient(),
Scheme: mgr.GetScheme(), Scheme: mgr.GetScheme(),
Log: logf.Log, Log: logf.Log,
ActionsClient: actions.NewMultiClient(logr.Discard()), ResourceBuilder: ResourceBuilder{
SecretResolver: &SecretResolver{
k8sClient: mgr.GetClient(),
multiClient: actions.NewMultiClient(logr.Discard()),
},
},
} }
err = controller.SetupWithManager(mgr) err = controller.SetupWithManager(mgr)
Expect(err).NotTo(HaveOccurred(), "failed to setup controller") Expect(err).NotTo(HaveOccurred(), "failed to setup controller")
@@ -1439,7 +1460,7 @@ var _ = Describe("Test EphemeralRunnerSet controller with custom root CA", func(
EphemeralRunnerSpec: v1alpha1.EphemeralRunnerSpec{ EphemeralRunnerSpec: v1alpha1.EphemeralRunnerSpec{
GitHubConfigUrl: server.ConfigURLForOrg("my-org"), GitHubConfigUrl: server.ConfigURLForOrg("my-org"),
GitHubConfigSecret: configSecret.Name, GitHubConfigSecret: configSecret.Name,
GitHubServerTLS: &v1alpha1.GitHubServerTLSConfig{ GitHubServerTLS: &v1alpha1.TLSConfig{
CertificateFrom: &v1alpha1.TLSCertificateSource{ CertificateFrom: &v1alpha1.TLSCertificateSource{
ConfigMapKeyRef: &corev1.ConfigMapKeySelector{ ConfigMapKeyRef: &corev1.ConfigMapKeySelector{
LocalObjectReference: corev1.LocalObjectReference{ LocalObjectReference: corev1.LocalObjectReference{

View File

@@ -0,0 +1,12 @@
package actionsgithubcom
type controllerError string
func (e controllerError) Error() string {
return string(e)
}
const (
retryableError = controllerError("retryable error")
fatalError = controllerError("fatal error")
)

View File

@@ -5,17 +5,20 @@ import (
"context" "context"
"encoding/json" "encoding/json"
"fmt" "fmt"
"maps"
"math" "math"
"net" "net"
"strconv" "strconv"
"strings" "strings"
"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1" "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1/appconfig"
"github.com/actions/actions-runner-controller/build" "github.com/actions/actions-runner-controller/build"
listenerconfig "github.com/actions/actions-runner-controller/cmd/ghalistener/config" ghalistenerconfig "github.com/actions/actions-runner-controller/cmd/ghalistener/config"
"github.com/actions/actions-runner-controller/github/actions" "github.com/actions/actions-runner-controller/github/actions"
"github.com/actions/actions-runner-controller/hash" "github.com/actions/actions-runner-controller/hash"
"github.com/actions/actions-runner-controller/logging" "github.com/actions/actions-runner-controller/logging"
"github.com/actions/actions-runner-controller/vault/azurekeyvault"
corev1 "k8s.io/api/core/v1" corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1" rbacv1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -71,6 +74,7 @@ func SetListenerEntrypoint(entrypoint string) {
type ResourceBuilder struct { type ResourceBuilder struct {
ExcludeLabelPropagationPrefixes []string ExcludeLabelPropagationPrefixes []string
*SecretResolver
} }
// boolPtr returns a pointer to a bool value // boolPtr returns a pointer to a bool value
@@ -120,6 +124,7 @@ func (b *ResourceBuilder) newAutoScalingListener(autoscalingRunnerSet *v1alpha1.
Spec: v1alpha1.AutoscalingListenerSpec{ Spec: v1alpha1.AutoscalingListenerSpec{
GitHubConfigUrl: autoscalingRunnerSet.Spec.GitHubConfigUrl, GitHubConfigUrl: autoscalingRunnerSet.Spec.GitHubConfigUrl,
GitHubConfigSecret: autoscalingRunnerSet.Spec.GitHubConfigSecret, GitHubConfigSecret: autoscalingRunnerSet.Spec.GitHubConfigSecret,
VaultConfig: autoscalingRunnerSet.VaultConfig(),
RunnerScaleSetId: runnerScaleSetId, RunnerScaleSetId: runnerScaleSetId,
AutoscalingRunnerSetNamespace: autoscalingRunnerSet.Namespace, AutoscalingRunnerSetNamespace: autoscalingRunnerSet.Namespace,
AutoscalingRunnerSetName: autoscalingRunnerSet.Name, AutoscalingRunnerSetName: autoscalingRunnerSet.Name,
@@ -159,7 +164,7 @@ func (lm *listenerMetricsServerConfig) containerPort() (corev1.ContainerPort, er
}, nil }, nil
} }
func (b *ResourceBuilder) newScaleSetListenerConfig(autoscalingListener *v1alpha1.AutoscalingListener, secret *corev1.Secret, metricsConfig *listenerMetricsServerConfig, cert string) (*corev1.Secret, error) { func (b *ResourceBuilder) newScaleSetListenerConfig(autoscalingListener *v1alpha1.AutoscalingListener, appConfig *appconfig.AppConfig, metricsConfig *listenerMetricsServerConfig, cert string) (*corev1.Secret, error) {
var ( var (
metricsAddr = "" metricsAddr = ""
metricsEndpoint = "" metricsEndpoint = ""
@@ -169,30 +174,8 @@ func (b *ResourceBuilder) newScaleSetListenerConfig(autoscalingListener *v1alpha
metricsEndpoint = metricsConfig.endpoint metricsEndpoint = metricsConfig.endpoint
} }
var appID int64 config := ghalistenerconfig.Config{
if id, ok := secret.Data["github_app_id"]; ok {
var err error
appID, err = strconv.ParseInt(string(id), 10, 64)
if err != nil {
return nil, fmt.Errorf("failed to convert github_app_id to int: %v", err)
}
}
var appInstallationID int64
if id, ok := secret.Data["github_app_installation_id"]; ok {
var err error
appInstallationID, err = strconv.ParseInt(string(id), 10, 64)
if err != nil {
return nil, fmt.Errorf("failed to convert github_app_installation_id to int: %v", err)
}
}
config := listenerconfig.Config{
ConfigureUrl: autoscalingListener.Spec.GitHubConfigUrl, ConfigureUrl: autoscalingListener.Spec.GitHubConfigUrl,
AppID: appID,
AppInstallationID: appInstallationID,
AppPrivateKey: string(secret.Data["github_app_private_key"]),
Token: string(secret.Data["github_token"]),
EphemeralRunnerSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace, EphemeralRunnerSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
EphemeralRunnerSetName: autoscalingListener.Spec.EphemeralRunnerSetName, EphemeralRunnerSetName: autoscalingListener.Spec.EphemeralRunnerSetName,
MaxRunners: autoscalingListener.Spec.MaxRunners, MaxRunners: autoscalingListener.Spec.MaxRunners,
@@ -207,6 +190,24 @@ func (b *ResourceBuilder) newScaleSetListenerConfig(autoscalingListener *v1alpha
Metrics: autoscalingListener.Spec.Metrics, Metrics: autoscalingListener.Spec.Metrics,
} }
vault := autoscalingListener.Spec.VaultConfig
if vault == nil {
config.AppConfig = appConfig
} else {
config.VaultType = vault.Type
config.VaultLookupKey = autoscalingListener.Spec.GitHubConfigSecret
config.AzureKeyVaultConfig = &azurekeyvault.Config{
TenantID: vault.AzureKeyVault.TenantID,
ClientID: vault.AzureKeyVault.ClientID,
URL: vault.AzureKeyVault.URL,
CertificatePath: vault.AzureKeyVault.CertificatePath,
}
}
if err := config.Validate(); err != nil {
return nil, fmt.Errorf("invalid listener config: %w", err)
}
var buf bytes.Buffer var buf bytes.Buffer
if err := json.NewEncoder(&buf).Encode(config); err != nil { if err := json.NewEncoder(&buf).Encode(config); err != nil {
return nil, fmt.Errorf("failed to encode config: %w", err) return nil, fmt.Errorf("failed to encode config: %w", err)
@@ -223,7 +224,7 @@ func (b *ResourceBuilder) newScaleSetListenerConfig(autoscalingListener *v1alpha
}, nil }, nil
} }
func (b *ResourceBuilder) newScaleSetListenerPod(autoscalingListener *v1alpha1.AutoscalingListener, podConfig *corev1.Secret, serviceAccount *corev1.ServiceAccount, secret *corev1.Secret, metricsConfig *listenerMetricsServerConfig, envs ...corev1.EnvVar) (*corev1.Pod, error) { func (b *ResourceBuilder) newScaleSetListenerPod(autoscalingListener *v1alpha1.AutoscalingListener, podConfig *corev1.Secret, serviceAccount *corev1.ServiceAccount, metricsConfig *listenerMetricsServerConfig, envs ...corev1.EnvVar) (*corev1.Pod, error) {
listenerEnv := []corev1.EnvVar{ listenerEnv := []corev1.EnvVar{
{ {
Name: "LISTENER_CONFIG_PATH", Name: "LISTENER_CONFIG_PATH",
@@ -278,9 +279,7 @@ func (b *ResourceBuilder) newScaleSetListenerPod(autoscalingListener *v1alpha1.A
} }
labels := make(map[string]string, len(autoscalingListener.Labels)) labels := make(map[string]string, len(autoscalingListener.Labels))
for key, val := range autoscalingListener.Labels { maps.Copy(labels, autoscalingListener.Labels)
labels[key] = val
}
newRunnerScaleSetListenerPod := &corev1.Pod{ newRunnerScaleSetListenerPod := &corev1.Pod{
TypeMeta: metav1.TypeMeta{ TypeMeta: metav1.TypeMeta{
@@ -429,7 +428,7 @@ func mergeListenerContainer(base, from *corev1.Container) {
func (b *ResourceBuilder) newScaleSetListenerServiceAccount(autoscalingListener *v1alpha1.AutoscalingListener) *corev1.ServiceAccount { func (b *ResourceBuilder) newScaleSetListenerServiceAccount(autoscalingListener *v1alpha1.AutoscalingListener) *corev1.ServiceAccount {
return &corev1.ServiceAccount{ return &corev1.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: scaleSetListenerServiceAccountName(autoscalingListener), Name: autoscalingListener.Name,
Namespace: autoscalingListener.Namespace, Namespace: autoscalingListener.Namespace,
Labels: b.mergeLabels(autoscalingListener.Labels, map[string]string{ Labels: b.mergeLabels(autoscalingListener.Labels, map[string]string{
LabelKeyGitHubScaleSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace, LabelKeyGitHubScaleSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
@@ -444,7 +443,7 @@ func (b *ResourceBuilder) newScaleSetListenerRole(autoscalingListener *v1alpha1.
rulesHash := hash.ComputeTemplateHash(&rules) rulesHash := hash.ComputeTemplateHash(&rules)
newRole := &rbacv1.Role{ newRole := &rbacv1.Role{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: scaleSetListenerRoleName(autoscalingListener), Name: autoscalingListener.Name,
Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace, Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
Labels: b.mergeLabels(autoscalingListener.Labels, map[string]string{ Labels: b.mergeLabels(autoscalingListener.Labels, map[string]string{
LabelKeyGitHubScaleSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace, LabelKeyGitHubScaleSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
@@ -478,7 +477,7 @@ func (b *ResourceBuilder) newScaleSetListenerRoleBinding(autoscalingListener *v1
newRoleBinding := &rbacv1.RoleBinding{ newRoleBinding := &rbacv1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: scaleSetListenerRoleName(autoscalingListener), Name: autoscalingListener.Name,
Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace, Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
Labels: b.mergeLabels(autoscalingListener.Labels, map[string]string{ Labels: b.mergeLabels(autoscalingListener.Labels, map[string]string{
LabelKeyGitHubScaleSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace, LabelKeyGitHubScaleSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
@@ -496,25 +495,6 @@ func (b *ResourceBuilder) newScaleSetListenerRoleBinding(autoscalingListener *v1
return newRoleBinding return newRoleBinding
} }
func (b *ResourceBuilder) newScaleSetListenerSecretMirror(autoscalingListener *v1alpha1.AutoscalingListener, secret *corev1.Secret) *corev1.Secret {
dataHash := hash.ComputeTemplateHash(&secret.Data)
newListenerSecret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: scaleSetListenerSecretMirrorName(autoscalingListener),
Namespace: autoscalingListener.Namespace,
Labels: b.mergeLabels(autoscalingListener.Labels, map[string]string{
LabelKeyGitHubScaleSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
LabelKeyGitHubScaleSetName: autoscalingListener.Spec.AutoscalingRunnerSetName,
"secret-data-hash": dataHash,
}),
},
Data: secret.DeepCopy().Data,
}
return newListenerSecret
}
func (b *ResourceBuilder) newEphemeralRunnerSet(autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet) (*v1alpha1.EphemeralRunnerSet, error) { func (b *ResourceBuilder) newEphemeralRunnerSet(autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet) (*v1alpha1.EphemeralRunnerSet, error) {
runnerScaleSetId, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdAnnotationKey]) runnerScaleSetId, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdAnnotationKey])
if err != nil { if err != nil {
@@ -543,8 +523,8 @@ func (b *ResourceBuilder) newEphemeralRunnerSet(autoscalingRunnerSet *v1alpha1.A
newEphemeralRunnerSet := &v1alpha1.EphemeralRunnerSet{ newEphemeralRunnerSet := &v1alpha1.EphemeralRunnerSet{
TypeMeta: metav1.TypeMeta{}, TypeMeta: metav1.TypeMeta{},
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
GenerateName: autoscalingRunnerSet.ObjectMeta.Name + "-", GenerateName: autoscalingRunnerSet.Name + "-",
Namespace: autoscalingRunnerSet.ObjectMeta.Namespace, Namespace: autoscalingRunnerSet.Namespace,
Labels: labels, Labels: labels,
Annotations: newAnnotations, Annotations: newAnnotations,
OwnerReferences: []metav1.OwnerReference{ OwnerReferences: []metav1.OwnerReference{
@@ -567,6 +547,7 @@ func (b *ResourceBuilder) newEphemeralRunnerSet(autoscalingRunnerSet *v1alpha1.A
Proxy: autoscalingRunnerSet.Spec.Proxy, Proxy: autoscalingRunnerSet.Spec.Proxy,
GitHubServerTLS: autoscalingRunnerSet.Spec.GitHubServerTLS, GitHubServerTLS: autoscalingRunnerSet.Spec.GitHubServerTLS,
PodTemplateSpec: autoscalingRunnerSet.Spec.Template, PodTemplateSpec: autoscalingRunnerSet.Spec.Template,
VaultConfig: autoscalingRunnerSet.VaultConfig(),
}, },
}, },
} }
@@ -588,6 +569,7 @@ func (b *ResourceBuilder) newEphemeralRunner(ephemeralRunnerSet *v1alpha1.Epheme
for key, val := range ephemeralRunnerSet.Annotations { for key, val := range ephemeralRunnerSet.Annotations {
annotations[key] = val annotations[key] = val
} }
annotations[AnnotationKeyPatchID] = strconv.Itoa(ephemeralRunnerSet.Spec.PatchID) annotations[AnnotationKeyPatchID] = strconv.Itoa(ephemeralRunnerSet.Spec.PatchID)
return &v1alpha1.EphemeralRunner{ return &v1alpha1.EphemeralRunner{
TypeMeta: metav1.TypeMeta{}, TypeMeta: metav1.TypeMeta{},
@@ -617,18 +599,18 @@ func (b *ResourceBuilder) newEphemeralRunnerPod(ctx context.Context, runner *v1a
labels := map[string]string{} labels := map[string]string{}
annotations := map[string]string{} annotations := map[string]string{}
for k, v := range runner.ObjectMeta.Labels { for k, v := range runner.Labels {
labels[k] = v labels[k] = v
} }
for k, v := range runner.Spec.PodTemplateSpec.Labels { for k, v := range runner.Spec.Labels {
labels[k] = v labels[k] = v
} }
labels["actions-ephemeral-runner"] = string(corev1.ConditionTrue) labels["actions-ephemeral-runner"] = string(corev1.ConditionTrue)
for k, v := range runner.ObjectMeta.Annotations { for k, v := range runner.Annotations {
annotations[k] = v annotations[k] = v
} }
for k, v := range runner.Spec.PodTemplateSpec.Annotations { for k, v := range runner.Spec.Annotations {
annotations[k] = v annotations[k] = v
} }
@@ -636,12 +618,12 @@ func (b *ResourceBuilder) newEphemeralRunnerPod(ctx context.Context, runner *v1a
FilterLabels(labels, LabelKeyRunnerTemplateHash), FilterLabels(labels, LabelKeyRunnerTemplateHash),
annotations, annotations,
runner.Spec, runner.Spec,
runner.Status.RunnerJITConfig, secret.Data,
) )
objectMeta := metav1.ObjectMeta{ objectMeta := metav1.ObjectMeta{
Name: runner.ObjectMeta.Name, Name: runner.Name,
Namespace: runner.ObjectMeta.Namespace, Namespace: runner.Namespace,
Labels: labels, Labels: labels,
Annotations: annotations, Annotations: annotations,
OwnerReferences: []metav1.OwnerReference{ OwnerReferences: []metav1.OwnerReference{
@@ -657,10 +639,10 @@ func (b *ResourceBuilder) newEphemeralRunnerPod(ctx context.Context, runner *v1a
} }
newPod.ObjectMeta = objectMeta newPod.ObjectMeta = objectMeta
newPod.Spec = runner.Spec.PodTemplateSpec.Spec newPod.Spec = runner.Spec.Spec
newPod.Spec.Containers = make([]corev1.Container, 0, len(runner.Spec.PodTemplateSpec.Spec.Containers)) newPod.Spec.Containers = make([]corev1.Container, 0, len(runner.Spec.Spec.Containers))
for _, c := range runner.Spec.PodTemplateSpec.Spec.Containers { for _, c := range runner.Spec.Spec.Containers {
if c.Name == v1alpha1.EphemeralRunnerContainerName { if c.Name == v1alpha1.EphemeralRunnerContainerName {
c.Env = append( c.Env = append(
c.Env, c.Env,
@@ -689,14 +671,17 @@ func (b *ResourceBuilder) newEphemeralRunnerPod(ctx context.Context, runner *v1a
return &newPod return &newPod
} }
func (b *ResourceBuilder) newEphemeralRunnerJitSecret(ephemeralRunner *v1alpha1.EphemeralRunner) *corev1.Secret { func (b *ResourceBuilder) newEphemeralRunnerJitSecret(ephemeralRunner *v1alpha1.EphemeralRunner, jitConfig *actions.RunnerScaleSetJitRunnerConfig) *corev1.Secret {
return &corev1.Secret{ return &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: ephemeralRunner.Name, Name: ephemeralRunner.Name,
Namespace: ephemeralRunner.Namespace, Namespace: ephemeralRunner.Namespace,
}, },
Data: map[string][]byte{ Data: map[string][]byte{
jitTokenKey: []byte(ephemeralRunner.Status.RunnerJITConfig), jitTokenKey: []byte(jitConfig.EncodedJITConfig),
"runnerName": []byte(jitConfig.Runner.Name),
"runnerId": []byte(strconv.Itoa(jitConfig.Runner.Id)),
"scaleSetId": []byte(strconv.Itoa(jitConfig.Runner.RunnerScaleSetId)),
}, },
} }
} }
@@ -713,30 +698,6 @@ func scaleSetListenerName(autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet) s
return fmt.Sprintf("%v-%v-listener", autoscalingRunnerSet.Name, namespaceHash) return fmt.Sprintf("%v-%v-listener", autoscalingRunnerSet.Name, namespaceHash)
} }
func scaleSetListenerServiceAccountName(autoscalingListener *v1alpha1.AutoscalingListener) string {
namespaceHash := hash.FNVHashString(autoscalingListener.Spec.AutoscalingRunnerSetNamespace)
if len(namespaceHash) > 8 {
namespaceHash = namespaceHash[:8]
}
return fmt.Sprintf("%v-%v-listener", autoscalingListener.Spec.AutoscalingRunnerSetName, namespaceHash)
}
func scaleSetListenerRoleName(autoscalingListener *v1alpha1.AutoscalingListener) string {
namespaceHash := hash.FNVHashString(autoscalingListener.Spec.AutoscalingRunnerSetNamespace)
if len(namespaceHash) > 8 {
namespaceHash = namespaceHash[:8]
}
return fmt.Sprintf("%v-%v-listener", autoscalingListener.Spec.AutoscalingRunnerSetName, namespaceHash)
}
func scaleSetListenerSecretMirrorName(autoscalingListener *v1alpha1.AutoscalingListener) string {
namespaceHash := hash.FNVHashString(autoscalingListener.Spec.AutoscalingRunnerSetNamespace)
if len(namespaceHash) > 8 {
namespaceHash = namespaceHash[:8]
}
return fmt.Sprintf("%v-%v-listener", autoscalingListener.Spec.AutoscalingRunnerSetName, namespaceHash)
}
func proxyListenerSecretName(autoscalingListener *v1alpha1.AutoscalingListener) string { func proxyListenerSecretName(autoscalingListener *v1alpha1.AutoscalingListener) string {
namespaceHash := hash.FNVHashString(autoscalingListener.Spec.AutoscalingRunnerSetNamespace) namespaceHash := hash.FNVHashString(autoscalingListener.Spec.AutoscalingRunnerSetNamespace)
if len(namespaceHash) > 8 { if len(namespaceHash) > 8 {

View File

@@ -82,12 +82,7 @@ func TestLabelPropagation(t *testing.T) {
Name: "test", Name: "test",
}, },
} }
listenerSecret := &corev1.Secret{ listenerPod, err := b.newScaleSetListenerPod(listener, &corev1.Secret{}, listenerServiceAccount, nil)
ObjectMeta: metav1.ObjectMeta{
Name: "test",
},
}
listenerPod, err := b.newScaleSetListenerPod(listener, &corev1.Secret{}, listenerServiceAccount, listenerSecret, nil)
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, listenerPod.Labels, listener.Labels) assert.Equal(t, listenerPod.Labels, listener.Labels)

View File

@@ -0,0 +1,280 @@
package actionsgithubcom
import (
"context"
"encoding/json"
"fmt"
"net/http"
"net/url"
"strings"
"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1/appconfig"
"github.com/actions/actions-runner-controller/github/actions"
"github.com/actions/actions-runner-controller/vault"
"github.com/actions/actions-runner-controller/vault/azurekeyvault"
"golang.org/x/net/http/httpproxy"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
)
// SecretResolver resolves GitHub credentials for controller-managed objects,
// reading them either from Kubernetes secrets or from an external vault,
// depending on the object's vault configuration.
type SecretResolver struct {
	// k8sClient reads secrets/configmaps from the cluster; never nil
	// (enforced by NewSecretResolver).
	k8sClient client.Client
	// multiClient hands out Actions service clients per configuration.
	multiClient actions.MultiClient
}
// SecretResolverOption mutates a SecretResolver during construction.
type SecretResolverOption func(*SecretResolver)
// NewSecretResolver builds a SecretResolver around the given Kubernetes
// client and Actions multi-client, then applies any options.
// It panics when k8sClient is nil, since the resolver cannot function
// without cluster access.
func NewSecretResolver(k8sClient client.Client, multiClient actions.MultiClient, opts ...SecretResolverOption) *SecretResolver {
	if k8sClient == nil {
		panic("k8sClient must not be nil")
	}
	sr := &SecretResolver{
		k8sClient:   k8sClient,
		multiClient: multiClient,
	}
	for _, apply := range opts {
		apply(sr)
	}
	return sr
}
// ActionsGitHubObject is a Kubernetes object that carries the GitHub and
// vault-related configuration needed to resolve credentials and build an
// Actions client for it.
type ActionsGitHubObject interface {
	client.Object
	// GitHubConfigUrl is the GitHub org/repo/enterprise URL the runners target.
	GitHubConfigUrl() string
	// GitHubConfigSecret names the secret (or vault key) holding credentials.
	GitHubConfigSecret() string
	// GitHubProxy returns the proxy settings for GitHub traffic, if any.
	GitHubProxy() *v1alpha1.ProxyConfig
	// GitHubServerTLS returns custom TLS trust for the GitHub server, if any.
	GitHubServerTLS() *v1alpha1.TLSConfig
	// VaultConfig returns the external vault configuration, if any.
	VaultConfig() *v1alpha1.VaultConfig
	// VaultProxy returns the proxy settings for vault traffic, if any.
	VaultProxy() *v1alpha1.ProxyConfig
}
// GetAppConfig resolves the GitHub App (or token) configuration for obj from
// the backend selected by its vault configuration: a Kubernetes secret when
// no vault is configured, or the external vault otherwise.
func (sr *SecretResolver) GetAppConfig(ctx context.Context, obj ActionsGitHubObject) (*appconfig.AppConfig, error) {
	resolver, err := sr.resolverForObject(ctx, obj)
	if err != nil {
		// Wrap with %w so callers can inspect the cause via errors.Is/As.
		return nil, fmt.Errorf("failed to get resolver for object: %w", err)
	}
	appConfig, err := resolver.appConfig(ctx, obj.GitHubConfigSecret())
	if err != nil {
		return nil, fmt.Errorf("failed to resolve app config: %w", err)
	}
	return appConfig, nil
}
// GetActionsService resolves the credentials for obj and returns an Actions
// service client configured with the object's proxy and TLS settings.
//
// Resolution order: pick the secret backend (Kubernetes or vault), load the
// app config, then layer on optional HTTP(S) proxy settings (with credentials
// resolved through the same backend) and an optional custom root CA pool read
// from a configmap in the object's namespace.
func (sr *SecretResolver) GetActionsService(ctx context.Context, obj ActionsGitHubObject) (actions.ActionsService, error) {
	resolver, err := sr.resolverForObject(ctx, obj)
	if err != nil {
		// Wrap with %w so callers can inspect the cause via errors.Is/As.
		return nil, fmt.Errorf("failed to get resolver for object: %w", err)
	}
	appConfig, err := resolver.appConfig(ctx, obj.GitHubConfigSecret())
	if err != nil {
		return nil, fmt.Errorf("failed to resolve app config: %w", err)
	}

	var clientOptions []actions.ClientOption
	if proxy := obj.GitHubProxy(); proxy != nil {
		config := &httpproxy.Config{
			NoProxy: strings.Join(proxy.NoProxy, ","),
		}

		if proxy.HTTP != nil {
			u, err := url.Parse(proxy.HTTP.Url)
			if err != nil {
				return nil, fmt.Errorf("failed to parse proxy http url %q: %w", proxy.HTTP.Url, err)
			}
			if ref := proxy.HTTP.CredentialSecretRef; ref != "" {
				// Proxy credentials come from the same backend as the app config.
				u.User, err = resolver.proxyCredentials(ctx, ref)
				if err != nil {
					return nil, fmt.Errorf("failed to resolve proxy credentials: %w", err)
				}
			}
			config.HTTPProxy = u.String()
		}

		if proxy.HTTPS != nil {
			u, err := url.Parse(proxy.HTTPS.Url)
			if err != nil {
				return nil, fmt.Errorf("failed to parse proxy https url %q: %w", proxy.HTTPS.Url, err)
			}
			if ref := proxy.HTTPS.CredentialSecretRef; ref != "" {
				u.User, err = resolver.proxyCredentials(ctx, ref)
				if err != nil {
					return nil, fmt.Errorf("failed to resolve proxy credentials: %w", err)
				}
			}
			config.HTTPSProxy = u.String()
		}

		proxyFunc := func(req *http.Request) (*url.URL, error) {
			return config.ProxyFunc()(req.URL)
		}
		clientOptions = append(clientOptions, actions.WithProxy(proxyFunc))
	}

	tlsConfig := obj.GitHubServerTLS()
	if tlsConfig != nil {
		// CA material is read from a configmap in the object's namespace.
		pool, err := tlsConfig.ToCertPool(func(name, key string) ([]byte, error) {
			var configmap corev1.ConfigMap
			err := sr.k8sClient.Get(
				ctx,
				types.NamespacedName{
					Namespace: obj.GetNamespace(),
					Name:      name,
				},
				&configmap,
			)
			if err != nil {
				return nil, fmt.Errorf("failed to get configmap %s: %w", name, err)
			}
			return []byte(configmap.Data[key]), nil
		})
		if err != nil {
			return nil, fmt.Errorf("failed to get tls config: %w", err)
		}
		clientOptions = append(clientOptions, actions.WithRootCAs(pool))
	}

	return sr.multiClient.GetClientFor(
		ctx,
		obj.GitHubConfigUrl(),
		appConfig,
		obj.GetNamespace(),
		clientOptions...,
	)
}
// resolverForObject selects the secret backend for obj: the Kubernetes API
// when no vault is configured (or the type is empty), otherwise a client for
// the configured external vault, optionally routed through the vault proxy.
func (sr *SecretResolver) resolverForObject(ctx context.Context, obj ActionsGitHubObject) (resolver, error) {
	vaultConfig := obj.VaultConfig()
	if vaultConfig == nil || vaultConfig.Type == "" {
		return &k8sResolver{
			namespace: obj.GetNamespace(),
			client:    sr.k8sClient,
		}, nil
	}

	var proxy *httpproxy.Config
	if vaultProxy := obj.VaultProxy(); vaultProxy != nil {
		// Proxy credential secrets are always plain Kubernetes secrets in the
		// object's namespace, even when the vault backend is in use.
		p, err := vaultProxy.ToHTTPProxyConfig(func(s string) (*corev1.Secret, error) {
			var secret corev1.Secret
			err := sr.k8sClient.Get(ctx, types.NamespacedName{Name: s, Namespace: obj.GetNamespace()}, &secret)
			if err != nil {
				return nil, fmt.Errorf("failed to get secret %s: %w", s, err)
			}
			return &secret, nil
		})
		if err != nil {
			// Wrap with %w so callers can inspect the cause via errors.Is/As.
			return nil, fmt.Errorf("failed to create proxy config: %w", err)
		}
		proxy = p
	}

	switch vaultConfig.Type {
	case vault.VaultTypeAzureKeyVault:
		akv, err := azurekeyvault.New(azurekeyvault.Config{
			TenantID:        vaultConfig.AzureKeyVault.TenantID,
			ClientID:        vaultConfig.AzureKeyVault.ClientID,
			URL:             vaultConfig.AzureKeyVault.URL,
			CertificatePath: vaultConfig.AzureKeyVault.CertificatePath,
			Proxy:           proxy,
		})
		if err != nil {
			return nil, fmt.Errorf("failed to create Azure Key Vault client: %w", err)
		}
		return &vaultResolver{
			vault: akv,
		}, nil
	default:
		return nil, fmt.Errorf("unknown vault type %q", vaultConfig.Type)
	}
}
// resolver abstracts where secrets are read from (Kubernetes or a vault).
type resolver interface {
	// appConfig resolves the GitHub app/token configuration stored under key.
	appConfig(ctx context.Context, key string) (*appconfig.AppConfig, error)
	// proxyCredentials resolves proxy basic-auth credentials stored under key.
	proxyCredentials(ctx context.Context, key string) (*url.Userinfo, error)
}
// k8sResolver reads secrets from the Kubernetes API in a fixed namespace.
type k8sResolver struct {
	// namespace all lookups are scoped to.
	namespace string
	client    client.Client
}
// appConfig loads the GitHub app/token configuration from the Kubernetes
// secret named key in the resolver's namespace.
func (r *k8sResolver) appConfig(ctx context.Context, key string) (*appconfig.AppConfig, error) {
	nsName := types.NamespacedName{
		Namespace: r.namespace,
		Name:      key,
	}
	secret := new(corev1.Secret)
	if err := r.client.Get(
		ctx,
		nsName,
		secret,
	); err != nil {
		// Include the underlying error (previously dropped) so callers can
		// distinguish NotFound from transient API failures via errors.Is/As.
		return nil, fmt.Errorf("failed to get kubernetes secret %q: %w", nsName.String(), err)
	}
	return appconfig.FromSecret(secret)
}
// proxyCredentials reads the "username" and "password" entries of the
// Kubernetes secret named key and returns them as URL userinfo. Missing
// entries yield empty strings.
func (r *k8sResolver) proxyCredentials(ctx context.Context, key string) (*url.Userinfo, error) {
	nsName := types.NamespacedName{Namespace: r.namespace, Name: key}
	secret := new(corev1.Secret)
	if err := r.client.Get(
		ctx,
		nsName,
		secret,
	); err != nil {
		// Include the underlying error (previously dropped) so callers can
		// distinguish NotFound from transient API failures via errors.Is/As.
		return nil, fmt.Errorf("failed to get kubernetes secret %q: %w", nsName.String(), err)
	}
	return url.UserPassword(
		string(secret.Data["username"]),
		string(secret.Data["password"]),
	), nil
}
// vaultResolver reads secrets from an external vault (e.g. Azure Key Vault).
type vaultResolver struct {
	vault vault.Vault
}
// appConfig fetches the vault secret stored under key and parses it as a
// JSON-encoded app configuration.
func (r *vaultResolver) appConfig(ctx context.Context, key string) (*appconfig.AppConfig, error) {
	val, err := r.vault.GetSecret(ctx, key)
	if err != nil {
		// Wrap with %w so callers can inspect the cause via errors.Is/As.
		return nil, fmt.Errorf("failed to resolve secret: %w", err)
	}
	return appconfig.FromJSONString(val)
}
// proxyCredentials fetches the vault secret stored under key, decodes it as a
// JSON document {"username": ..., "password": ...}, and returns the pair as
// URL userinfo.
func (r *vaultResolver) proxyCredentials(ctx context.Context, key string) (*url.Userinfo, error) {
	val, err := r.vault.GetSecret(ctx, key)
	if err != nil {
		// Wrap with %w so callers can inspect the cause via errors.Is/As.
		return nil, fmt.Errorf("failed to resolve secret: %w", err)
	}
	type info struct {
		Username string `json:"username"`
		Password string `json:"password"`
	}
	var i info
	if err := json.Unmarshal([]byte(val), &i); err != nil {
		return nil, fmt.Errorf("failed to unmarshal info: %w", err)
	}
	return url.UserPassword(i.Username, i.Password), nil
}

View File

@@ -20,6 +20,7 @@ import (
"os" "os"
"path/filepath" "path/filepath"
"testing" "testing"
"time"
"github.com/onsi/ginkgo/config" "github.com/onsi/ginkgo/config"
@@ -79,6 +80,15 @@ var _ = BeforeSuite(func() {
k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(k8sClient).ToNot(BeNil()) Expect(k8sClient).ToNot(BeNil())
failedRunnerBackoff = []time.Duration{
20 * time.Millisecond,
20 * time.Millisecond,
20 * time.Millisecond,
20 * time.Millisecond,
20 * time.Millisecond,
20 * time.Millisecond,
}
}) })
var _ = AfterSuite(func() { var _ = AfterSuite(func() {

View File

@@ -130,7 +130,7 @@ func (r *HorizontalRunnerAutoscalerReconciler) suggestReplicasByQueuedAndInProgr
jobs, resp, err := ghc.Actions.ListWorkflowJobs(context.TODO(), user, repoName, runID, &opt) jobs, resp, err := ghc.Actions.ListWorkflowJobs(context.TODO(), user, repoName, runID, &opt)
if err != nil { if err != nil {
r.Log.Error(err, "Error listing workflow jobs") r.Log.Error(err, "Error listing workflow jobs")
return //err return // err
} }
allJobs = append(allJobs, jobs.Jobs...) allJobs = append(allJobs, jobs.Jobs...)
if resp.NextPage == 0 { if resp.NextPage == 0 {
@@ -345,7 +345,7 @@ func (r *HorizontalRunnerAutoscalerReconciler) suggestReplicasByPercentageRunner
} }
var runnerPodList corev1.PodList var runnerPodList corev1.PodList
if err := r.Client.List(ctx, &runnerPodList, client.InNamespace(hra.Namespace), client.MatchingLabels(map[string]string{ if err := r.List(ctx, &runnerPodList, client.InNamespace(hra.Namespace), client.MatchingLabels(map[string]string{
kindLabel: hra.Spec.ScaleTargetRef.Name, kindLabel: hra.Spec.ScaleTargetRef.Name,
})); err != nil { })); err != nil {
return nil, err return nil, err

View File

@@ -29,7 +29,7 @@ func newGithubClient(server *httptest.Server) *github.Client {
if err != nil { if err != nil {
panic(err) panic(err)
} }
client.Client.BaseURL = baseURL client.BaseURL = baseURL
return client return client
} }

View File

@@ -82,8 +82,8 @@ func (s *batchScaler) Add(st *ScaleTarget) {
break batch break batch
case st := <-s.queue: case st := <-s.queue:
nsName := types.NamespacedName{ nsName := types.NamespacedName{
Namespace: st.HorizontalRunnerAutoscaler.Namespace, Namespace: st.Namespace,
Name: st.HorizontalRunnerAutoscaler.Name, Name: st.Name,
} }
b, ok := batches[nsName] b, ok := batches[nsName]
if !ok { if !ok {
@@ -208,7 +208,7 @@ func (s *batchScaler) planBatchScale(ctx context.Context, batch batchScaleOperat
// //
// In other words, updating HRA.spec.scaleTriggers[].duration does not result in delaying capacity reservations expiration any longer // In other words, updating HRA.spec.scaleTriggers[].duration does not result in delaying capacity reservations expiration any longer
// than the "intended" duration, which is the duration of the trigger when the reservation was created. // than the "intended" duration, which is the duration of the trigger when the reservation was created.
duration := copy.Spec.CapacityReservations[i].ExpirationTime.Time.Sub(copy.Spec.CapacityReservations[i].EffectiveTime.Time) duration := copy.Spec.CapacityReservations[i].ExpirationTime.Sub(copy.Spec.CapacityReservations[i].EffectiveTime.Time)
copy.Spec.CapacityReservations[i].EffectiveTime = metav1.Time{Time: now} copy.Spec.CapacityReservations[i].EffectiveTime = metav1.Time{Time: now}
copy.Spec.CapacityReservations[i].ExpirationTime = metav1.Time{Time: now.Add(duration)} copy.Spec.CapacityReservations[i].ExpirationTime = metav1.Time{Time: now.Add(duration)}
} }

View File

@@ -503,13 +503,13 @@ func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) getManagedRunnerGroup
switch kind { switch kind {
case "RunnerSet": case "RunnerSet":
var rs v1alpha1.RunnerSet var rs v1alpha1.RunnerSet
if err := autoscaler.Client.Get(context.Background(), types.NamespacedName{Namespace: hra.Namespace, Name: hra.Spec.ScaleTargetRef.Name}, &rs); err != nil { if err := autoscaler.Get(context.Background(), types.NamespacedName{Namespace: hra.Namespace, Name: hra.Spec.ScaleTargetRef.Name}, &rs); err != nil {
return groups, err return groups, err
} }
o, e, g = rs.Spec.Organization, rs.Spec.Enterprise, rs.Spec.Group o, e, g = rs.Spec.Organization, rs.Spec.Enterprise, rs.Spec.Group
case "RunnerDeployment", "": case "RunnerDeployment", "":
var rd v1alpha1.RunnerDeployment var rd v1alpha1.RunnerDeployment
if err := autoscaler.Client.Get(context.Background(), types.NamespacedName{Namespace: hra.Namespace, Name: hra.Spec.ScaleTargetRef.Name}, &rd); err != nil { if err := autoscaler.Get(context.Background(), types.NamespacedName{Namespace: hra.Namespace, Name: hra.Spec.ScaleTargetRef.Name}, &rd); err != nil {
return groups, err return groups, err
} }
o, e, g = rd.Spec.Template.Spec.Organization, rd.Spec.Template.Spec.Enterprise, rd.Spec.Template.Spec.Group o, e, g = rd.Spec.Template.Spec.Organization, rd.Spec.Template.Spec.Enterprise, rd.Spec.Template.Spec.Group
@@ -562,7 +562,7 @@ func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) getJobScaleTarget(ctx
HRA: HRA:
for _, hra := range hras { for _, hra := range hras {
if !hra.ObjectMeta.DeletionTimestamp.IsZero() { if !hra.DeletionTimestamp.IsZero() {
continue continue
} }
@@ -603,7 +603,7 @@ HRA:
case "RunnerSet": case "RunnerSet":
var rs v1alpha1.RunnerSet var rs v1alpha1.RunnerSet
if err := autoscaler.Client.Get(context.Background(), types.NamespacedName{Namespace: hra.Namespace, Name: hra.Spec.ScaleTargetRef.Name}, &rs); err != nil { if err := autoscaler.Get(context.Background(), types.NamespacedName{Namespace: hra.Namespace, Name: hra.Spec.ScaleTargetRef.Name}, &rs); err != nil {
return nil, err return nil, err
} }
@@ -634,7 +634,7 @@ HRA:
case "RunnerDeployment", "": case "RunnerDeployment", "":
var rd v1alpha1.RunnerDeployment var rd v1alpha1.RunnerDeployment
if err := autoscaler.Client.Get(context.Background(), types.NamespacedName{Namespace: hra.Namespace, Name: hra.Spec.ScaleTargetRef.Name}, &rd); err != nil { if err := autoscaler.Get(context.Background(), types.NamespacedName{Namespace: hra.Namespace, Name: hra.Spec.ScaleTargetRef.Name}, &rd); err != nil {
return nil, err return nil, err
} }
@@ -676,7 +676,7 @@ func getValidCapacityReservations(autoscaler *v1alpha1.HorizontalRunnerAutoscale
now := time.Now() now := time.Now()
for _, reservation := range autoscaler.Spec.CapacityReservations { for _, reservation := range autoscaler.Spec.CapacityReservations {
if reservation.ExpirationTime.Time.After(now) { if reservation.ExpirationTime.After(now) {
capacityReservations = append(capacityReservations, reservation) capacityReservations = append(capacityReservations, reservation)
} }
} }
@@ -713,7 +713,7 @@ func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) indexer(rawObj client
switch hra.Spec.ScaleTargetRef.Kind { switch hra.Spec.ScaleTargetRef.Kind {
case "", "RunnerDeployment": case "", "RunnerDeployment":
var rd v1alpha1.RunnerDeployment var rd v1alpha1.RunnerDeployment
if err := autoscaler.Client.Get(context.Background(), types.NamespacedName{Namespace: hra.Namespace, Name: hra.Spec.ScaleTargetRef.Name}, &rd); err != nil { if err := autoscaler.Get(context.Background(), types.NamespacedName{Namespace: hra.Namespace, Name: hra.Spec.ScaleTargetRef.Name}, &rd); err != nil {
autoscaler.Log.V(1).Info(fmt.Sprintf("RunnerDeployment not found with scale target ref name %s for hra %s", hra.Spec.ScaleTargetRef.Name, hra.Name)) autoscaler.Log.V(1).Info(fmt.Sprintf("RunnerDeployment not found with scale target ref name %s for hra %s", hra.Spec.ScaleTargetRef.Name, hra.Name))
return nil return nil
} }
@@ -740,7 +740,7 @@ func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) indexer(rawObj client
return keys return keys
case "RunnerSet": case "RunnerSet":
var rs v1alpha1.RunnerSet var rs v1alpha1.RunnerSet
if err := autoscaler.Client.Get(context.Background(), types.NamespacedName{Namespace: hra.Namespace, Name: hra.Spec.ScaleTargetRef.Name}, &rs); err != nil { if err := autoscaler.Get(context.Background(), types.NamespacedName{Namespace: hra.Namespace, Name: hra.Spec.ScaleTargetRef.Name}, &rs); err != nil {
autoscaler.Log.V(1).Info(fmt.Sprintf("RunnerSet not found with scale target ref name %s for hra %s", hra.Spec.ScaleTargetRef.Name, hra.Name)) autoscaler.Log.V(1).Info(fmt.Sprintf("RunnerSet not found with scale target ref name %s for hra %s", hra.Spec.ScaleTargetRef.Name, hra.Name))
return nil return nil
} }

View File

@@ -71,7 +71,7 @@ func (r *HorizontalRunnerAutoscalerReconciler) Reconcile(ctx context.Context, re
return ctrl.Result{}, client.IgnoreNotFound(err) return ctrl.Result{}, client.IgnoreNotFound(err)
} }
if !hra.ObjectMeta.DeletionTimestamp.IsZero() { if !hra.DeletionTimestamp.IsZero() {
r.GitHubClient.DeinitForHRA(&hra) r.GitHubClient.DeinitForHRA(&hra)
return ctrl.Result{}, nil return ctrl.Result{}, nil
@@ -91,7 +91,7 @@ func (r *HorizontalRunnerAutoscalerReconciler) Reconcile(ctx context.Context, re
return ctrl.Result{}, client.IgnoreNotFound(err) return ctrl.Result{}, client.IgnoreNotFound(err)
} }
if !rd.ObjectMeta.DeletionTimestamp.IsZero() { if !rd.DeletionTimestamp.IsZero() {
return ctrl.Result{}, nil return ctrl.Result{}, nil
} }
@@ -120,14 +120,14 @@ func (r *HorizontalRunnerAutoscalerReconciler) Reconcile(ctx context.Context, re
copy.Spec.EffectiveTime = &metav1.Time{Time: *effectiveTime} copy.Spec.EffectiveTime = &metav1.Time{Time: *effectiveTime}
} }
if err := r.Client.Patch(ctx, copy, client.MergeFrom(&rd)); err != nil { if err := r.Patch(ctx, copy, client.MergeFrom(&rd)); err != nil {
return fmt.Errorf("patching runnerdeployment to have %d replicas: %w", newDesiredReplicas, err) return fmt.Errorf("patching runnerdeployment to have %d replicas: %w", newDesiredReplicas, err)
} }
} else if ephemeral && effectiveTime != nil { } else if ephemeral && effectiveTime != nil {
copy := rd.DeepCopy() copy := rd.DeepCopy()
copy.Spec.EffectiveTime = &metav1.Time{Time: *effectiveTime} copy.Spec.EffectiveTime = &metav1.Time{Time: *effectiveTime}
if err := r.Client.Patch(ctx, copy, client.MergeFrom(&rd)); err != nil { if err := r.Patch(ctx, copy, client.MergeFrom(&rd)); err != nil {
return fmt.Errorf("patching runnerdeployment to have %d replicas: %w", newDesiredReplicas, err) return fmt.Errorf("patching runnerdeployment to have %d replicas: %w", newDesiredReplicas, err)
} }
} }
@@ -142,7 +142,7 @@ func (r *HorizontalRunnerAutoscalerReconciler) Reconcile(ctx context.Context, re
return ctrl.Result{}, client.IgnoreNotFound(err) return ctrl.Result{}, client.IgnoreNotFound(err)
} }
if !rs.ObjectMeta.DeletionTimestamp.IsZero() { if !rs.DeletionTimestamp.IsZero() {
return ctrl.Result{}, nil return ctrl.Result{}, nil
} }
@@ -160,7 +160,7 @@ func (r *HorizontalRunnerAutoscalerReconciler) Reconcile(ctx context.Context, re
org: rs.Spec.Organization, org: rs.Spec.Organization,
repo: rs.Spec.Repository, repo: rs.Spec.Repository,
replicas: replicas, replicas: replicas,
labels: rs.Spec.RunnerConfig.Labels, labels: rs.Spec.Labels,
getRunnerMap: func() (map[string]struct{}, error) { getRunnerMap: func() (map[string]struct{}, error) {
// return the list of runners in namespace. Horizontal Runner Autoscaler should only be responsible for scaling resources in its own ns. // return the list of runners in namespace. Horizontal Runner Autoscaler should only be responsible for scaling resources in its own ns.
var runnerPodList corev1.PodList var runnerPodList corev1.PodList
@@ -224,14 +224,14 @@ func (r *HorizontalRunnerAutoscalerReconciler) Reconcile(ctx context.Context, re
copy.Spec.EffectiveTime = &metav1.Time{Time: *effectiveTime} copy.Spec.EffectiveTime = &metav1.Time{Time: *effectiveTime}
} }
if err := r.Client.Patch(ctx, copy, client.MergeFrom(&rs)); err != nil { if err := r.Patch(ctx, copy, client.MergeFrom(&rs)); err != nil {
return fmt.Errorf("patching runnerset to have %d replicas: %w", newDesiredReplicas, err) return fmt.Errorf("patching runnerset to have %d replicas: %w", newDesiredReplicas, err)
} }
} else if ephemeral && effectiveTime != nil { } else if ephemeral && effectiveTime != nil {
copy := rs.DeepCopy() copy := rs.DeepCopy()
copy.Spec.EffectiveTime = &metav1.Time{Time: *effectiveTime} copy.Spec.EffectiveTime = &metav1.Time{Time: *effectiveTime}
if err := r.Client.Patch(ctx, copy, client.MergeFrom(&rs)); err != nil { if err := r.Patch(ctx, copy, client.MergeFrom(&rs)); err != nil {
return fmt.Errorf("patching runnerset to have %d replicas: %w", newDesiredReplicas, err) return fmt.Errorf("patching runnerset to have %d replicas: %w", newDesiredReplicas, err)
} }
} }
@@ -253,7 +253,7 @@ func (r *HorizontalRunnerAutoscalerReconciler) scaleTargetFromRD(ctx context.Con
org: rd.Spec.Template.Spec.Organization, org: rd.Spec.Template.Spec.Organization,
repo: rd.Spec.Template.Spec.Repository, repo: rd.Spec.Template.Spec.Repository,
replicas: rd.Spec.Replicas, replicas: rd.Spec.Replicas,
labels: rd.Spec.Template.Spec.RunnerConfig.Labels, labels: rd.Spec.Template.Spec.Labels,
getRunnerMap: func() (map[string]struct{}, error) { getRunnerMap: func() (map[string]struct{}, error) {
// return the list of runners in namespace. Horizontal Runner Autoscaler should only be responsible for scaling resources in its own ns. // return the list of runners in namespace. Horizontal Runner Autoscaler should only be responsible for scaling resources in its own ns.
var runnerList v1alpha1.RunnerList var runnerList v1alpha1.RunnerList
@@ -484,7 +484,7 @@ func (r *HorizontalRunnerAutoscalerReconciler) computeReplicasWithCache(ghc *arc
var reserved int var reserved int
for _, reservation := range hra.Spec.CapacityReservations { for _, reservation := range hra.Spec.CapacityReservations {
if reservation.ExpirationTime.Time.After(now) { if reservation.ExpirationTime.After(now) {
reserved += reservation.Replicas reserved += reservation.Replicas
} }
} }

Some files were not shown because too many files have changed in this diff Show More