Compare commits

...

65 Commits

Author SHA1 Message Date
Nikola Jokic
a0c30df25b Prepare 0.13.0 release (#4280) 2025-10-16 19:25:56 +02:00
dependabot[bot]
27d03ef2e2 Bump the gomod group across 1 directory with 4 updates (#4277)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-10-15 01:31:29 +02:00
Nikola Jokic
634e42c916 Bump all dependencies (#4266) 2025-10-14 13:24:25 +02:00
Jiaren Wu
6e46b42bf4 Potential fix for code scanning alert no. 1: Workflow does not contain permissions (#4274)
Co-authored-by: Copilot Autofix powered by AI <62310815+github-advanced-security[bot]@users.noreply.github.com>
Co-authored-by: Copilot <198982749+Copilot@users.noreply.github.com>
Co-authored-by: jiaren-wu <190862939+jiaren-wu@users.noreply.github.com>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-10-13 11:08:35 -07:00
Jiaren Wu
71ebdd9d3c Potential fix for code scanning alert no. 3: Workflow does not contain permissions (#4273)
Co-authored-by: Copilot Autofix powered by AI <62310815+github-advanced-security[bot]@users.noreply.github.com>
2025-10-13 10:38:14 -07:00
Berat Postalcioglu
7604c8361f docs: fix broken Grafana dashboard JSON path (#4270) 2025-10-09 22:05:43 +02:00
Nikola Jokic
94a6f3cc3a Ensure ephemeral runner is deleted from the service on exit != 0 (#4260) 2025-10-06 11:38:56 +02:00
Nikola Jokic
e3ed1ba226 Introduce new kubernetes-novolume mode (#4250)
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-10-03 12:03:38 +02:00
dependabot[bot]
652bd99439 Bump the actions group across 1 directory with 5 updates (#4262)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-10-01 17:27:52 +02:00
Yusuke Kuoka
f731873df9 Add workflow name and target labels (#4240) 2025-09-30 16:01:51 +02:00
Nikola Jokic
088e2a3a90 Remove ephemeral runner when exit code != 0 and is patched with the job (#4239) 2025-09-17 21:40:37 +02:00
Dennis Stone
2035e13724 Update CODEOWNERS to include new maintainer (#4253) 2025-09-17 21:33:38 +02:00
Nikola Jokic
04b966dfec Update CODEOWNERS (#4251) 2025-09-17 17:49:12 +02:00
zkpepe
0a0be027fd docs: fix repo path typo (#4229) 2025-08-27 16:17:52 +02:00
Nikola Jokic
ddc2918a48 Requeue if create pod returns already exists error (#4201) 2025-08-14 17:00:48 +02:00
github-actions[bot]
0e006bb0ff Updates: runner to v2.328.0 (#4209)
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
2025-08-14 10:44:40 +01:00
dependabot[bot]
ce7722aed4 Bump actions/checkout from 4 to 5 in the actions group (#4205)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-12 23:27:53 +02:00
dependabot[bot]
ad2dd7d787 Bump docker/login-action from 3.4.0 to 3.5.0 in the actions group (#4196)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-06 23:08:12 +02:00
clechevalli
30abbe0cab Fix usage of underscore in Runner Scale Set name (#3545)
Co-authored-by: Nikola Jokic <jokicnikola07@gmail.com>
2025-08-06 09:32:49 +01:00
Nikola Jokic
c27541140a Remove JIT config from ephemeral runner status field (#4191) 2025-08-04 12:35:04 +02:00
github-actions[bot]
52d65c333b Updates: runner to v2.327.1 (#4188)
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
2025-07-29 10:07:13 +01:00
Alex Hatzenbuhler
a07dce28bb Remove deprecated preserveUnknownFields from CRDs (#4135) 2025-07-24 08:47:34 +02:00
github-actions[bot]
fb43abf1f3 Updates: runner to v2.327.0 (#4185)
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
2025-07-23 16:19:17 +01:00
Kylie Stradley
9c42f9f2e1 Add Missing Languages to CodeQL Advanced Configuration (#4179) 2025-07-16 11:16:19 +02:00
Cory Calahan
ad826725ce Update example GitHub URLs in values.yaml to include an example for enterprise account-level runners (#4181) 2025-07-16 10:40:38 +02:00
github-actions[bot]
4326693888 Updates: runner to v2.326.0 (#4176)
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
2025-07-14 11:39:15 -04:00
Nikola Jokic
469a0faec4 Remove workflow actions version comments since upgrades are done via dependabot (#4161) 2025-07-01 15:54:59 +02:00
Nikola Jokic
349cc0835e Fix image pull secrets list arguments in the chart (#4164) 2025-07-01 15:28:18 +02:00
Ho Kim
aa14f50e45 feat(runner): add ubuntu 24.04 support (#3598) 2025-07-01 18:34:52 +09:00
dependabot[bot]
ee8ca99e49 Bump the actions group across 1 directory with 5 updates (#4160)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-06-30 13:55:46 +02:00
adjn
6a13540076 Update CodeQL workflow for v3 (global-run-codeql.yaml) (#4157) 2025-06-30 11:16:11 +02:00
Nikola Jokic
ded39bede6 Prepare 0.12.1 release (#4153) 2025-06-27 13:49:47 +02:00
Nikola Jokic
9890c0592d Explicitly requeue during backoff ephemeral runner (#4152) 2025-06-27 12:05:43 +02:00
Nikola Jokic
3b5693eecb Remove check if runner exists after exit code 0 (#4142) 2025-06-27 11:11:39 +02:00
calx
e6e621a50a Remove duplicate float64 call (#4139) 2025-06-24 11:26:20 +02:00
Mark Huijgen
0b2534ebc9 Fix dind sidecar template (#4128) 2025-06-16 12:14:18 +02:00
Jeev B
e858d67926 Fix indentation of startupProbe attributes in dind sidecar (#4126) 2025-06-14 21:05:53 +02:00
Nikola Jokic
bc6c23609a Remove cache for build-push-action (#4124) 2025-06-13 15:26:55 +02:00
Nikola Jokic
666d0c52c4 Bump build-push-action to 6.18.0 (#4123) 2025-06-13 15:09:27 +02:00
Nikola Jokic
d9826e5244 Prepare 0.12.0 release (#4122) 2025-06-13 14:23:26 +02:00
dependabot[bot]
6f3882c482 Bump github.com/golang-jwt/jwt/v5 from 5.2.1 to 5.2.2 (#4120)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-06-11 21:43:46 +02:00
Nikola Jokic
e46c929241 Azure Key Vault integration to resolve secrets (#4090) 2025-06-11 15:53:33 +02:00
Wim Fournier
d4af75d82e Delete config secret when listener pod gets deleted (#4033)
Co-authored-by: Nikola Jokic <jokicnikola07@gmail.com>
2025-06-11 15:53:04 +02:00
Nash Luffman
e335f53037 Add response body to error when fetching access token (#4005)
Co-authored-by: mluffman <mluffman@thoughtmachine.net>
Co-authored-by: Nikola Jokic <jokicnikola07@gmail.com>
2025-06-11 15:52:38 +02:00
Tingluo Huang
c359d14e69 Avoid nil point when config.Metrics is nil and expose all metrics if none are configured (#4101)
Co-authored-by: Nikola Jokic <jokicnikola07@gmail.com>
2025-06-11 15:51:26 +02:00
Nikola Jokic
9d8c59aeb3 Add startup probe to dind side-car (#4117) 2025-06-11 15:50:30 +02:00
dependabot[bot]
eef57e1a77 Bump github.com/cloudflare/circl from 1.6.0 to 1.6.1 (#4118)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-06-11 11:46:20 +02:00
Ryo Sakamoto
97697e80b4 Add job_workflow_ref label to listener metrics (#4054)
Signed-off-by: rskmm0chang <rskmm0chang@hatena.ne.jp>
2025-06-05 08:33:30 +02:00
github-actions[bot]
27b292bdd3 Updates: runner to v2.325.0 (#4109)
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
2025-06-03 11:05:40 -04:00
Nikola Jokic
1dbb88cb9e Allow use of client id as an app id (#4057) 2025-05-16 16:21:06 +02:00
Nikola Jokic
43f1cd0dac Refactor resource naming removing unnecessary calculations (#4076) 2025-05-15 10:56:34 +02:00
Nikola Jokic
389d842a30 Relax version requirements to allow patch version mismatch (#4080)
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-05-14 21:38:16 +02:00
Nikola Jokic
f6f42dd4c1 Fix docker lint warnings (#4074) 2025-05-14 19:57:39 +02:00
github-actions[bot]
20e157fa72 Updates: runner to v2.324.0 container-hooks to v0.7.0 (#4086)
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
2025-05-14 10:05:33 -04:00
Nikola Jokic
cae7efa2c6 Create backoff mechanism for failed runners and allow re-creation of failed ephemeral runners (#4059) 2025-05-14 15:38:50 +02:00
Nikola Jokic
d6e2790db5 Bump go version (#4075) 2025-05-14 13:51:47 +02:00
scodef
a1a8dc5606 Add missing backtick to metrics.serviceMonitor.namespace line to correct formatting (#3790) 2025-05-07 14:52:42 +02:00
Mosè Giordano
16304b5ce7 Fix code block fences (#3140)
Co-authored-by: Mosè Giordano <giordano@users.noreply.github.com>
2025-05-06 13:47:13 +02:00
Borislav Velkov
32f19acc66 feat(helm): move dind to sidecar (#3842) 2025-04-23 14:51:01 +02:00
Ken Muse
46ee5cf9a2 Revised dashboard (#4022) 2025-04-23 11:36:05 +02:00
Ryosei Karaki
f832b0b254 upgrade(golangci-lint): v2.1.2 (#4023)
Signed-off-by: karamaru-alpha <mrnk3078@gmail.com>
2025-04-17 16:14:31 +02:00
Nikola Jokic
a33d34a036 Pin third party actions (#3981) 2025-04-17 12:19:15 +02:00
Nikola Jokic
15990d492d Include more context to errors raised by github/actions client (#4032)
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-04-11 11:36:15 +02:00
dependabot[bot]
462db4dfc8 Bump the gomod group across 1 directory with 7 updates (#4008)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Nikola Jokic <jokicnikola07@gmail.com>
2025-04-07 16:51:07 +02:00
David Maxwell
ea27448da5 Fix busy runners metric (#4016) 2025-04-04 17:17:09 +02:00
148 changed files with 33453 additions and 20582 deletions

View File

@@ -1,9 +1,9 @@
name: 'Setup ARC E2E Test Action'
description: 'Build controller image, create kind cluster, load the image, and exchange ARC configure token.'
name: "Setup ARC E2E Test Action"
description: "Build controller image, create kind cluster, load the image, and exchange ARC configure token."
inputs:
app-id:
description: 'GitHub App Id for exchange access token'
description: "GitHub App Id for exchange access token"
required: true
app-pk:
description: "GitHub App private key for exchange access token"
@@ -20,30 +20,31 @@ inputs:
outputs:
token:
description: 'Token to use for configure ARC'
description: "Token to use for configure ARC"
value: ${{steps.config-token.outputs.token}}
runs:
using: "composite"
steps:
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2
with:
# Pinning v0.9.1 for Buildx and BuildKit v0.10.6
# BuildKit v0.11 which has a bug causing intermittent
# failures pushing images to GHCR
version: v0.9.1
driver-opts: image=moby/buildkit:v0.10.6
# Pinning v0.9.1 for Buildx and BuildKit v0.10.6
# BuildKit v0.11 which has a bug causing intermittent
# failures pushing images to GHCR
version: v0.9.1
driver-opts: image=moby/buildkit:v0.10.6
- name: Build controller image
uses: docker/build-push-action@v5
# https://github.com/docker/build-push-action/releases/tag/v6.18.0
uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83
with:
file: Dockerfile
platforms: linux/amd64
load: true
build-args: |
DOCKER_IMAGE_NAME=${{inputs.image-name}}
VERSION=${{inputs.image-tag}}
VERSION=${{inputs.image-tag}}
tags: |
${{inputs.image-name}}:${{inputs.image-tag}}
no-cache: true
@@ -56,8 +57,9 @@ runs:
- name: Get configure token
id: config-token
# https://github.com/peter-murray/workflow-application-token-action/releases/tag/v3.0.0
uses: peter-murray/workflow-application-token-action@dc0413987a085fa17d19df9e47d4677cf81ffef3
with:
application_id: ${{ inputs.app-id }}
application_private_key: ${{ inputs.app-pk }}
organization: ${{ inputs.target-org}}
organization: ${{ inputs.target-org}}

View File

@@ -24,23 +24,27 @@ runs:
shell: bash
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
# https://github.com/docker/setup-qemu-action/releases/tag/v3.6.0
uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
# https://github.com/docker/setup-buildx-action/releases/tag/v3.10.0
uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2
with:
version: latest
- name: Login to DockerHub
if: ${{ github.event_name == 'release' || github.event_name == 'push' && github.ref == 'refs/heads/master' && inputs.password != '' }}
uses: docker/login-action@v3
# https://github.com/docker/login-action/releases/tag/v3.4.0
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772
with:
username: ${{ inputs.username }}
password: ${{ inputs.password }}
- name: Login to GitHub Container Registry
if: ${{ github.event_name == 'release' || github.event_name == 'push' && github.ref == 'refs/heads/master' && inputs.ghcr_password != '' }}
uses: docker/login-action@v3
# https://github.com/docker/login-action/releases/tag/v3.4.0
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772
with:
registry: ghcr.io
username: ${{ inputs.ghcr_username }}

View File

@@ -5,18 +5,18 @@ name: Publish ARC Helm Charts
on:
push:
branches:
- master
- master
paths:
- 'charts/**'
- '.github/workflows/arc-publish-chart.yaml'
- '!charts/actions-runner-controller/docs/**'
- '!charts/gha-runner-scale-set-controller/**'
- '!charts/gha-runner-scale-set/**'
- '!**.md'
- "charts/**"
- ".github/workflows/arc-publish-chart.yaml"
- "!charts/actions-runner-controller/docs/**"
- "!charts/gha-runner-scale-set-controller/**"
- "!charts/gha-runner-scale-set/**"
- "!**.md"
workflow_dispatch:
inputs:
force:
description: 'Force publish even if the chart version is not bumped'
description: "Force publish even if the chart version is not bumped"
type: boolean
required: true
default: false
@@ -39,86 +39,86 @@ jobs:
outputs:
publish-chart: ${{ steps.publish-chart-step.outputs.publish }}
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Checkout
uses: actions/checkout@v5
with:
fetch-depth: 0
- name: Set up Helm
uses: azure/setup-helm@fe7b79cd5ee1e45176fcad797de68ecaf3ca4814
with:
version: ${{ env.HELM_VERSION }}
- name: Set up Helm
uses: azure/setup-helm@1a275c3b69536ee54be43f2070a358922e12c8d4
with:
version: ${{ env.HELM_VERSION }}
- name: Set up kube-score
run: |
wget https://github.com/zegl/kube-score/releases/download/v${{ env.KUBE_SCORE_VERSION }}/kube-score_${{ env.KUBE_SCORE_VERSION }}_linux_amd64 -O kube-score
chmod 755 kube-score
- name: Set up kube-score
run: |
wget https://github.com/zegl/kube-score/releases/download/v${{ env.KUBE_SCORE_VERSION }}/kube-score_${{ env.KUBE_SCORE_VERSION }}_linux_amd64 -O kube-score
chmod 755 kube-score
- name: Kube-score generated manifests
run: helm template --values charts/.ci/values-kube-score.yaml charts/* | ./kube-score score - --ignore-test pod-networkpolicy --ignore-test deployment-has-poddisruptionbudget --ignore-test deployment-has-host-podantiaffinity --ignore-test container-security-context --ignore-test pod-probes --ignore-test container-image-tag --enable-optional-test container-security-context-privileged --enable-optional-test container-security-context-readonlyrootfilesystem
- name: Kube-score generated manifests
run: helm template --values charts/.ci/values-kube-score.yaml charts/* | ./kube-score score - --ignore-test pod-networkpolicy --ignore-test deployment-has-poddisruptionbudget --ignore-test deployment-has-host-podantiaffinity --ignore-test container-security-context --ignore-test pod-probes --ignore-test container-image-tag --enable-optional-test container-security-context-privileged --enable-optional-test container-security-context-readonlyrootfilesystem
# python is a requirement for the chart-testing action below (supports yamllint among other tests)
- uses: actions/setup-python@v5
with:
python-version: '3.11'
# python is a requirement for the chart-testing action below (supports yamllint among other tests)
- uses: actions/setup-python@v6
with:
python-version: "3.11"
- name: Set up chart-testing
uses: helm/chart-testing-action@v2.6.0
- name: Set up chart-testing
uses: helm/chart-testing-action@0d28d3144d3a25ea2cc349d6e59901c4ff469b3b
- name: Run chart-testing (list-changed)
id: list-changed
run: |
changed=$(ct list-changed --config charts/.ci/ct-config.yaml)
if [[ -n "$changed" ]]; then
echo "changed=true" >> $GITHUB_OUTPUT
fi
- name: Run chart-testing (list-changed)
id: list-changed
run: |
changed=$(ct list-changed --config charts/.ci/ct-config.yaml)
if [[ -n "$changed" ]]; then
echo "changed=true" >> $GITHUB_OUTPUT
fi
- name: Run chart-testing (lint)
run: |
ct lint --config charts/.ci/ct-config.yaml
- name: Run chart-testing (lint)
run: |
ct lint --config charts/.ci/ct-config.yaml
- name: Create kind cluster
if: steps.list-changed.outputs.changed == 'true'
uses: helm/kind-action@v1.4.0
- name: Create kind cluster
if: steps.list-changed.outputs.changed == 'true'
uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3
# We need cert-manager already installed in the cluster because we assume the CRDs exist
- name: Install cert-manager
if: steps.list-changed.outputs.changed == 'true'
run: |
helm repo add jetstack https://charts.jetstack.io --force-update
helm install cert-manager jetstack/cert-manager --set installCRDs=true --wait
# We need cert-manager already installed in the cluster because we assume the CRDs exist
- name: Install cert-manager
if: steps.list-changed.outputs.changed == 'true'
run: |
helm repo add jetstack https://charts.jetstack.io --force-update
helm install cert-manager jetstack/cert-manager --set installCRDs=true --wait
- name: Run chart-testing (install)
if: steps.list-changed.outputs.changed == 'true'
run: ct install --config charts/.ci/ct-config.yaml
- name: Run chart-testing (install)
if: steps.list-changed.outputs.changed == 'true'
run: ct install --config charts/.ci/ct-config.yaml
# WARNING: This relies on the latest release being at the top of the JSON from GitHub and a clean chart.yaml
- name: Check if Chart Publish is Needed
id: publish-chart-step
run: |
CHART_TEXT=$(curl -fs https://raw.githubusercontent.com/${{ github.repository }}/master/charts/actions-runner-controller/Chart.yaml)
NEW_CHART_VERSION=$(echo "$CHART_TEXT" | grep version: | cut -d ' ' -f 2)
RELEASE_LIST=$(curl -fs https://api.github.com/repos/${{ github.repository }}/releases | jq .[].tag_name | grep actions-runner-controller | cut -d '"' -f 2 | cut -d '-' -f 4)
LATEST_RELEASED_CHART_VERSION=$(echo $RELEASE_LIST | cut -d ' ' -f 1)
# WARNING: This relies on the latest release being at the top of the JSON from GitHub and a clean chart.yaml
- name: Check if Chart Publish is Needed
id: publish-chart-step
run: |
CHART_TEXT=$(curl -fs https://raw.githubusercontent.com/${{ github.repository }}/master/charts/actions-runner-controller/Chart.yaml)
NEW_CHART_VERSION=$(echo "$CHART_TEXT" | grep version: | cut -d ' ' -f 2)
RELEASE_LIST=$(curl -fs https://api.github.com/repos/${{ github.repository }}/releases | jq .[].tag_name | grep actions-runner-controller | cut -d '"' -f 2 | cut -d '-' -f 4)
LATEST_RELEASED_CHART_VERSION=$(echo $RELEASE_LIST | cut -d ' ' -f 1)
echo "CHART_VERSION_IN_MASTER=$NEW_CHART_VERSION" >> $GITHUB_ENV
echo "LATEST_CHART_VERSION=$LATEST_RELEASED_CHART_VERSION" >> $GITHUB_ENV
echo "CHART_VERSION_IN_MASTER=$NEW_CHART_VERSION" >> $GITHUB_ENV
echo "LATEST_CHART_VERSION=$LATEST_RELEASED_CHART_VERSION" >> $GITHUB_ENV
# Always publish if force is true
if [[ $NEW_CHART_VERSION != $LATEST_RELEASED_CHART_VERSION || "${{ inputs.force }}" == "true" ]]; then
echo "publish=true" >> $GITHUB_OUTPUT
else
echo "publish=false" >> $GITHUB_OUTPUT
fi
# Always publish if force is true
if [[ $NEW_CHART_VERSION != $LATEST_RELEASED_CHART_VERSION || "${{ inputs.force }}" == "true" ]]; then
echo "publish=true" >> $GITHUB_OUTPUT
else
echo "publish=false" >> $GITHUB_OUTPUT
fi
- name: Job summary
run: |
echo "Chart linting has been completed." >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "**Status:**" >> $GITHUB_STEP_SUMMARY
echo "- chart version in master: ${{ env.CHART_VERSION_IN_MASTER }}" >> $GITHUB_STEP_SUMMARY
echo "- latest chart version: ${{ env.LATEST_CHART_VERSION }}" >> $GITHUB_STEP_SUMMARY
echo "- publish new chart: ${{ steps.publish-chart-step.outputs.publish }}" >> $GITHUB_STEP_SUMMARY
- name: Job summary
run: |
echo "Chart linting has been completed." >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "**Status:**" >> $GITHUB_STEP_SUMMARY
echo "- chart version in master: ${{ env.CHART_VERSION_IN_MASTER }}" >> $GITHUB_STEP_SUMMARY
echo "- latest chart version: ${{ env.LATEST_CHART_VERSION }}" >> $GITHUB_STEP_SUMMARY
echo "- publish new chart: ${{ steps.publish-chart-step.outputs.publish }}" >> $GITHUB_STEP_SUMMARY
publish-chart:
if: needs.lint-chart.outputs.publish-chart == 'true'
@@ -133,80 +133,80 @@ jobs:
CHART_TARGET_BRANCH: master
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Checkout
uses: actions/checkout@v5
with:
fetch-depth: 0
- name: Configure Git
run: |
git config user.name "$GITHUB_ACTOR"
git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
- name: Configure Git
run: |
git config user.name "$GITHUB_ACTOR"
git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
- name: Get Token
id: get_workflow_token
uses: peter-murray/workflow-application-token-action@dc0413987a085fa17d19df9e47d4677cf81ffef3
with:
application_id: ${{ secrets.ACTIONS_ACCESS_APP_ID }}
application_private_key: ${{ secrets.ACTIONS_ACCESS_PK }}
organization: ${{ env.CHART_TARGET_ORG }}
- name: Get Token
id: get_workflow_token
uses: peter-murray/workflow-application-token-action@d17e3a9a36850ea89f35db16c1067dd2b68ee343
with:
application_id: ${{ secrets.ACTIONS_ACCESS_APP_ID }}
application_private_key: ${{ secrets.ACTIONS_ACCESS_PK }}
organization: ${{ env.CHART_TARGET_ORG }}
- name: Install chart-releaser
uses: helm/chart-releaser-action@v1.4.1
with:
install_only: true
install_dir: ${{ github.workspace }}/bin
- name: Install chart-releaser
uses: helm/chart-releaser-action@cae68fefc6b5f367a0275617c9f83181ba54714f
with:
install_only: true
install_dir: ${{ github.workspace }}/bin
- name: Package and upload release assets
run: |
cr package \
${{ github.workspace }}/charts/actions-runner-controller/ \
--package-path .cr-release-packages
- name: Package and upload release assets
run: |
cr package \
${{ github.workspace }}/charts/actions-runner-controller/ \
--package-path .cr-release-packages
cr upload \
--owner "$(echo ${{ github.repository }} | cut -d '/' -f 1)" \
--git-repo "$(echo ${{ github.repository }} | cut -d '/' -f 2)" \
--package-path .cr-release-packages \
--token ${{ secrets.GITHUB_TOKEN }}
cr upload \
--owner "$(echo ${{ github.repository }} | cut -d '/' -f 1)" \
--git-repo "$(echo ${{ github.repository }} | cut -d '/' -f 2)" \
--package-path .cr-release-packages \
--token ${{ secrets.GITHUB_TOKEN }}
- name: Generate updated index.yaml
run: |
cr index \
--owner "$(echo ${{ github.repository }} | cut -d '/' -f 1)" \
--git-repo "$(echo ${{ github.repository }} | cut -d '/' -f 2)" \
--index-path ${{ github.workspace }}/index.yaml \
--token ${{ secrets.GITHUB_TOKEN }} \
--push \
--pages-branch 'gh-pages' \
--pages-index-path 'index.yaml'
- name: Generate updated index.yaml
run: |
cr index \
--owner "$(echo ${{ github.repository }} | cut -d '/' -f 1)" \
--git-repo "$(echo ${{ github.repository }} | cut -d '/' -f 2)" \
--index-path ${{ github.workspace }}/index.yaml \
--token ${{ secrets.GITHUB_TOKEN }} \
--push \
--pages-branch 'gh-pages' \
--pages-index-path 'index.yaml'
# Chart Release was never intended to publish to a different repo
# this workaround is intended to move the index.yaml to the target repo
# where the github pages are hosted
- name: Checkout target repository
uses: actions/checkout@v4
with:
repository: ${{ env.CHART_TARGET_ORG }}/${{ env.CHART_TARGET_REPO }}
path: ${{ env.CHART_TARGET_REPO }}
ref: ${{ env.CHART_TARGET_BRANCH }}
token: ${{ steps.get_workflow_token.outputs.token }}
# Chart Release was never intended to publish to a different repo
# this workaround is intended to move the index.yaml to the target repo
# where the github pages are hosted
- name: Checkout target repository
uses: actions/checkout@v5
with:
repository: ${{ env.CHART_TARGET_ORG }}/${{ env.CHART_TARGET_REPO }}
path: ${{ env.CHART_TARGET_REPO }}
ref: ${{ env.CHART_TARGET_BRANCH }}
token: ${{ steps.get_workflow_token.outputs.token }}
- name: Copy index.yaml
run: |
cp ${{ github.workspace }}/index.yaml ${{ env.CHART_TARGET_REPO }}/actions-runner-controller/index.yaml
- name: Copy index.yaml
run: |
cp ${{ github.workspace }}/index.yaml ${{ env.CHART_TARGET_REPO }}/actions-runner-controller/index.yaml
- name: Commit and push to target repository
run: |
git config user.name "$GITHUB_ACTOR"
git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
git add .
git commit -m "Update index.yaml"
git push
working-directory: ${{ github.workspace }}/${{ env.CHART_TARGET_REPO }}
- name: Commit and push to target repository
run: |
git config user.name "$GITHUB_ACTOR"
git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
git add .
git commit -m "Update index.yaml"
git push
working-directory: ${{ github.workspace }}/${{ env.CHART_TARGET_REPO }}
- name: Job summary
run: |
echo "New helm chart has been published" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "**Status:**" >> $GITHUB_STEP_SUMMARY
echo "- New [index.yaml](https://github.com/${{ env.CHART_TARGET_ORG }}/${{ env.CHART_TARGET_REPO }}/tree/master/actions-runner-controller) pushed" >> $GITHUB_STEP_SUMMARY
- name: Job summary
run: |
echo "New helm chart has been published" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "**Status:**" >> $GITHUB_STEP_SUMMARY
echo "- New [index.yaml](https://github.com/${{ env.CHART_TARGET_ORG }}/${{ env.CHART_TARGET_REPO }}/tree/master/actions-runner-controller) pushed" >> $GITHUB_STEP_SUMMARY

View File

@@ -9,17 +9,17 @@ on:
workflow_dispatch:
inputs:
release_tag_name:
description: 'Tag name of the release to publish'
description: "Tag name of the release to publish"
required: true
push_to_registries:
description: 'Push images to registries'
description: "Push images to registries"
required: true
type: boolean
default: false
permissions:
contents: write
packages: write
contents: write
packages: write
env:
TARGET_ORG: actions-runner-controller
@@ -39,11 +39,11 @@ jobs:
if: ${{ !startsWith(github.event.inputs.release_tag_name, 'gha-runner-scale-set-') }}
steps:
- name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v5
- uses: actions/setup-go@v5
- uses: actions/setup-go@v6
with:
go-version-file: 'go.mod'
go-version-file: "go.mod"
- name: Install tools
run: |
@@ -73,7 +73,7 @@ jobs:
- name: Get Token
id: get_workflow_token
uses: peter-murray/workflow-application-token-action@dc0413987a085fa17d19df9e47d4677cf81ffef3
uses: peter-murray/workflow-application-token-action@d17e3a9a36850ea89f35db16c1067dd2b68ee343
with:
application_id: ${{ secrets.ACTIONS_ACCESS_APP_ID }}
application_private_key: ${{ secrets.ACTIONS_ACCESS_PK }}

View File

@@ -7,10 +7,10 @@ on:
# are available to the workflow run
push:
branches:
- 'master'
- "master"
paths:
- 'runner/VERSION'
- '.github/workflows/arc-release-runners.yaml'
- "runner/VERSION"
- ".github/workflows/arc-release-runners.yaml"
env:
# Safeguard to prevent pushing images to registeries after build
@@ -28,7 +28,7 @@ jobs:
name: Trigger Build and Push of Runner Images
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v5
- name: Get runner version
id: versions
run: |
@@ -39,7 +39,7 @@ jobs:
- name: Get Token
id: get_workflow_token
uses: peter-murray/workflow-application-token-action@dc0413987a085fa17d19df9e47d4677cf81ffef3
uses: peter-murray/workflow-application-token-action@d17e3a9a36850ea89f35db16c1067dd2b68ee343
with:
application_id: ${{ secrets.ACTIONS_ACCESS_APP_ID }}
application_private_key: ${{ secrets.ACTIONS_ACCESS_PK }}

View File

@@ -1,6 +1,9 @@
# This workflows polls releases from actions/runner and in case of a new one it
# updates files containing runner version and opens a pull request.
name: Runner Updates Check (Scheduled Job)
permissions:
pull-requests: write
contents: write
on:
schedule:
@@ -21,7 +24,7 @@ jobs:
container_hooks_current_version: ${{ steps.container_hooks_versions.outputs.container_hooks_current_version }}
container_hooks_latest_version: ${{ steps.container_hooks_versions.outputs.container_hooks_latest_version }}
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v5
- name: Get runner current and latest versions
id: runner_versions
@@ -50,6 +53,8 @@ jobs:
# it sets a PR name as output.
check_pr:
runs-on: ubuntu-latest
permissions:
contents: read
needs: check_versions
if: needs.check_versions.outputs.runner_current_version != needs.check_versions.outputs.runner_latest_version || needs.check_versions.outputs.container_hooks_current_version != needs.check_versions.outputs.container_hooks_latest_version
outputs:
@@ -64,7 +69,7 @@ jobs:
echo "CONTAINER_HOOKS_CURRENT_VERSION=${{ needs.check_versions.outputs.container_hooks_current_version }}"
echo "CONTAINER_HOOKS_LATEST_VERSION=${{ needs.check_versions.outputs.container_hooks_latest_version }}"
- uses: actions/checkout@v4
- uses: actions/checkout@v5
- name: PR Name
id: pr_name
@@ -119,7 +124,7 @@ jobs:
PR_NAME: ${{ needs.check_pr.outputs.pr_name }}
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v5
- name: New branch
run: git checkout -b update-runner-"$(date +%Y-%m-%d)"

View File

@@ -5,20 +5,20 @@ on:
branches:
- master
paths:
- 'charts/**'
- '.github/workflows/arc-validate-chart.yaml'
- '!charts/actions-runner-controller/docs/**'
- '!**.md'
- '!charts/gha-runner-scale-set-controller/**'
- '!charts/gha-runner-scale-set/**'
- "charts/**"
- ".github/workflows/arc-validate-chart.yaml"
- "!charts/actions-runner-controller/docs/**"
- "!**.md"
- "!charts/gha-runner-scale-set-controller/**"
- "!charts/gha-runner-scale-set/**"
push:
paths:
- 'charts/**'
- '.github/workflows/arc-validate-chart.yaml'
- '!charts/actions-runner-controller/docs/**'
- '!**.md'
- '!charts/gha-runner-scale-set-controller/**'
- '!charts/gha-runner-scale-set/**'
- "charts/**"
- ".github/workflows/arc-validate-chart.yaml"
- "!charts/actions-runner-controller/docs/**"
- "!**.md"
- "!charts/gha-runner-scale-set-controller/**"
- "!charts/gha-runner-scale-set/**"
workflow_dispatch:
env:
KUBE_SCORE_VERSION: 1.10.0
@@ -40,39 +40,22 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v5
with:
fetch-depth: 0
- name: Set up Helm
# Using https://github.com/Azure/setup-helm/releases/tag/v4.2
uses: azure/setup-helm@fe7b79cd5ee1e45176fcad797de68ecaf3ca4814
uses: azure/setup-helm@1a275c3b69536ee54be43f2070a358922e12c8d4
with:
version: ${{ env.HELM_VERSION }}
- name: Set up kube-score
run: |
wget https://github.com/zegl/kube-score/releases/download/v${{ env.KUBE_SCORE_VERSION }}/kube-score_${{ env.KUBE_SCORE_VERSION }}_linux_amd64 -O kube-score
chmod 755 kube-score
- name: Kube-score generated manifests
run: helm template --values charts/.ci/values-kube-score.yaml charts/* | ./kube-score score -
--ignore-test pod-networkpolicy
--ignore-test deployment-has-poddisruptionbudget
--ignore-test deployment-has-host-podantiaffinity
--ignore-test container-security-context
--ignore-test pod-probes
--ignore-test container-image-tag
--enable-optional-test container-security-context-privileged
--enable-optional-test container-security-context-readonlyrootfilesystem
# python is a requirement for the chart-testing action below (supports yamllint among other tests)
- uses: actions/setup-python@v5
- uses: actions/setup-python@v6
with:
python-version: '3.11'
python-version: "3.11"
- name: Set up chart-testing
uses: helm/chart-testing-action@v2.6.0
uses: helm/chart-testing-action@0d28d3144d3a25ea2cc349d6e59901c4ff469b3b
- name: Run chart-testing (list-changed)
id: list-changed
@@ -87,7 +70,7 @@ jobs:
ct lint --config charts/.ci/ct-config.yaml
- name: Create kind cluster
uses: helm/kind-action@v1.4.0
uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3
if: steps.list-changed.outputs.changed == 'true'
# We need cert-manager already installed in the cluster because we assume the CRDs exist

View File

@@ -3,17 +3,17 @@ name: Validate ARC Runners
on:
pull_request:
branches:
- '**'
- "**"
paths:
- 'runner/**'
- 'test/startup/**'
- '!**.md'
- "runner/**"
- "test/startup/**"
- "!**.md"
permissions:
contents: read
concurrency:
# This will make sure we only apply the concurrency limits on pull requests
# This will make sure we only apply the concurrency limits on pull requests
# but not pushes to master branch by making the concurrency group name unique
# for pushes
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
@@ -24,29 +24,17 @@ jobs:
name: runner / shellcheck
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: shellcheck
uses: reviewdog/action-shellcheck@v1
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
path: "./runner"
pattern: |
*.sh
*.bash
update-status
# Make this consistent with `make shellsheck`
shellcheck_flags: "--shell bash --source-path runner"
exclude: "./.git/*"
check_all_files_with_shebangs: "false"
# Set this to "true" once we addressed all the shellcheck findings
fail_on_error: "false"
- uses: actions/checkout@v5
- name: "Run shellcheck"
run: make shellcheck
test-runner-entrypoint:
name: Test entrypoint
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Checkout
uses: actions/checkout@v5
- name: Run tests
run: |
make acceptance/runner/startup
- name: Run tests
run: |
make acceptance/runner/startup

View File

@@ -16,7 +16,7 @@ env:
TARGET_ORG: actions-runner-controller
TARGET_REPO: arc_e2e_test_dummy
IMAGE_NAME: "arc-test-image"
IMAGE_VERSION: "0.11.0"
IMAGE_VERSION: "0.13.0"
concurrency:
# This will make sure we only apply the concurrency limits on pull requests
@@ -33,7 +33,7 @@ jobs:
env:
WORKFLOW_FILE: "arc-test-workflow.yaml"
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v5
with:
ref: ${{github.head_ref}}
@@ -124,7 +124,7 @@ jobs:
env:
WORKFLOW_FILE: "arc-test-workflow.yaml"
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v5
with:
ref: ${{github.head_ref}}
@@ -217,7 +217,7 @@ jobs:
env:
WORKFLOW_FILE: arc-test-dind-workflow.yaml
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v5
with:
ref: ${{github.head_ref}}
@@ -309,7 +309,7 @@ jobs:
env:
WORKFLOW_FILE: "arc-test-kubernetes-workflow.yaml"
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v5
with:
ref: ${{github.head_ref}}
@@ -410,7 +410,7 @@ jobs:
env:
WORKFLOW_FILE: "arc-test-workflow.yaml"
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v5
with:
ref: ${{github.head_ref}}
@@ -513,7 +513,7 @@ jobs:
env:
WORKFLOW_FILE: "arc-test-workflow.yaml"
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v5
with:
ref: ${{github.head_ref}}
@@ -610,7 +610,7 @@ jobs:
env:
WORKFLOW_FILE: "arc-test-workflow.yaml"
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v5
with:
ref: ${{github.head_ref}}
@@ -732,7 +732,7 @@ jobs:
env:
WORKFLOW_FILE: "arc-test-sleepy-matrix.yaml"
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v5
with:
ref: ${{github.head_ref}}
@@ -904,7 +904,7 @@ jobs:
env:
WORKFLOW_FILE: arc-test-workflow.yaml
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v5
with:
ref: ${{ github.head_ref }}

View File

@@ -4,27 +4,27 @@ on:
workflow_dispatch:
inputs:
ref:
description: 'The branch, tag or SHA to cut a release from'
description: "The branch, tag or SHA to cut a release from"
required: false
type: string
default: ''
default: ""
release_tag_name:
description: 'The name to tag the controller image with'
description: "The name to tag the controller image with"
required: true
type: string
default: 'canary'
default: "canary"
push_to_registries:
description: 'Push images to registries'
description: "Push images to registries"
required: true
type: boolean
default: false
publish_gha_runner_scale_set_controller_chart:
description: 'Publish new helm chart for gha-runner-scale-set-controller'
description: "Publish new helm chart for gha-runner-scale-set-controller"
required: true
type: boolean
default: false
publish_gha_runner_scale_set_chart:
description: 'Publish new helm chart for gha-runner-scale-set'
description: "Publish new helm chart for gha-runner-scale-set"
required: true
type: boolean
default: false
@@ -45,7 +45,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v5
with:
# If inputs.ref is empty, it'll resolve to the default branch
ref: ${{ inputs.ref }}
@@ -72,10 +72,10 @@ jobs:
echo "repository_owner=$(echo ${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435
with:
# Pinning v0.9.1 for Buildx and BuildKit v0.10.6
# BuildKit v0.11 which has a bug causing intermittent
@@ -84,14 +84,14 @@ jobs:
driver-opts: image=moby/buildkit:v0.10.6
- name: Login to GitHub Container Registry
uses: docker/login-action@v3
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Build & push controller image
uses: docker/build-push-action@v5
uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83
with:
file: Dockerfile
platforms: linux/amd64,linux/arm64
@@ -100,8 +100,6 @@ jobs:
tags: |
ghcr.io/${{ steps.resolve_parameters.outputs.repository_owner }}/gha-runner-scale-set-controller:${{ inputs.release_tag_name }}
ghcr.io/${{ steps.resolve_parameters.outputs.repository_owner }}/gha-runner-scale-set-controller:${{ inputs.release_tag_name }}-${{ steps.resolve_parameters.outputs.short_sha }}
cache-from: type=gha
cache-to: type=gha,mode=max
- name: Job summary
run: |
@@ -121,7 +119,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v5
with:
# If inputs.ref is empty, it'll resolve to the default branch
ref: ${{ inputs.ref }}
@@ -140,8 +138,7 @@ jobs:
echo "repository_owner=$(echo ${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT
- name: Set up Helm
# Using https://github.com/Azure/setup-helm/releases/tag/v4.2
uses: azure/setup-helm@fe7b79cd5ee1e45176fcad797de68ecaf3ca4814
uses: azure/setup-helm@1a275c3b69536ee54be43f2070a358922e12c8d4
with:
version: ${{ env.HELM_VERSION }}
@@ -169,7 +166,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v5
with:
# If inputs.ref is empty, it'll resolve to the default branch
ref: ${{ inputs.ref }}
@@ -188,8 +185,7 @@ jobs:
echo "repository_owner=$(echo ${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT
- name: Set up Helm
# Using https://github.com/Azure/setup-helm/releases/tag/v4.2
uses: azure/setup-helm@fe7b79cd5ee1e45176fcad797de68ecaf3ca4814
uses: azure/setup-helm@1a275c3b69536ee54be43f2070a358922e12c8d4
with:
version: ${{ env.HELM_VERSION }}

View File

@@ -5,16 +5,16 @@ on:
branches:
- master
paths:
- 'charts/**'
- '.github/workflows/gha-validate-chart.yaml'
- '!charts/actions-runner-controller/**'
- '!**.md'
- "charts/**"
- ".github/workflows/gha-validate-chart.yaml"
- "!charts/actions-runner-controller/**"
- "!**.md"
push:
paths:
- 'charts/**'
- '.github/workflows/gha-validate-chart.yaml'
- '!charts/actions-runner-controller/**'
- '!**.md'
- "charts/**"
- ".github/workflows/gha-validate-chart.yaml"
- "!charts/actions-runner-controller/**"
- "!**.md"
workflow_dispatch:
env:
KUBE_SCORE_VERSION: 1.16.1
@@ -36,23 +36,22 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v5
with:
fetch-depth: 0
- name: Set up Helm
# Using https://github.com/Azure/setup-helm/releases/tag/v4.2
uses: azure/setup-helm@fe7b79cd5ee1e45176fcad797de68ecaf3ca4814
uses: azure/setup-helm@1a275c3b69536ee54be43f2070a358922e12c8d4
with:
version: ${{ env.HELM_VERSION }}
# python is a requirement for the chart-testing action below (supports yamllint among other tests)
- uses: actions/setup-python@v5
- uses: actions/setup-python@v6
with:
python-version: '3.11'
python-version: "3.11"
- name: Set up chart-testing
uses: helm/chart-testing-action@v2.6.0
uses: helm/chart-testing-action@0d28d3144d3a25ea2cc349d6e59901c4ff469b3b
- name: Run chart-testing (list-changed)
id: list-changed
@@ -68,13 +67,13 @@ jobs:
ct lint --config charts/.ci/ct-config-gha.yaml
- name: Set up docker buildx
uses: docker/setup-buildx-action@v3
uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435
if: steps.list-changed.outputs.changed == 'true'
with:
version: latest
- name: Build controller image
uses: docker/build-push-action@v5
uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83
if: steps.list-changed.outputs.changed == 'true'
with:
file: Dockerfile
@@ -89,7 +88,7 @@ jobs:
cache-to: type=gha,mode=max
- name: Create kind cluster
uses: helm/kind-action@v1.4.0
uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3
if: steps.list-changed.outputs.changed == 'true'
with:
cluster_name: chart-testing
@@ -97,11 +96,11 @@ jobs:
- name: Load image into cluster
if: steps.list-changed.outputs.changed == 'true'
run: |
export DOCKER_IMAGE_NAME=test-arc
export VERSION=dev
export IMG_RESULT=load
make docker-buildx
kind load docker-image test-arc:dev --name chart-testing
export DOCKER_IMAGE_NAME=test-arc
export VERSION=dev
export IMG_RESULT=load
make docker-buildx
kind load docker-image test-arc:dev --name chart-testing
- name: Run chart-testing (install)
if: steps.list-changed.outputs.changed == 'true'
@@ -112,8 +111,8 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- uses: actions/setup-go@v5
uses: actions/checkout@v5
- uses: actions/setup-go@v6
with:
go-version-file: "go.mod"
cache: false

View File

@@ -7,30 +7,30 @@ on:
branches:
- master
paths-ignore:
- '**.md'
- '.github/actions/**'
- '.github/ISSUE_TEMPLATE/**'
- '.github/workflows/e2e-test-dispatch-workflow.yaml'
- '.github/workflows/gha-e2e-tests.yaml'
- '.github/workflows/arc-publish.yaml'
- '.github/workflows/arc-publish-chart.yaml'
- '.github/workflows/gha-publish-chart.yaml'
- '.github/workflows/arc-release-runners.yaml'
- '.github/workflows/global-run-codeql.yaml'
- '.github/workflows/global-run-first-interaction.yaml'
- '.github/workflows/global-run-stale.yaml'
- '.github/workflows/arc-update-runners-scheduled.yaml'
- '.github/workflows/validate-arc.yaml'
- '.github/workflows/arc-validate-chart.yaml'
- '.github/workflows/gha-validate-chart.yaml'
- '.github/workflows/arc-validate-runners.yaml'
- '.github/dependabot.yml'
- '.github/RELEASE_NOTE_TEMPLATE.md'
- 'runner/**'
- '.gitignore'
- 'PROJECT'
- 'LICENSE'
- 'Makefile'
- "**.md"
- ".github/actions/**"
- ".github/ISSUE_TEMPLATE/**"
- ".github/workflows/e2e-test-dispatch-workflow.yaml"
- ".github/workflows/gha-e2e-tests.yaml"
- ".github/workflows/arc-publish.yaml"
- ".github/workflows/arc-publish-chart.yaml"
- ".github/workflows/gha-publish-chart.yaml"
- ".github/workflows/arc-release-runners.yaml"
- ".github/workflows/global-run-codeql.yaml"
- ".github/workflows/global-run-first-interaction.yaml"
- ".github/workflows/global-run-stale.yaml"
- ".github/workflows/arc-update-runners-scheduled.yaml"
- ".github/workflows/validate-arc.yaml"
- ".github/workflows/arc-validate-chart.yaml"
- ".github/workflows/gha-validate-chart.yaml"
- ".github/workflows/arc-validate-runners.yaml"
- ".github/dependabot.yml"
- ".github/RELEASE_NOTE_TEMPLATE.md"
- "runner/**"
- ".gitignore"
- "PROJECT"
- "LICENSE"
- "Makefile"
# https://docs.github.com/en/rest/overview/permissions-required-for-github-apps
permissions:
@@ -55,11 +55,11 @@ jobs:
TARGET_REPO: actions-runner-controller
steps:
- name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v5
- name: Get Token
id: get_workflow_token
uses: peter-murray/workflow-application-token-action@dc0413987a085fa17d19df9e47d4677cf81ffef3
uses: peter-murray/workflow-application-token-action@d17e3a9a36850ea89f35db16c1067dd2b68ee343
with:
application_id: ${{ secrets.ACTIONS_ACCESS_APP_ID }}
application_private_key: ${{ secrets.ACTIONS_ACCESS_PK }}
@@ -90,10 +90,10 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v5
- name: Login to GitHub Container Registry
uses: docker/login-action@v3
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef
with:
registry: ghcr.io
username: ${{ github.actor }}
@@ -110,16 +110,16 @@ jobs:
echo "repository_owner=$(echo ${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435
with:
version: latest
# Unstable builds - run at your own risk
- name: Build and Push
uses: docker/build-push-action@v5
uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83
with:
context: .
file: ./Dockerfile

View File

@@ -25,20 +25,20 @@ jobs:
security-events: write
steps:
- name: Checkout repository
uses: actions/checkout@v4
uses: actions/checkout@v5
- name: Install Go
uses: actions/setup-go@v5
uses: actions/setup-go@v6
with:
go-version-file: go.mod
- name: Initialize CodeQL
uses: github/codeql-action/init@v2
uses: github/codeql-action/init@v3
with:
languages: go
languages: go, actions
- name: Autobuild
uses: github/codeql-action/autobuild@v2
uses: github/codeql-action/autobuild@v3
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v2
uses: github/codeql-action/analyze@v3

View File

@@ -1,5 +1,10 @@
name: First Interaction
permissions:
contents: read
issues: write
pull-requests: write
on:
issues:
types: [opened]
@@ -11,7 +16,7 @@ jobs:
check_for_first_interaction:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v5
- uses: actions/first-interaction@main
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}

View File

@@ -14,7 +14,7 @@ jobs:
issues: write # for actions/stale to close stale issues
pull-requests: write # for actions/stale to close stale PRs
steps:
- uses: actions/stale@v6
- uses: actions/stale@v10
with:
stale-issue-message: 'This issue is stale because it has been open 30 days with no activity. Remove stale label or comment or this will be closed in 5 days.'
# turn off stale for both issues and PRs

View File

@@ -4,16 +4,16 @@ on:
branches:
- master
paths:
- '.github/workflows/go.yaml'
- '**.go'
- 'go.mod'
- 'go.sum'
- ".github/workflows/go.yaml"
- "**.go"
- "go.mod"
- "go.sum"
pull_request:
paths:
- '.github/workflows/go.yaml'
- '**.go'
- 'go.mod'
- 'go.sum'
- ".github/workflows/go.yaml"
- "**.go"
- "go.mod"
- "go.sum"
permissions:
contents: read
@@ -29,10 +29,10 @@ jobs:
fmt:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
- uses: actions/checkout@v5
- uses: actions/setup-go@v6
with:
go-version-file: 'go.mod'
go-version-file: "go.mod"
cache: false
- name: fmt
run: go fmt ./...
@@ -42,24 +42,24 @@ jobs:
lint:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
- uses: actions/checkout@v5
- uses: actions/setup-go@v6
with:
go-version-file: 'go.mod'
go-version-file: "go.mod"
cache: false
- name: golangci-lint
uses: golangci/golangci-lint-action@v6
uses: golangci/golangci-lint-action@4afd733a84b1f43292c63897423277bb7f4313a9
with:
only-new-issues: true
version: v1.55.2
version: v2.5.0
generate:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
- uses: actions/checkout@v5
- uses: actions/setup-go@v6
with:
go-version-file: 'go.mod'
go-version-file: "go.mod"
cache: false
- name: Generate
run: make generate
@@ -69,10 +69,10 @@ jobs:
test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
- uses: actions/checkout@v5
- uses: actions/setup-go@v6
with:
go-version-file: 'go.mod'
go-version-file: "go.mod"
- run: make manifests
- name: Check diff
run: git diff --exit-code

View File

@@ -1,19 +1,14 @@
version: "2"
run:
timeout: 3m
output:
formats:
- format: github-actions
path: stdout
linters-settings:
errcheck:
exclude-functions:
- (net/http.ResponseWriter).Write
- (*net/http.Server).Shutdown
- (*github.com/actions/actions-runner-controller/simulator.VisibleRunnerGroups).Add
- (*github.com/actions/actions-runner-controller/testing.Kind).Stop
issues:
exclude-rules:
- path: controllers/suite_test.go
linters:
- staticcheck
text: "SA1019"
timeout: 5m
linters:
settings:
errcheck:
exclude-functions:
- (net/http.ResponseWriter).Write
- (*net/http.Server).Shutdown
- (*github.com/actions/actions-runner-controller/simulator.VisibleRunnerGroups).Add
- (*github.com/actions/actions-runner-controller/testing.Kind).Stop
exclusions:
presets:
- std-error-handling
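
For context on the errcheck exclusions carried over above: they allow the common Go pattern of ignoring the error returned by ResponseWriter.Write. A minimal sketch (not from this repository) of code the linter would otherwise flag:

package main

import (
	"log"
	"net/http"
)

// healthz discards the error from Write; errcheck would normally flag
// this call, and the (net/http.ResponseWriter).Write exclusion permits it.
func healthz(w http.ResponseWriter, _ *http.Request) {
	w.Write([]byte("ok"))
}

func main() {
	http.HandleFunc("/healthz", healthz)
	log.Fatal(http.ListenAndServe(":8080", nil))
}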

View File

@@ -1,2 +1,2 @@
# actions-runner-controller maintainers
* @mumoshu @toast-gear @actions/actions-launch @nikola-jokic @rentziass
* @mumoshu @toast-gear @actions/actions-launch @actions/actions-compute @nikola-jokic @rentziass

View File

@@ -1,5 +1,5 @@
# Build the manager binary
FROM --platform=$BUILDPLATFORM golang:1.24.0 as builder
FROM --platform=$BUILDPLATFORM golang:1.25.1 AS builder
WORKDIR /workspace
@@ -30,7 +30,7 @@ ARG TARGETPLATFORM TARGETOS TARGETARCH TARGETVARIANT VERSION=dev COMMIT_SHA=dev
# to avoid https://github.com/moby/buildkit/issues/2334
# We can use docker layer cache so the build is fast enogh anyway
# We also use per-platform GOCACHE for the same reason.
ENV GOCACHE /build/${TARGETPLATFORM}/root/.cache/go-build
ENV GOCACHE="/build/${TARGETPLATFORM}/root/.cache/go-build"
# Build
RUN --mount=target=. \

View File

@@ -6,7 +6,7 @@ endif
DOCKER_USER ?= $(shell echo ${DOCKER_IMAGE_NAME} | cut -d / -f1)
VERSION ?= dev
COMMIT_SHA = $(shell git rev-parse HEAD)
RUNNER_VERSION ?= 2.323.0
RUNNER_VERSION ?= 2.328.0
TARGETPLATFORM ?= $(shell arch)
RUNNER_NAME ?= ${DOCKER_USER}/actions-runner
RUNNER_TAG ?= ${VERSION}
@@ -20,7 +20,7 @@ KUBECONTEXT ?= kind-acceptance
CLUSTER ?= acceptance
CERT_MANAGER_VERSION ?= v1.1.1
KUBE_RBAC_PROXY_VERSION ?= v0.11.0
SHELLCHECK_VERSION ?= 0.8.0
SHELLCHECK_VERSION ?= 0.10.0
# Produce CRDs that work back to Kubernetes 1.11 (no version conversion)
CRD_OPTIONS ?= "crd:generateEmbeddedObjectMeta=true,allowDangerousTypes=true"
@@ -68,7 +68,7 @@ endif
all: manager
lint:
docker run --rm -v $(PWD):/app -w /app golangci/golangci-lint:v1.57.2 golangci-lint run
docker run --rm -v $(PWD):/app -w /app golangci/golangci-lint:v2.5.0 golangci-lint run
GO_TEST_ARGS ?= -short
@@ -117,9 +117,6 @@ manifests: manifests-gen-crds chart-crds
manifests-gen-crds: controller-gen yq
$(CONTROLLER_GEN) $(CRD_OPTIONS) rbac:roleName=manager-role webhook paths="./..." output:crd:artifacts:config=config/crd/bases
for YAMLFILE in config/crd/bases/actions*.yaml; do \
$(YQ) '.spec.preserveUnknownFields = false' --inplace "$$YAMLFILE" ; \
done
make manifests-gen-crds-fix DELETE_KEY=x-kubernetes-list-type
make manifests-gen-crds-fix DELETE_KEY=x-kubernetes-list-map-keys
@@ -204,7 +201,7 @@ generate: controller-gen
# Run shellcheck on runner scripts
shellcheck: shellcheck-install
$(TOOLS_PATH)/shellcheck --shell bash --source-path runner runner/*.sh hack/*.sh
$(TOOLS_PATH)/shellcheck --shell bash --source-path runner runner/*.sh runner/update-status hack/*.sh
docker-buildx:
export DOCKER_CLI_EXPERIMENTAL=enabled ;\
@@ -310,7 +307,7 @@ github-release: release
# Otherwise we get errors like the below:
# Error: failed to install CRD crds/actions.summerwind.dev_runnersets.yaml: CustomResourceDefinition.apiextensions.k8s.io "runnersets.actions.summerwind.dev" is invalid: [spec.validation.openAPIV3Schema.properties[spec].properties[template].properties[spec].properties[containers].items.properties[ports].items.properties[protocol].default: Required value: this property is in x-kubernetes-list-map-keys, so it must have a default or be a required property, spec.validation.openAPIV3Schema.properties[spec].properties[template].properties[spec].properties[initContainers].items.properties[ports].items.properties[protocol].default: Required value: this property is in x-kubernetes-list-map-keys, so it must have a default or be a required property]
#
# Note that controller-gen newer than 0.6.2 is needed due to https://github.com/kubernetes-sigs/controller-tools/issues/448
# Note that controller-gen newer than 0.7.0 is needed due to https://github.com/kubernetes-sigs/controller-tools/issues/448
# Otherwise ObjectMeta embedded in Spec results in empty on the storage.
controller-gen:
ifeq (, $(shell which controller-gen))
@@ -320,7 +317,7 @@ ifeq (, $(wildcard $(GOBIN)/controller-gen))
CONTROLLER_GEN_TMP_DIR=$$(mktemp -d) ;\
cd $$CONTROLLER_GEN_TMP_DIR ;\
go mod init tmp ;\
go install sigs.k8s.io/controller-tools/cmd/controller-gen@v0.17.2 ;\
go install sigs.k8s.io/controller-tools/cmd/controller-gen@v0.19.0 ;\
rm -rf $$CONTROLLER_GEN_TMP_DIR ;\
}
endif

View File

@@ -5,22 +5,23 @@ on:
env:
IRSA_ROLE_ARN:
ASSUME_ROLE_ARN:
AWS_REGION:
ASSUME_ROLE_ARN:
AWS_REGION:
jobs:
assume-role-in-runner-test:
runs-on: ['self-hosted', 'Linux']
runs-on: ["self-hosted", "Linux"]
steps:
- name: Test aws-actions/configure-aws-credentials Action
uses: aws-actions/configure-aws-credentials@v1
# https://github.com/aws-actions/configure-aws-credentials/releases/tag/v4.1.0
uses: aws-actions/configure-aws-credentials@ececac1a45f3b08a01d2dd070d28d111c5fe6722
with:
aws-region: ${{ env.AWS_REGION }}
role-to-assume: ${{ env.ASSUME_ROLE_ARN }}
role-duration-seconds: 900
assume-role-in-container-test:
runs-on: ['self-hosted', 'Linux']
container:
runs-on: ["self-hosted", "Linux"]
container:
image: amazon/aws-cli
env:
AWS_WEB_IDENTITY_TOKEN_FILE: /var/run/secrets/eks.amazonaws.com/serviceaccount/token
@@ -29,7 +30,8 @@ jobs:
- /var/run/secrets/eks.amazonaws.com/serviceaccount/token:/var/run/secrets/eks.amazonaws.com/serviceaccount/token
steps:
- name: Test aws-actions/configure-aws-credentials Action in container
uses: aws-actions/configure-aws-credentials@v1
# https://github.com/aws-actions/configure-aws-credentials/releases/tag/v4.1.0
uses: aws-actions/configure-aws-credentials@ececac1a45f3b08a01d2dd070d28d111c5fe6722
with:
aws-region: ${{ env.AWS_REGION }}
role-to-assume: ${{ env.ASSUME_ROLE_ARN }}

View File

@@ -8,8 +8,8 @@ env:
jobs:
run-step-in-container-test:
runs-on: ['self-hosted', 'Linux']
container:
runs-on: ["self-hosted", "Linux"]
container:
image: alpine
steps:
- name: Test we are working in the container
@@ -21,7 +21,7 @@ jobs:
exit 1
fi
setup-python-test:
runs-on: ['self-hosted', 'Linux']
runs-on: ["self-hosted", "Linux"]
steps:
- name: Print native Python environment
run: |
@@ -41,12 +41,12 @@ jobs:
echo "Python version detected : $(python --version 2>&1)"
fi
setup-node-test:
runs-on: ['self-hosted', 'Linux']
runs-on: ["self-hosted", "Linux"]
steps:
- uses: actions/setup-node@v2
with:
node-version: '12'
- name: Test actions/setup-node works
node-version: "12"
- name: Test actions/setup-node works
run: |
VERSION=$(node --version | cut -c 2- | cut -d '.' -f1)
if [[ $VERSION != '12' ]]; then
@@ -57,13 +57,14 @@ jobs:
echo "Node version detected : $(node --version 2>&1)"
fi
setup-ruby-test:
runs-on: ['self-hosted', 'Linux']
runs-on: ["self-hosted", "Linux"]
steps:
- uses: ruby/setup-ruby@v1
# https://github.com/ruby/setup-ruby/releases/tag/v1.227.0
- uses: ruby/setup-ruby@1a615958ad9d422dd932dc1d5823942ee002799f
with:
ruby-version: 3.0
bundler-cache: true
- name: Test ruby/setup-ruby works
- name: Test ruby/setup-ruby works
run: |
VERSION=$(ruby --version | cut -d ' ' -f2 | cut -d '.' -f1-2)
if [[ $VERSION != '3.0' ]]; then
@@ -74,8 +75,8 @@ jobs:
echo "Ruby version detected : $(ruby --version 2>&1)"
fi
python-shell-test:
runs-on: ['self-hosted', 'Linux']
steps:
runs-on: ["self-hosted", "Linux"]
steps:
- name: Test Python shell works
run: |
import os

View File

@@ -0,0 +1,89 @@
package appconfig
import (
"bytes"
"encoding/json"
"fmt"
"strconv"
corev1 "k8s.io/api/core/v1"
)
type AppConfig struct {
AppID string `json:"github_app_id"`
AppInstallationID int64 `json:"github_app_installation_id"`
AppPrivateKey string `json:"github_app_private_key"`
Token string `json:"github_token"`
}
func (c *AppConfig) tidy() *AppConfig {
if len(c.Token) > 0 {
return &AppConfig{
Token: c.Token,
}
}
return &AppConfig{
AppID: c.AppID,
AppInstallationID: c.AppInstallationID,
AppPrivateKey: c.AppPrivateKey,
}
}
func (c *AppConfig) Validate() error {
if c == nil {
return fmt.Errorf("missing app config")
}
hasToken := len(c.Token) > 0
hasGitHubAppAuth := c.hasGitHubAppAuth()
if hasToken && hasGitHubAppAuth {
return fmt.Errorf("both PAT and GitHub App credentials provided. should only provide one")
}
if !hasToken && !hasGitHubAppAuth {
return fmt.Errorf("no credentials provided: either a PAT or GitHub App credentials should be provided")
}
return nil
}
func (c *AppConfig) hasGitHubAppAuth() bool {
return len(c.AppID) > 0 && c.AppInstallationID > 0 && len(c.AppPrivateKey) > 0
}
func FromSecret(secret *corev1.Secret) (*AppConfig, error) {
var appInstallationID int64
if v := string(secret.Data["github_app_installation_id"]); v != "" {
val, err := strconv.ParseInt(v, 10, 64)
if err != nil {
return nil, err
}
appInstallationID = val
}
cfg := &AppConfig{
Token: string(secret.Data["github_token"]),
AppID: string(secret.Data["github_app_id"]),
AppInstallationID: appInstallationID,
AppPrivateKey: string(secret.Data["github_app_private_key"]),
}
if err := cfg.Validate(); err != nil {
return nil, fmt.Errorf("failed to validate config: %v", err)
}
return cfg.tidy(), nil
}
func FromJSONString(v string) (*AppConfig, error) {
var appConfig AppConfig
if err := json.NewDecoder(bytes.NewBufferString(v)).Decode(&appConfig); err != nil {
return nil, err
}
if err := appConfig.Validate(); err != nil {
return nil, fmt.Errorf("failed to validate app config decoded from string: %w", err)
}
return appConfig.tidy(), nil
}
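The new appconfig package gives every consumer a single parsing and validation path for GitHub credentials: a secret carries either a PAT (github_token) or the GitHub App triple, never both, and tidy() drops whichever half is unused. A minimal usage sketch follows; the import path and credential values are illustrative assumptions, not taken from this diff.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"

	// Assumed location of the package shown above.
	"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1/appconfig"
)

func main() {
	// Hypothetical secret carrying GitHub App credentials.
	secret := &corev1.Secret{
		Data: map[string][]byte{
			"github_app_id":              []byte("12345"),
			"github_app_installation_id": []byte("67890"),
			"github_app_private_key":     []byte("-----BEGIN RSA PRIVATE KEY-----..."),
		},
	}
	cfg, err := appconfig.FromSecret(secret)
	if err != nil {
		// Supplying both a PAT and app credentials, or neither, lands here.
		fmt.Println("bad credentials:", err)
		return
	}
	fmt.Println("app", cfg.AppID, "installation", cfg.AppInstallationID)
}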

View File

@@ -0,0 +1,152 @@
package appconfig
import (
"encoding/json"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
)
func TestAppConfigValidate_invalid(t *testing.T) {
tt := map[string]*AppConfig{
"empty": {},
"token and app config": {
AppID: "1",
AppInstallationID: 2,
AppPrivateKey: "private key",
Token: "token",
},
"app id not set": {
AppInstallationID: 2,
AppPrivateKey: "private key",
},
"app installation id not set": {
AppID: "2",
AppPrivateKey: "private key",
},
"private key empty": {
AppID: "2",
AppInstallationID: 1,
AppPrivateKey: "",
},
}
for name, cfg := range tt {
t.Run(name, func(t *testing.T) {
err := cfg.Validate()
require.Error(t, err)
})
}
}
func TestAppConfigValidate_valid(t *testing.T) {
tt := map[string]*AppConfig{
"token": {
Token: "token",
},
"app ID": {
AppID: "1",
AppInstallationID: 2,
AppPrivateKey: "private key",
},
}
for name, cfg := range tt {
t.Run(name, func(t *testing.T) {
err := cfg.Validate()
require.NoError(t, err)
})
}
}
func TestAppConfigFromSecret_invalid(t *testing.T) {
tt := map[string]map[string]string{
"empty": {},
"token and app provided": {
"github_token": "token",
"github_app_id": "2",
"githu_app_installation_id": "3",
"github_app_private_key": "private key",
},
"invalid app id": {
"github_app_id": "abc",
"githu_app_installation_id": "3",
"github_app_private_key": "private key",
},
"invalid app installation_id": {
"github_app_id": "1",
"githu_app_installation_id": "abc",
"github_app_private_key": "private key",
},
"empty private key": {
"github_app_id": "1",
"githu_app_installation_id": "2",
"github_app_private_key": "",
},
}
for name, data := range tt {
t.Run(name, func(t *testing.T) {
secret := &corev1.Secret{
StringData: data,
}
appConfig, err := FromSecret(secret)
assert.Error(t, err)
assert.Nil(t, appConfig)
})
}
}
func TestAppConfigFromSecret_valid(t *testing.T) {
tt := map[string]map[string]string{
"with token": {
"github_token": "token",
},
"app config": {
"github_app_id": "2",
"githu_app_installation_id": "3",
"github_app_private_key": "private key",
},
}
for name, data := range tt {
t.Run(name, func(t *testing.T) {
secret := &corev1.Secret{
StringData: data,
}
appConfig, err := FromSecret(secret)
assert.Error(t, err)
assert.Nil(t, appConfig)
})
}
}
func TestAppConfigFromString_valid(t *testing.T) {
tt := map[string]*AppConfig{
"token": {
Token: "token",
},
"app ID": {
AppID: "1",
AppInstallationID: 2,
AppPrivateKey: "private key",
},
}
for name, cfg := range tt {
t.Run(name, func(t *testing.T) {
bytes, err := json.Marshal(cfg)
require.NoError(t, err)
got, err := FromJSONString(string(bytes))
require.NoError(t, err)
want := cfg.tidy()
assert.Equal(t, want, got)
})
}
}

View File

@@ -59,7 +59,10 @@ type AutoscalingListenerSpec struct {
Proxy *ProxyConfig `json:"proxy,omitempty"`
// +optional
GitHubServerTLS *GitHubServerTLSConfig `json:"githubServerTLS,omitempty"`
GitHubServerTLS *TLSConfig `json:"githubServerTLS,omitempty"`
// +optional
VaultConfig *VaultConfig `json:"vaultConfig,omitempty"`
// +optional
Metrics *MetricsConfig `json:"metrics,omitempty"`
@@ -87,7 +90,6 @@ type AutoscalingListener struct {
}
// +kubebuilder:object:root=true
// AutoscalingListenerList contains a list of AutoscalingListener
type AutoscalingListenerList struct {
metav1.TypeMeta `json:",inline"`

View File

@@ -24,6 +24,7 @@ import (
"strings"
"github.com/actions/actions-runner-controller/hash"
"github.com/actions/actions-runner-controller/vault"
"golang.org/x/net/http/httpproxy"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -69,7 +70,10 @@ type AutoscalingRunnerSetSpec struct {
Proxy *ProxyConfig `json:"proxy,omitempty"`
// +optional
GitHubServerTLS *GitHubServerTLSConfig `json:"githubServerTLS,omitempty"`
GitHubServerTLS *TLSConfig `json:"githubServerTLS,omitempty"`
// +optional
VaultConfig *VaultConfig `json:"vaultConfig,omitempty"`
// Required
Template corev1.PodTemplateSpec `json:"template,omitempty"`
@@ -89,12 +93,12 @@ type AutoscalingRunnerSetSpec struct {
MinRunners *int `json:"minRunners,omitempty"`
}
type GitHubServerTLSConfig struct {
type TLSConfig struct {
// Required
CertificateFrom *TLSCertificateSource `json:"certificateFrom,omitempty"`
}
func (c *GitHubServerTLSConfig) ToCertPool(keyFetcher func(name, key string) ([]byte, error)) (*x509.CertPool, error) {
func (c *TLSConfig) ToCertPool(keyFetcher func(name, key string) ([]byte, error)) (*x509.CertPool, error) {
if c.CertificateFrom == nil {
return nil, fmt.Errorf("certificateFrom not specified")
}
@@ -142,7 +146,7 @@ type ProxyConfig struct {
NoProxy []string `json:"noProxy,omitempty"`
}
func (c *ProxyConfig) toHTTPProxyConfig(secretFetcher func(string) (*corev1.Secret, error)) (*httpproxy.Config, error) {
func (c *ProxyConfig) ToHTTPProxyConfig(secretFetcher func(string) (*corev1.Secret, error)) (*httpproxy.Config, error) {
config := &httpproxy.Config{
NoProxy: strings.Join(c.NoProxy, ","),
}
@@ -201,7 +205,7 @@ func (c *ProxyConfig) toHTTPProxyConfig(secretFetcher func(string) (*corev1.Secr
}
func (c *ProxyConfig) ToSecretData(secretFetcher func(string) (*corev1.Secret, error)) (map[string][]byte, error) {
config, err := c.toHTTPProxyConfig(secretFetcher)
config, err := c.ToHTTPProxyConfig(secretFetcher)
if err != nil {
return nil, err
}
@@ -215,7 +219,7 @@ func (c *ProxyConfig) ToSecretData(secretFetcher func(string) (*corev1.Secret, e
}
func (c *ProxyConfig) ProxyFunc(secretFetcher func(string) (*corev1.Secret, error)) (func(*http.Request) (*url.URL, error), error) {
config, err := c.toHTTPProxyConfig(secretFetcher)
config, err := c.ToHTTPProxyConfig(secretFetcher)
if err != nil {
return nil, err
}
@@ -235,6 +239,26 @@ type ProxyServerConfig struct {
CredentialSecretRef string `json:"credentialSecretRef,omitempty"`
}
type VaultConfig struct {
// +optional
Type vault.VaultType `json:"type,omitempty"`
// +optional
AzureKeyVault *AzureKeyVaultConfig `json:"azureKeyVault,omitempty"`
// +optional
Proxy *ProxyConfig `json:"proxy,omitempty"`
}
type AzureKeyVaultConfig struct {
// +required
URL string `json:"url,omitempty"`
// +required
TenantID string `json:"tenantId,omitempty"`
// +required
ClientID string `json:"clientId,omitempty"`
// +required
CertificatePath string `json:"certificatePath,omitempty"`
}
// MetricsConfig holds configuration parameters for each metric type
type MetricsConfig struct {
// +optional
@@ -285,6 +309,33 @@ func (ars *AutoscalingRunnerSet) ListenerSpecHash() string {
return hash.ComputeTemplateHash(&spec)
}
func (ars *AutoscalingRunnerSet) GitHubConfigSecret() string {
return ars.Spec.GitHubConfigSecret
}
func (ars *AutoscalingRunnerSet) GitHubConfigUrl() string {
return ars.Spec.GitHubConfigUrl
}
func (ars *AutoscalingRunnerSet) GitHubProxy() *ProxyConfig {
return ars.Spec.Proxy
}
func (ars *AutoscalingRunnerSet) GitHubServerTLS() *TLSConfig {
return ars.Spec.GitHubServerTLS
}
func (ars *AutoscalingRunnerSet) VaultConfig() *VaultConfig {
return ars.Spec.VaultConfig
}
func (ars *AutoscalingRunnerSet) VaultProxy() *ProxyConfig {
if ars.Spec.VaultConfig != nil {
return ars.Spec.VaultConfig.Proxy
}
return nil
}
func (ars *AutoscalingRunnerSet) RunnerSetSpecHash() string {
type runnerSetSpec struct {
GitHubConfigUrl string
@@ -292,7 +343,7 @@ func (ars *AutoscalingRunnerSet) RunnerSetSpecHash() string {
RunnerGroup string
RunnerScaleSetName string
Proxy *ProxyConfig
GitHubServerTLS *GitHubServerTLSConfig
GitHubServerTLS *TLSConfig
Template corev1.PodTemplateSpec
}
spec := &runnerSetSpec{

View File

@@ -34,6 +34,7 @@ const EphemeralRunnerContainerName = "runner"
// +kubebuilder:printcolumn:JSONPath=".status.jobWorkflowRef",name=JobWorkflowRef,type=string
// +kubebuilder:printcolumn:JSONPath=".status.workflowRunId",name=WorkflowRunId,type=number
// +kubebuilder:printcolumn:JSONPath=".status.jobDisplayName",name=JobDisplayName,type=string
// +kubebuilder:printcolumn:JSONPath=".status.jobId",name=JobId,type=string
// +kubebuilder:printcolumn:JSONPath=".status.message",name=Message,type=string
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"
@@ -50,6 +51,10 @@ func (er *EphemeralRunner) IsDone() bool {
return er.Status.Phase == corev1.PodSucceeded || er.Status.Phase == corev1.PodFailed
}
func (er *EphemeralRunner) HasJob() bool {
return len(er.Status.JobID) > 0
}
func (er *EphemeralRunner) HasContainerHookConfigured() bool {
for i := range er.Spec.Spec.Containers {
if er.Spec.Spec.Containers[i].Name != EphemeralRunnerContainerName {
@@ -67,6 +72,33 @@ func (er *EphemeralRunner) HasContainerHookConfigured() bool {
return false
}
func (er *EphemeralRunner) GitHubConfigSecret() string {
return er.Spec.GitHubConfigSecret
}
func (er *EphemeralRunner) GitHubConfigUrl() string {
return er.Spec.GitHubConfigUrl
}
func (er *EphemeralRunner) GitHubProxy() *ProxyConfig {
return er.Spec.Proxy
}
func (er *EphemeralRunner) GitHubServerTLS() *TLSConfig {
return er.Spec.GitHubServerTLS
}
func (er *EphemeralRunner) VaultConfig() *VaultConfig {
return er.Spec.VaultConfig
}
func (er *EphemeralRunner) VaultProxy() *ProxyConfig {
if er.Spec.VaultConfig != nil {
return er.Spec.VaultConfig.Proxy
}
return nil
}
// EphemeralRunnerSpec defines the desired state of EphemeralRunner
type EphemeralRunnerSpec struct {
// +required
@@ -75,6 +107,9 @@ type EphemeralRunnerSpec struct {
// +required
GitHubConfigSecret string `json:"githubConfigSecret,omitempty"`
// +optional
GitHubServerTLS *TLSConfig `json:"githubServerTLS,omitempty"`
// +required
RunnerScaleSetId int `json:"runnerScaleSetId,omitempty"`
@@ -85,7 +120,7 @@ type EphemeralRunnerSpec struct {
ProxySecretRef string `json:"proxySecretRef,omitempty"`
// +optional
GitHubServerTLS *GitHubServerTLSConfig `json:"githubServerTLS,omitempty"`
VaultConfig *VaultConfig `json:"vaultConfig,omitempty"`
corev1.PodTemplateSpec `json:",inline"`
}
@@ -115,15 +150,16 @@ type EphemeralRunnerStatus struct {
RunnerId int `json:"runnerId,omitempty"`
// +optional
RunnerName string `json:"runnerName,omitempty"`
// +optional
RunnerJITConfig string `json:"runnerJITConfig,omitempty"`
// +optional
Failures map[string]bool `json:"failures,omitempty"`
Failures map[string]metav1.Time `json:"failures,omitempty"`
// +optional
JobRequestId int64 `json:"jobRequestId,omitempty"`
// +optional
JobID string `json:"jobId,omitempty"`
// +optional
JobRepositoryName string `json:"jobRepositoryName,omitempty"`
@@ -137,6 +173,20 @@ type EphemeralRunnerStatus struct {
JobDisplayName string `json:"jobDisplayName,omitempty"`
}
func (s *EphemeralRunnerStatus) LastFailure() metav1.Time {
var maxTime metav1.Time
if len(s.Failures) == 0 {
return maxTime
}
for _, ts := range s.Failures {
if ts.After(maxTime.Time) {
maxTime = ts
}
}
return maxTime
}
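Keying Failures by timestamp rather than bool is what makes LastFailure possible: the controller can now reason about when the most recent failure happened, not just how many there were. A sketch of the kind of check this enables, written as if inside the same v1alpha1 package; the function and its five-minute window are assumptions for illustration, not part of this diff.

package v1alpha1

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// shouldBackOff reports whether the runner failed recently enough that a
// retry should wait. Illustrative only; the threshold is assumed.
func shouldBackOff(er *EphemeralRunner, now metav1.Time) bool {
	last := er.Status.LastFailure()
	if last.IsZero() {
		return false // nothing recorded in Status.Failures yet
	}
	return now.Sub(last.Time) < 5*time.Minute
}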
// +kubebuilder:object:root=true
// EphemeralRunnerList contains a list of EphemeralRunner

View File

@@ -60,9 +60,35 @@ type EphemeralRunnerSet struct {
Status EphemeralRunnerSetStatus `json:"status,omitempty"`
}
// +kubebuilder:object:root=true
func (ers *EphemeralRunnerSet) GitHubConfigSecret() string {
return ers.Spec.EphemeralRunnerSpec.GitHubConfigSecret
}
func (ers *EphemeralRunnerSet) GitHubConfigUrl() string {
return ers.Spec.EphemeralRunnerSpec.GitHubConfigUrl
}
func (ers *EphemeralRunnerSet) GitHubProxy() *ProxyConfig {
return ers.Spec.EphemeralRunnerSpec.Proxy
}
func (ers *EphemeralRunnerSet) GitHubServerTLS() *TLSConfig {
return ers.Spec.EphemeralRunnerSpec.GitHubServerTLS
}
func (ers *EphemeralRunnerSet) VaultConfig() *VaultConfig {
return ers.Spec.EphemeralRunnerSpec.VaultConfig
}
func (ers *EphemeralRunnerSet) VaultProxy() *ProxyConfig {
if ers.Spec.EphemeralRunnerSpec.VaultConfig != nil {
return ers.Spec.EphemeralRunnerSpec.VaultConfig.Proxy
}
return nil
}
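AutoscalingRunnerSet, EphemeralRunner, and EphemeralRunnerSet now expose the same six accessors, so credential, proxy, TLS, and Vault resolution can be written once against a shared shape. The named interface below is a hypothetical illustration of that shape; the diff adds only the methods, not an interface declaration.

package v1alpha1

// GitHubConfigured is an assumed, illustrative interface that all three
// resources satisfy via the accessors added in this change.
type GitHubConfigured interface {
	GitHubConfigSecret() string
	GitHubConfigUrl() string
	GitHubProxy() *ProxyConfig
	GitHubServerTLS() *TLSConfig
	VaultConfig() *VaultConfig
	VaultProxy() *ProxyConfig
}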
// EphemeralRunnerSetList contains a list of EphemeralRunnerSet
// +kubebuilder:object:root=true
type EphemeralRunnerSetList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`

View File

@@ -17,7 +17,7 @@ import (
func TestGitHubServerTLSConfig_ToCertPool(t *testing.T) {
t.Run("returns an error if CertificateFrom not specified", func(t *testing.T) {
c := &v1alpha1.GitHubServerTLSConfig{
c := &v1alpha1.TLSConfig{
CertificateFrom: nil,
}
@@ -29,7 +29,7 @@ func TestGitHubServerTLSConfig_ToCertPool(t *testing.T) {
})
t.Run("returns an error if CertificateFrom.ConfigMapKeyRef not specified", func(t *testing.T) {
c := &v1alpha1.GitHubServerTLSConfig{
c := &v1alpha1.TLSConfig{
CertificateFrom: &v1alpha1.TLSCertificateSource{},
}
@@ -41,7 +41,7 @@ func TestGitHubServerTLSConfig_ToCertPool(t *testing.T) {
})
t.Run("returns a valid cert pool with correct configuration", func(t *testing.T) {
c := &v1alpha1.GitHubServerTLSConfig{
c := &v1alpha1.TLSConfig{
CertificateFrom: &v1alpha1.TLSCertificateSource{
ConfigMapKeyRef: &v1.ConfigMapKeySelector{
LocalObjectReference: v1.LocalObjectReference{

View File

@@ -0,0 +1,72 @@
package v1alpha1
import "strings"
func IsVersionAllowed(resourceVersion, buildVersion string) bool {
if buildVersion == "dev" || resourceVersion == buildVersion || strings.HasPrefix(buildVersion, "canary-") {
return true
}
rv, ok := parseSemver(resourceVersion)
if !ok {
return false
}
bv, ok := parseSemver(buildVersion)
if !ok {
return false
}
return rv.major == bv.major && rv.minor == bv.minor
}
type semver struct {
major string
minor string
}
func parseSemver(v string) (p semver, ok bool) {
if v == "" {
return
}
p.major, v, ok = parseInt(v)
if !ok {
return p, false
}
if v == "" {
p.minor = "0"
return p, true
}
if v[0] != '.' {
return p, false
}
p.minor, v, ok = parseInt(v[1:])
if !ok {
return p, false
}
if v == "" {
return p, true
}
if v[0] != '.' {
return p, false
}
if _, _, ok = parseInt(v[1:]); !ok {
return p, false
}
return p, true
}
func parseInt(v string) (t, rest string, ok bool) {
if v == "" {
return
}
if v[0] < '0' || '9' < v[0] {
return
}
i := 1
for i < len(v) && '0' <= v[i] && v[i] <= '9' {
i++
}
if v[0] == '0' && i != 1 {
return
}
return v[:i], v[i:], true
}
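IsVersionAllowed encodes the upgrade contract between the CRDs and the controller build: versions are compatible when major and minor match (patch is free to differ), while dev and canary builds accept anything. A runnable illustration, using made-up version strings:

package v1alpha1_test

import (
	"fmt"

	"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
)

func ExampleIsVersionAllowed() {
	fmt.Println(v1alpha1.IsVersionAllowed("0.11.1", "0.11.0")) // patch may differ
	fmt.Println(v1alpha1.IsVersionAllowed("0.11.0", "0.12.0")) // minor mismatch
	fmt.Println(v1alpha1.IsVersionAllowed("0.11.0", "dev"))    // dev always allowed
	// Output:
	// true
	// false
	// true
}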

View File

@@ -0,0 +1,60 @@
package v1alpha1_test
import (
"testing"
"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
"github.com/stretchr/testify/assert"
)
func TestIsVersionAllowed(t *testing.T) {
t.Parallel()
tt := map[string]struct {
resourceVersion string
buildVersion string
want bool
}{
"dev should always be allowed": {
resourceVersion: "0.11.0",
buildVersion: "dev",
want: true,
},
"resourceVersion is not semver": {
resourceVersion: "dev",
buildVersion: "0.11.0",
want: false,
},
"buildVersion is not semver": {
resourceVersion: "0.11.0",
buildVersion: "NA",
want: false,
},
"major version mismatch": {
resourceVersion: "0.11.0",
buildVersion: "1.11.0",
want: false,
},
"minor version mismatch": {
resourceVersion: "0.11.0",
buildVersion: "0.10.0",
want: false,
},
"patch version mismatch": {
resourceVersion: "0.11.1",
buildVersion: "0.11.0",
want: true,
},
"arbitrary version match": {
resourceVersion: "abc",
buildVersion: "abc",
want: true,
},
}
for name, tc := range tt {
t.Run(name, func(t *testing.T) {
got := v1alpha1.IsVersionAllowed(tc.resourceVersion, tc.buildVersion)
assert.Equal(t, tc.want, got)
})
}
}

View File

@@ -22,6 +22,7 @@ package v1alpha1
import (
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
@@ -99,7 +100,12 @@ func (in *AutoscalingListenerSpec) DeepCopyInto(out *AutoscalingListenerSpec) {
}
if in.GitHubServerTLS != nil {
in, out := &in.GitHubServerTLS, &out.GitHubServerTLS
*out = new(GitHubServerTLSConfig)
*out = new(TLSConfig)
(*in).DeepCopyInto(*out)
}
if in.VaultConfig != nil {
in, out := &in.VaultConfig, &out.VaultConfig
*out = new(VaultConfig)
(*in).DeepCopyInto(*out)
}
if in.Metrics != nil {
@@ -208,7 +214,12 @@ func (in *AutoscalingRunnerSetSpec) DeepCopyInto(out *AutoscalingRunnerSetSpec)
}
if in.GitHubServerTLS != nil {
in, out := &in.GitHubServerTLS, &out.GitHubServerTLS
*out = new(GitHubServerTLSConfig)
*out = new(TLSConfig)
(*in).DeepCopyInto(*out)
}
if in.VaultConfig != nil {
in, out := &in.VaultConfig, &out.VaultConfig
*out = new(VaultConfig)
(*in).DeepCopyInto(*out)
}
in.Template.DeepCopyInto(&out.Template)
@@ -259,6 +270,21 @@ func (in *AutoscalingRunnerSetStatus) DeepCopy() *AutoscalingRunnerSetStatus {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AzureKeyVaultConfig) DeepCopyInto(out *AzureKeyVaultConfig) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureKeyVaultConfig.
func (in *AzureKeyVaultConfig) DeepCopy() *AzureKeyVaultConfig {
if in == nil {
return nil
}
out := new(AzureKeyVaultConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CounterMetric) DeepCopyInto(out *CounterMetric) {
*out = *in
@@ -431,14 +457,19 @@ func (in *EphemeralRunnerSetStatus) DeepCopy() *EphemeralRunnerSetStatus {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EphemeralRunnerSpec) DeepCopyInto(out *EphemeralRunnerSpec) {
*out = *in
if in.GitHubServerTLS != nil {
in, out := &in.GitHubServerTLS, &out.GitHubServerTLS
*out = new(TLSConfig)
(*in).DeepCopyInto(*out)
}
if in.Proxy != nil {
in, out := &in.Proxy, &out.Proxy
*out = new(ProxyConfig)
(*in).DeepCopyInto(*out)
}
if in.GitHubServerTLS != nil {
in, out := &in.GitHubServerTLS, &out.GitHubServerTLS
*out = new(GitHubServerTLSConfig)
if in.VaultConfig != nil {
in, out := &in.VaultConfig, &out.VaultConfig
*out = new(VaultConfig)
(*in).DeepCopyInto(*out)
}
in.PodTemplateSpec.DeepCopyInto(&out.PodTemplateSpec)
@@ -459,9 +490,9 @@ func (in *EphemeralRunnerStatus) DeepCopyInto(out *EphemeralRunnerStatus) {
*out = *in
if in.Failures != nil {
in, out := &in.Failures, &out.Failures
*out = make(map[string]bool, len(*in))
*out = make(map[string]metav1.Time, len(*in))
for key, val := range *in {
(*out)[key] = val
(*out)[key] = *val.DeepCopy()
}
}
}
@@ -496,26 +527,6 @@ func (in *GaugeMetric) DeepCopy() *GaugeMetric {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GitHubServerTLSConfig) DeepCopyInto(out *GitHubServerTLSConfig) {
*out = *in
if in.CertificateFrom != nil {
in, out := &in.CertificateFrom, &out.CertificateFrom
*out = new(TLSCertificateSource)
(*in).DeepCopyInto(*out)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitHubServerTLSConfig.
func (in *GitHubServerTLSConfig) DeepCopy() *GitHubServerTLSConfig {
if in == nil {
return nil
}
out := new(GitHubServerTLSConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HistogramMetric) DeepCopyInto(out *HistogramMetric) {
*out = *in
@@ -668,3 +679,48 @@ func (in *TLSCertificateSource) DeepCopy() *TLSCertificateSource {
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TLSConfig) DeepCopyInto(out *TLSConfig) {
*out = *in
if in.CertificateFrom != nil {
in, out := &in.CertificateFrom, &out.CertificateFrom
*out = new(TLSCertificateSource)
(*in).DeepCopyInto(*out)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSConfig.
func (in *TLSConfig) DeepCopy() *TLSConfig {
if in == nil {
return nil
}
out := new(TLSConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VaultConfig) DeepCopyInto(out *VaultConfig) {
*out = *in
if in.AzureKeyVault != nil {
in, out := &in.AzureKeyVault, &out.AzureKeyVault
*out = new(AzureKeyVaultConfig)
**out = **in
}
if in.Proxy != nil {
in, out := &in.Proxy, &out.Proxy
*out = new(ProxyConfig)
(*in).DeepCopyInto(*out)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VaultConfig.
func (in *VaultConfig) DeepCopy() *VaultConfig {
if in == nil {
return nil
}
out := new(VaultConfig)
in.DeepCopyInto(out)
return out
}

View File

@@ -215,10 +215,10 @@ func (rs *RunnerSpec) validateRepository() error {
foundCount += 1
}
if foundCount == 0 {
return errors.New("Spec needs enterprise, organization or repository")
return errors.New("spec needs enterprise, organization or repository")
}
if foundCount > 1 {
return errors.New("Spec cannot have many fields defined enterprise, organization and repository")
return errors.New("spec cannot have many fields defined enterprise, organization and repository")
}
return nil
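Lowercasing these messages follows the Go convention (staticcheck ST1005) that error strings are neither capitalized nor punctuated, because callers usually wrap them into a longer message. An illustrative wrapper, not part of this diff:

package v1alpha1

import "fmt"

// wrapValidation shows why the lowercase form reads better once wrapped.
func wrapValidation(rs *RunnerSpec) error {
	if err := rs.validateRepository(); err != nil {
		// yields e.g. "invalid runner spec: spec needs enterprise, organization or repository"
		return fmt.Errorf("invalid runner spec: %w", err)
	}
	return nil
}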

View File

@@ -1,9 +1,11 @@
# This file defines the config for "ct" (chart tester) used by the helm linting GitHub workflow
remote: origin
target-branch: master
lint-conf: charts/.ci/lint-config.yaml
chart-repos:
- jetstack=https://charts.jetstack.io
check-version-increment: false # Disable checking that the chart version has been bumped
charts:
- charts/gha-runner-scale-set-controller
- charts/gha-runner-scale-set
- charts/gha-runner-scale-set-controller
- charts/gha-runner-scale-set
skip-clean-up: true

View File

@@ -1,7 +1,9 @@
# This file defines the config for "ct" (chart tester) used by the helm linting GitHub workflow
remote: origin
target-branch: master
lint-conf: charts/.ci/lint-config.yaml
chart-repos:
- jetstack=https://charts.jetstack.io
check-version-increment: false # Disable checking that the chart version has been bumped
charts:
- charts/actions-runner-controller
- charts/actions-runner-controller

View File

@@ -1,6 +1,5 @@
#!/bin/bash
for chart in `ls charts`;
do
helm template --values charts/$chart/ci/ci-values.yaml charts/$chart | kube-score score - \
@@ -12,4 +11,4 @@ helm template --values charts/$chart/ci/ci-values.yaml charts/$chart | kube-scor
--enable-optional-test container-security-context-privileged \
--enable-optional-test container-security-context-readonlyrootfilesystem \
--ignore-test container-security-context
done
done

View File

@@ -44,7 +44,7 @@ All additional docs are kept in the `docs/` folder, this README is solely for do
| `image.pullPolicy` | The pull policy of the controller image | IfNotPresent |
| `metrics.serviceMonitor.enable` | Deploy serviceMonitor kind for use with prometheus-operator CRDs | false |
| `metrics.serviceMonitor.interval` | Configure the interval at which Prometheus should scrape the controller's metrics | 1m |
| `metrics.serviceMonitor.namespace | Namespace which Prometheus is running in | `Release.Namespace` (the default namespace of the helm chart). |
| `metrics.serviceMonitor.namespace` | Namespace which Prometheus is running in | `Release.Namespace` (the default namespace of the helm chart). |
| `metrics.serviceMonitor.timeout` | Configure the timeout of Prometheus scraping. | 30s |
| `metrics.serviceAnnotations` | Set annotations for the provisioned metrics service resource | |
| `metrics.port` | Set port of metrics service | 8443 |

View File

@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.17.2
controller-gen.kubebuilder.io/version: v0.19.0
name: horizontalrunnerautoscalers.actions.summerwind.dev
spec:
group: actions.summerwind.dev
@@ -12,306 +12,313 @@ spec:
listKind: HorizontalRunnerAutoscalerList
plural: horizontalrunnerautoscalers
shortNames:
- hra
- hra
singular: horizontalrunnerautoscaler
scope: Namespaced
versions:
- additionalPrinterColumns:
- jsonPath: .spec.minReplicas
name: Min
type: number
- jsonPath: .spec.maxReplicas
name: Max
type: number
- jsonPath: .status.desiredReplicas
name: Desired
type: number
- jsonPath: .status.scheduledOverridesSummary
name: Schedule
type: string
name: v1alpha1
schema:
openAPIV3Schema:
description: HorizontalRunnerAutoscaler is the Schema for the horizontalrunnerautoscaler API
properties:
apiVersion:
description: |-
APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
description: |-
Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
spec:
description: HorizontalRunnerAutoscalerSpec defines the desired state of HorizontalRunnerAutoscaler
properties:
capacityReservations:
items:
description: |-
CapacityReservation specifies the number of replicas temporarily added
to the scale target until ExpirationTime.
properties:
effectiveTime:
format: date-time
type: string
expirationTime:
format: date-time
type: string
name:
type: string
replicas:
type: integer
type: object
type: array
githubAPICredentialsFrom:
properties:
secretRef:
properties:
name:
type: string
required:
- name
type: object
type: object
maxReplicas:
description: MaxReplicas is the maximum number of replicas the deployment is allowed to scale
type: integer
metrics:
description: Metrics is the collection of various metric targets to calculate desired number of runners
items:
properties:
repositoryNames:
description: |-
RepositoryNames is the list of repository names to be used for calculating the metric.
For example, a repository name is the REPO part of `github.com/USER/REPO`.
items:
type: string
type: array
scaleDownAdjustment:
description: |-
ScaleDownAdjustment is the number of runners removed on scale-down.
You can only specify either ScaleDownFactor or ScaleDownAdjustment.
type: integer
scaleDownFactor:
description: |-
ScaleDownFactor is the multiplicative factor applied to the current number of runners used
to determine how many pods should be removed.
type: string
scaleDownThreshold:
description: |-
ScaleDownThreshold is the percentage of busy runners less than which will
trigger the hpa to scale the runners down.
type: string
scaleUpAdjustment:
description: |-
ScaleUpAdjustment is the number of runners added on scale-up.
You can only specify either ScaleUpFactor or ScaleUpAdjustment.
type: integer
scaleUpFactor:
description: |-
ScaleUpFactor is the multiplicative factor applied to the current number of runners used
to determine how many pods should be added.
type: string
scaleUpThreshold:
description: |-
ScaleUpThreshold is the percentage of busy runners greater than which will
trigger the hpa to scale runners up.
type: string
type:
description: |-
Type is the type of metric to be used for autoscaling.
It can be TotalNumberOfQueuedAndInProgressWorkflowRuns or PercentageRunnersBusy.
type: string
type: object
type: array
minReplicas:
description: MinReplicas is the minimum number of replicas the deployment is allowed to scale
type: integer
scaleDownDelaySecondsAfterScaleOut:
- additionalPrinterColumns:
- jsonPath: .spec.minReplicas
name: Min
type: number
- jsonPath: .spec.maxReplicas
name: Max
type: number
- jsonPath: .status.desiredReplicas
name: Desired
type: number
- jsonPath: .status.scheduledOverridesSummary
name: Schedule
type: string
name: v1alpha1
schema:
openAPIV3Schema:
description: HorizontalRunnerAutoscaler is the Schema for the horizontalrunnerautoscaler
API
properties:
apiVersion:
description: |-
APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
description: |-
Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
spec:
description: HorizontalRunnerAutoscalerSpec defines the desired state
of HorizontalRunnerAutoscaler
properties:
capacityReservations:
items:
description: |-
ScaleDownDelaySecondsAfterScaleUp is the approximate delay for a scale down followed by a scale up
Used to prevent flapping (down->up->down->... loop)
type: integer
scaleTargetRef:
description: ScaleTargetRef is the reference to scaled resource like RunnerDeployment
CapacityReservation specifies the number of replicas temporarily added
to the scale target until ExpirationTime.
properties:
kind:
description: Kind is the type of resource being referenced
enum:
- RunnerDeployment
- RunnerSet
effectiveTime:
format: date-time
type: string
expirationTime:
format: date-time
type: string
name:
description: Name is the name of resource being referenced
type: string
replicas:
type: integer
type: object
scaleUpTriggers:
description: |-
ScaleUpTriggers is an experimental feature to increase the desired replicas by 1
on each webhook request received by the webhookBasedAutoscaler.
This feature requires you to also enable and deploy the webhookBasedAutoscaler onto your cluster.
Note that the added runners remain until the next sync period at least,
and they may or may not be used by GitHub Actions depending on the timing.
They are intended to be used to gain "resource slack" immediately after you
receive a webhook from GitHub, so that you can loosely expect MinReplicas runners to be always available.
items:
type: array
githubAPICredentialsFrom:
properties:
secretRef:
properties:
amount:
type: integer
duration:
type: string
githubEvent:
properties:
checkRun:
description: https://docs.github.com/en/actions/reference/events-that-trigger-workflows#check_run
properties:
names:
description: |-
Names is a list of GitHub Actions glob patterns.
Any check_run event whose name matches one of patterns in the list can trigger autoscaling.
Note that the check_run name seems to equal the job name you've defined in your actions workflow yaml file.
So it is very likely that you can utilize this to trigger depending on the job.
items:
type: string
type: array
repositories:
description: |-
Repositories is a list of GitHub repositories.
Any check_run event whose repository matches one of repositories in the list can trigger autoscaling.
items:
type: string
type: array
status:
type: string
types:
description: 'One of: created, rerequested, or completed'
items:
type: string
type: array
type: object
pullRequest:
description: https://docs.github.com/en/actions/reference/events-that-trigger-workflows#pull_request
properties:
branches:
items:
type: string
type: array
types:
items:
type: string
type: array
type: object
push:
description: |-
PushSpec is the condition for triggering scale-up on push event
Also see https://docs.github.com/en/actions/reference/events-that-trigger-workflows#push
type: object
workflowJob:
description: https://docs.github.com/en/developers/webhooks-and-events/webhooks/webhook-events-and-payloads#workflow_job
type: object
type: object
type: object
type: array
scheduledOverrides:
description: |-
ScheduledOverrides is the list of ScheduledOverride.
It can be used to override a few fields of HorizontalRunnerAutoscalerSpec on schedule.
The earlier a scheduled override is, the higher it is prioritized.
items:
description: |-
ScheduledOverride can be used to override a few fields of HorizontalRunnerAutoscalerSpec on schedule.
A schedule can optionally be recurring, so that the corresponding override happens every day, week, month, or year.
properties:
endTime:
description: EndTime is the time at which the first override ends.
format: date-time
type: string
minReplicas:
description: |-
MinReplicas is the number of runners while overriding.
If omitted, it doesn't override minReplicas.
minimum: 0
nullable: true
type: integer
recurrenceRule:
properties:
frequency:
description: |-
Frequency is the name of a predefined interval of each recurrence.
The valid values are "Daily", "Weekly", "Monthly", and "Yearly".
If empty, the corresponding override happens only once.
enum:
- Daily
- Weekly
- Monthly
- Yearly
type: string
untilTime:
description: |-
UntilTime is the time of the final recurrence.
If empty, the schedule recurs forever.
format: date-time
type: string
type: object
startTime:
description: StartTime is the time at which the first override starts.
format: date-time
name:
type: string
required:
- endTime
- startTime
- name
type: object
type: array
type: object
status:
properties:
cacheEntries:
items:
properties:
expirationTime:
format: date-time
type: object
maxReplicas:
description: MaxReplicas is the maximum number of replicas the deployment
is allowed to scale
type: integer
metrics:
description: Metrics is the collection of various metric targets to
calculate desired number of runners
items:
properties:
repositoryNames:
description: |-
RepositoryNames is the list of repository names to be used for calculating the metric.
For example, a repository name is the REPO part of `github.com/USER/REPO`.
items:
type: string
key:
type: string
value:
type: integer
type: object
type: array
desiredReplicas:
type: array
scaleDownAdjustment:
description: |-
ScaleDownAdjustment is the number of runners removed on scale-down.
You can only specify either ScaleDownFactor or ScaleDownAdjustment.
type: integer
scaleDownFactor:
description: |-
ScaleDownFactor is the multiplicative factor applied to the current number of runners used
to determine how many pods should be removed.
type: string
scaleDownThreshold:
description: |-
ScaleDownThreshold is the percentage of busy runners less than which will
trigger the hpa to scale the runners down.
type: string
scaleUpAdjustment:
description: |-
ScaleUpAdjustment is the number of runners added on scale-up.
You can only specify either ScaleUpFactor or ScaleUpAdjustment.
type: integer
scaleUpFactor:
description: |-
ScaleUpFactor is the multiplicative factor applied to the current number of runners used
to determine how many pods should be added.
type: string
scaleUpThreshold:
description: |-
ScaleUpThreshold is the percentage of busy runners greater than which will
trigger the hpa to scale runners up.
type: string
type:
description: |-
Type is the type of metric to be used for autoscaling.
It can be TotalNumberOfQueuedAndInProgressWorkflowRuns or PercentageRunnersBusy.
type: string
type: object
type: array
minReplicas:
description: MinReplicas is the minimum number of replicas the deployment
is allowed to scale
type: integer
scaleDownDelaySecondsAfterScaleOut:
description: |-
ScaleDownDelaySecondsAfterScaleUp is the approximate delay for a scale down followed by a scale up
Used to prevent flapping (down->up->down->... loop)
type: integer
scaleTargetRef:
description: ScaleTargetRef is the reference to scaled resource like
RunnerDeployment
properties:
kind:
description: Kind is the type of resource being referenced
enum:
- RunnerDeployment
- RunnerSet
type: string
name:
description: Name is the name of resource being referenced
type: string
type: object
scaleUpTriggers:
description: |-
ScaleUpTriggers is an experimental feature to increase the desired replicas by 1
on each webhook request received by the webhookBasedAutoscaler.
This feature requires you to also enable and deploy the webhookBasedAutoscaler onto your cluster.
Note that the added runners remain until the next sync period at least,
and they may or may not be used by GitHub Actions depending on the timing.
They are intended to be used to gain "resource slack" immediately after you
receive a webhook from GitHub, so that you can loosely expect MinReplicas runners to be always available.
items:
properties:
amount:
type: integer
duration:
type: string
githubEvent:
properties:
checkRun:
description: https://docs.github.com/en/actions/reference/events-that-trigger-workflows#check_run
properties:
names:
description: |-
Names is a list of GitHub Actions glob patterns.
Any check_run event whose name matches one of patterns in the list can trigger autoscaling.
Note that the check_run name seems to equal the job name you've defined in your actions workflow yaml file.
So it is very likely that you can utilize this to trigger depending on the job.
items:
type: string
type: array
repositories:
description: |-
Repositories is a list of GitHub repositories.
Any check_run event whose repository matches one of repositories in the list can trigger autoscaling.
items:
type: string
type: array
status:
type: string
types:
description: 'One of: created, rerequested, or completed'
items:
type: string
type: array
type: object
pullRequest:
description: https://docs.github.com/en/actions/reference/events-that-trigger-workflows#pull_request
properties:
branches:
items:
type: string
type: array
types:
items:
type: string
type: array
type: object
push:
description: |-
PushSpec is the condition for triggering scale-up on push event
Also see https://docs.github.com/en/actions/reference/events-that-trigger-workflows#push
type: object
workflowJob:
description: https://docs.github.com/en/developers/webhooks-and-events/webhooks/webhook-events-and-payloads#workflow_job
type: object
type: object
type: object
type: array
scheduledOverrides:
description: |-
ScheduledOverrides is the list of ScheduledOverride.
It can be used to override a few fields of HorizontalRunnerAutoscalerSpec on schedule.
The earlier a scheduled override is, the higher it is prioritized.
items:
description: |-
DesiredReplicas is the total number of desired, non-terminated and latest pods to be set for the primary RunnerSet
This doesn't include outdated pods while upgrading the deployment and replacing the runnerset.
type: integer
lastSuccessfulScaleOutTime:
format: date-time
nullable: true
type: string
observedGeneration:
description: |-
ObservedGeneration is the most recent generation observed for the target. It corresponds to e.g.
RunnerDeployment's generation, which is updated on mutation by the API Server.
format: int64
type: integer
scheduledOverridesSummary:
description: |-
ScheduledOverridesSummary is the summary of active and upcoming scheduled overrides to be shown in e.g. a column of a `kubectl get hra` output
for observability.
type: string
type: object
type: object
served: true
storage: true
subresources:
status: {}
preserveUnknownFields: false
ScheduledOverride can be used to override a few fields of HorizontalRunnerAutoscalerSpec on schedule.
A schedule can optionally be recurring, so that the corresponding override happens every day, week, month, or year.
properties:
endTime:
description: EndTime is the time at which the first override
ends.
format: date-time
type: string
minReplicas:
description: |-
MinReplicas is the number of runners while overriding.
If omitted, it doesn't override minReplicas.
minimum: 0
nullable: true
type: integer
recurrenceRule:
properties:
frequency:
description: |-
Frequency is the name of a predefined interval of each recurrence.
The valid values are "Daily", "Weekly", "Monthly", and "Yearly".
If empty, the corresponding override happens only once.
enum:
- Daily
- Weekly
- Monthly
- Yearly
type: string
untilTime:
description: |-
UntilTime is the time of the final recurrence.
If empty, the schedule recurs forever.
format: date-time
type: string
type: object
startTime:
description: StartTime is the time at which the first override
starts.
format: date-time
type: string
required:
- endTime
- startTime
type: object
type: array
type: object
status:
properties:
cacheEntries:
items:
properties:
expirationTime:
format: date-time
type: string
key:
type: string
value:
type: integer
type: object
type: array
desiredReplicas:
description: |-
DesiredReplicas is the total number of desired, non-terminated and latest pods to be set for the primary RunnerSet
This doesn't include outdated pods while upgrading the deployment and replacing the runnerset.
type: integer
lastSuccessfulScaleOutTime:
format: date-time
nullable: true
type: string
observedGeneration:
description: |-
ObservedGeneration is the most recent generation observed for the target. It corresponds to e.g.
RunnerDeployment's generation, which is updated on mutation by the API Server.
format: int64
type: integer
scheduledOverridesSummary:
description: |-
ScheduledOverridesSummary is the summary of active and upcoming scheduled overrides to be shown in e.g. a column of a `kubectl get hra` output
for observability.
type: string
type: object
type: object
served: true
storage: true
subresources:
status: {}

View File

@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.17.2
controller-gen.kubebuilder.io/version: v0.19.0
name: runnersets.actions.summerwind.dev
spec:
group: actions.summerwind.dev
@@ -554,7 +554,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
Also, matchLabelKeys cannot be set when labelSelector isn't set.
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -569,7 +568,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -730,7 +728,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
Also, matchLabelKeys cannot be set when labelSelector isn't set.
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -745,7 +742,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -834,8 +830,8 @@ spec:
most preferred is the one with the greatest sum of weights, i.e.
for each node that meets all of the scheduling requirements (resource
request, requiredDuringScheduling anti-affinity expressions, etc.),
compute a sum by iterating through the elements of this field and adding
"weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the
compute a sum by iterating through the elements of this field and subtracting
"weight" from the sum if the node has pods which matches the corresponding podAffinityTerm; the
node(s) with the highest sum are the most preferred.
items:
description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)
@@ -899,7 +895,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
Also, matchLabelKeys cannot be set when labelSelector isn't set.
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -914,7 +909,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -1075,7 +1069,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
Also, matchLabelKeys cannot be set when labelSelector isn't set.
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -1090,7 +1083,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -1217,7 +1209,9 @@ spec:
description: EnvVar represents an environment variable present in a Container.
properties:
name:
description: Name of the environment variable. Must be a C_IDENTIFIER.
description: |-
Name of the environment variable.
May consist of any printable ASCII characters except '='.
type: string
value:
description: |-
@@ -1271,6 +1265,42 @@ spec:
- fieldPath
type: object
x-kubernetes-map-type: atomic
fileKeyRef:
description: |-
FileKeyRef selects a key of the env file.
Requires the EnvFiles feature gate to be enabled.
properties:
key:
description: |-
The key within the env file. An invalid key will prevent the pod from starting.
The keys defined within a source may consist of any printable ASCII characters except '='.
During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters.
type: string
optional:
default: false
description: |-
Specify whether the file or its key must be defined. If the file or key
does not exist, then the env var is not published.
If optional is set to true and the specified key does not exist,
the environment variable will not be set in the Pod's containers.
If optional is set to false and the specified key does not exist,
an error will be returned during Pod creation.
type: boolean
path:
description: |-
The path within the volume from which to select the file.
Must be relative and may not contain the '..' path or start with '..'.
type: string
volumeName:
description: The name of the volume mount containing the env file.
type: string
required:
- key
- path
- volumeName
type: object
x-kubernetes-map-type: atomic
resourceFieldRef:
description: |-
Selects a resource of the container: only resources limits and requests
@@ -1326,13 +1356,13 @@ spec:
envFrom:
description: |-
List of sources to populate environment variables in the container.
The keys defined within a source must be a C_IDENTIFIER. All invalid keys
will be reported as an event when the container is starting. When a key exists in multiple
The keys defined within a source may consist of any printable ASCII characters except '='.
When a key exists in multiple
sources, the value associated with the last source will take precedence.
Values defined by an Env with a duplicate key will take precedence.
Cannot be updated.
items:
description: EnvFromSource represents the source of a set of ConfigMaps
description: EnvFromSource represents the source of a set of ConfigMaps or Secrets
properties:
configMapRef:
description: The ConfigMap to select from
@@ -1352,7 +1382,9 @@ spec:
type: object
x-kubernetes-map-type: atomic
prefix:
description: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.
description: |-
Optional text to prepend to the name of each environment variable.
May consist of any printable ASCII characters except '='.
type: string
secretRef:
description: The Secret to select from
@@ -1601,6 +1633,12 @@ spec:
- port
type: object
type: object
stopSignal:
description: |-
StopSignal defines which signal will be sent to a container when it is being stopped.
If not specified, the default is defined by the container runtime in use.
StopSignal can only be set for Pods with a non-empty .spec.os.name
type: string
type: object
livenessProbe:
description: |-
@@ -1991,7 +2029,7 @@ spec:
Claims lists the names of resources, defined in spec.resourceClaims,
that are used by this container.
This is an alpha field and requires enabling the
This field depends on the
DynamicResourceAllocation feature gate.
This field is immutable. It can only be set for containers.
@@ -2042,10 +2080,10 @@ spec:
restartPolicy:
description: |-
RestartPolicy defines the restart behavior of individual containers in a pod.
This field may only be set for init containers, and the only allowed value is "Always".
For non-init containers or when this field is not specified,
This overrides the pod-level restart policy. When this field is not specified,
the restart behavior is defined by the Pod's restart policy and the container type.
Setting the RestartPolicy as "Always" for the init container will have the following effect:
Additionally, setting the RestartPolicy as "Always" for the init container will
have the following effect:
this init container will be continually restarted on
exit until all regular containers have terminated. Once all regular
containers have completed, all init containers with restartPolicy "Always"
@@ -2057,6 +2095,57 @@ spec:
init container is started, or after any startupProbe has successfully
completed.
type: string
restartPolicyRules:
description: |-
Represents a list of rules to be checked to determine if the
container should be restarted on exit. The rules are evaluated in
order. Once a rule matches a container exit condition, the remaining
rules are ignored. If no rule matches the container exit condition,
the Container-level restart policy determines whether the container
is restarted or not. Constraints on the rules:
- At most 20 rules are allowed.
- Rules can have the same action.
- Identical rules are not forbidden in validations.
When rules are specified, the container MUST set RestartPolicy explicitly
even if it matches the Pod's RestartPolicy.
items:
description: ContainerRestartRule describes how a container exit is handled.
properties:
action:
description: |-
Specifies the action taken on a container exit if the requirements
are satisfied. The only possible value is "Restart" to restart the
container.
type: string
exitCodes:
description: Represents the exit codes to check on container exits.
properties:
operator:
description: |-
Represents the relationship between the container exit code(s) and the
specified values. Possible values are:
- In: the requirement is satisfied if the container exit code is in the
set of specified values.
- NotIn: the requirement is satisfied if the container exit code is
not in the set of specified values.
type: string
values:
description: |-
Specifies the set of values to check for container exit codes.
At most 255 elements are allowed.
items:
format: int32
type: integer
type: array
x-kubernetes-list-type: set
required:
- operator
type: object
required:
- action
type: object
type: array
x-kubernetes-list-type: atomic
securityContext:
description: |-
SecurityContext defines the security options the container should be run with.
@@ -2654,7 +2743,9 @@ spec:
description: EnvVar represents an environment variable present in a Container.
properties:
name:
description: Name of the environment variable. Must be a C_IDENTIFIER.
description: |-
Name of the environment variable.
May consist of any printable ASCII characters except '='.
type: string
value:
description: |-
@@ -2708,6 +2799,42 @@ spec:
- fieldPath
type: object
x-kubernetes-map-type: atomic
fileKeyRef:
description: |-
FileKeyRef selects a key of the env file.
Requires the EnvFiles feature gate to be enabled.
properties:
key:
description: |-
The key within the env file. An invalid key will prevent the pod from starting.
The keys defined within a source may consist of any printable ASCII characters except '='.
During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters.
type: string
optional:
default: false
description: |-
Specify whether the file or its key must be defined. If the file or key
does not exist, then the env var is not published.
If optional is set to true and the specified key does not exist,
the environment variable will not be set in the Pod's containers.
If optional is set to false and the specified key does not exist,
an error will be returned during Pod creation.
type: boolean
path:
description: |-
The path within the volume from which to select the file.
Must be relative and may not contain the '..' path or start with '..'.
type: string
volumeName:
description: The name of the volume mount containing the env file.
type: string
required:
- key
- path
- volumeName
type: object
x-kubernetes-map-type: atomic
resourceFieldRef:
description: |-
Selects a resource of the container: only resources limits and requests
@@ -2763,13 +2890,13 @@ spec:
envFrom:
description: |-
List of sources to populate environment variables in the container.
The keys defined within a source must be a C_IDENTIFIER. All invalid keys
will be reported as an event when the container is starting. When a key exists in multiple
The keys defined within a source may consist of any printable ASCII characters except '='.
When a key exists in multiple
sources, the value associated with the last source will take precedence.
Values defined by an Env with a duplicate key will take precedence.
Cannot be updated.
items:
description: EnvFromSource represents the source of a set of ConfigMaps
description: EnvFromSource represents the source of a set of ConfigMaps or Secrets
properties:
configMapRef:
description: The ConfigMap to select from
@@ -2789,7 +2916,9 @@ spec:
type: object
x-kubernetes-map-type: atomic
prefix:
description: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.
description: |-
Optional text to prepend to the name of each environment variable.
May consist of any printable ASCII characters except '='.
type: string
secretRef:
description: The Secret to select from
@@ -3034,6 +3163,12 @@ spec:
- port
type: object
type: object
stopSignal:
description: |-
StopSignal defines which signal will be sent to a container when it is being stopped.
If not specified, the default is defined by the container runtime in use.
StopSignal can only be set for Pods with a non-empty .spec.os.name
type: string
type: object
livenessProbe:
description: Probes are not allowed for ephemeral containers.
@@ -3407,7 +3542,7 @@ spec:
Claims lists the names of resources, defined in spec.resourceClaims,
that are used by this container.
This is an alpha field and requires enabling the
This field depends on the
DynamicResourceAllocation feature gate.
This field is immutable. It can only be set for containers.
@@ -3459,9 +3594,51 @@ spec:
description: |-
Restart policy for the container to manage the restart behavior of each
container within a pod.
This may only be set for init containers. You cannot set this field on
ephemeral containers.
You cannot set this field on ephemeral containers.
type: string
restartPolicyRules:
description: |-
Represents a list of rules to be checked to determine if the
container should be restarted on exit. You cannot set this field on
ephemeral containers.
items:
description: ContainerRestartRule describes how a container exit is handled.
properties:
action:
description: |-
Specifies the action taken on a container exit if the requirements
are satisfied. The only possible value is "Restart" to restart the
container.
type: string
exitCodes:
description: Represents the exit codes to check on container exits.
properties:
operator:
description: |-
Represents the relationship between the container exit code(s) and the
specified values. Possible values are:
- In: the requirement is satisfied if the container exit code is in the
set of specified values.
- NotIn: the requirement is satisfied if the container exit code is
not in the set of specified values.
type: string
values:
description: |-
Specifies the set of values to check for container exit codes.
At most 255 elements are allowed.
items:
format: int32
type: integer
type: array
x-kubernetes-list-type: set
required:
- operator
type: object
required:
- action
type: object
type: array
x-kubernetes-list-type: atomic
securityContext:
description: |-
Optional: SecurityContext defines the security options the ephemeral container should be run with.
@@ -3980,7 +4157,9 @@ spec:
hostNetwork:
description: |-
Host networking requested for this pod. Use the host's network namespace.
If this option is set, the ports that will be used must be specified.
When using HostNetwork you should specify ports so the scheduler is aware.
When `hostNetwork` is true, specified `hostPort` fields in port definitions must match `containerPort`,
and unspecified `hostPort` fields in port definitions are defaulted to match `containerPort`.
Defaults to false.
type: boolean
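As a sketch of the hostPort rule described above (all names are placeholders):

    spec:
      hostNetwork: true
      containers:
        - name: runner
          image: example/runner:latest   # placeholder image
          ports:
            - containerPort: 8080
              hostPort: 8080             # with hostNetwork: true this must equal containerPort
                                         # (and defaults to it when omitted)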
hostPID:
@@ -4005,6 +4184,19 @@ spec:
Specifies the hostname of the Pod
If not specified, the pod's hostname will be set to a system-defined value.
type: string
hostnameOverride:
description: |-
HostnameOverride specifies an explicit override for the pod's hostname as perceived by the pod.
This field only specifies the pod's hostname and does not affect its DNS records.
When this field is set to a non-empty string:
- It takes precedence over the values set in `hostname` and `subdomain`.
- The Pod's hostname will be set to this value.
- `setHostnameAsFQDN` must be nil or set to false.
- `hostNetwork` must be set to false.
This field must be a valid DNS subdomain as defined in RFC 1123 and contain at most 64 characters.
Requires the HostnameOverride feature gate to be enabled.
type: string
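A minimal sketch of the constraints listed above (the hostname value is a placeholder; requires the HostnameOverride feature gate):

    spec:
      hostnameOverride: runner-custom-host   # RFC 1123 subdomain, at most 64 characters
      hostNetwork: false                     # must be false when hostnameOverride is set
      setHostnameAsFQDN: false               # must be nil or false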
imagePullSecrets:
description: |-
ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec.
@@ -4040,7 +4232,7 @@ spec:
Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes.
The resourceRequirements of an init container are taken into account during scheduling
by finding the highest request/limit for each resource type, and then using the max of
of that value or the sum of the normal containers. Limits are applied to init containers
that value or the sum of the normal containers. Limits are applied to init containers
in a similar fashion.
Init containers cannot currently be added or removed.
Cannot be updated.
@@ -4084,7 +4276,9 @@ spec:
description: EnvVar represents an environment variable present in a Container.
properties:
name:
description: Name of the environment variable. Must be a C_IDENTIFIER.
description: |-
Name of the environment variable.
May consist of any printable ASCII characters except '='.
type: string
value:
description: |-
@@ -4138,6 +4332,42 @@ spec:
- fieldPath
type: object
x-kubernetes-map-type: atomic
fileKeyRef:
description: |-
FileKeyRef selects a key of the env file.
Requires the EnvFiles feature gate to be enabled.
properties:
key:
description: |-
The key within the env file. An invalid key will prevent the pod from starting.
The keys defined within a source may consist of any printable ASCII characters except '='.
During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters.
type: string
optional:
default: false
description: |-
Specify whether the file or its key must be defined. If the file or key
does not exist, then the env var is not published.
If optional is set to true and the specified key does not exist,
the environment variable will not be set in the Pod's containers.
If optional is set to false and the specified key does not exist,
an error will be returned during Pod creation.
type: boolean
path:
description: |-
The path within the volume from which to select the file.
Must be relative and may not contain the '..' path or start with '..'.
type: string
volumeName:
description: The name of the volume mount containing the env file.
type: string
required:
- key
- path
- volumeName
type: object
x-kubernetes-map-type: atomic
resourceFieldRef:
description: |-
Selects a resource of the container: only resources limits and requests
@@ -4193,13 +4423,13 @@ spec:
envFrom:
description: |-
List of sources to populate environment variables in the container.
The keys defined within a source must be a C_IDENTIFIER. All invalid keys
will be reported as an event when the container is starting. When a key exists in multiple
The keys defined within a source may consist of any printable ASCII characters except '='.
When a key exists in multiple
sources, the value associated with the last source will take precedence.
Values defined by an Env with a duplicate key will take precedence.
Cannot be updated.
items:
description: EnvFromSource represents the source of a set of ConfigMaps
description: EnvFromSource represents the source of a set of ConfigMaps or Secrets
properties:
configMapRef:
description: The ConfigMap to select from
@@ -4219,7 +4449,9 @@ spec:
type: object
x-kubernetes-map-type: atomic
prefix:
description: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.
description: |-
Optional text to prepend to the name of each environment variable.
May consist of any printable ASCII characters except '='.
type: string
secretRef:
description: The Secret to select from
@@ -4468,6 +4700,12 @@ spec:
- port
type: object
type: object
stopSignal:
description: |-
StopSignal defines which signal will be sent to a container when it is being stopped.
If not specified, the default is defined by the container runtime in use.
StopSignal can only be set for Pods with a non-empty .spec.os.name
type: string
type: object
livenessProbe:
description: |-
@@ -4858,7 +5096,7 @@ spec:
Claims lists the names of resources, defined in spec.resourceClaims,
that are used by this container.
This is an alpha field and requires enabling the
This field depends on the
DynamicResourceAllocation feature gate.
This field is immutable. It can only be set for containers.
@@ -4909,10 +5147,10 @@ spec:
restartPolicy:
description: |-
RestartPolicy defines the restart behavior of individual containers in a pod.
This field may only be set for init containers, and the only allowed value is "Always".
For non-init containers or when this field is not specified,
This overrides the pod-level restart policy. When this field is not specified,
the restart behavior is defined by the Pod's restart policy and the container type.
Setting the RestartPolicy as "Always" for the init container will have the following effect:
Additionally, setting the RestartPolicy as "Always" for the init container will
have the following effect:
this init container will be continually restarted on
exit until all regular containers have terminated. Once all regular
containers have completed, all init containers with restartPolicy "Always"
@@ -4924,6 +5162,57 @@ spec:
init container is started, or after any startupProbe has successfully
completed.
type: string
restartPolicyRules:
description: |-
Represents a list of rules to be checked to determine if the
container should be restarted on exit. The rules are evaluated in
order. Once a rule matches a container exit condition, the remaining
rules are ignored. If no rule matches the container exit condition,
the Container-level restart policy determines whether the container
is restarted or not. Constraints on the rules:
- At most 20 rules are allowed.
- Rules can have the same action.
- Identical rules are not forbidden in validations.
When rules are specified, the container MUST set RestartPolicy explicitly
even if it matches the Pod's RestartPolicy.
items:
description: ContainerRestartRule describes how a container exit is handled.
properties:
action:
description: |-
Specifies the action taken on a container exit if the requirements
are satisfied. The only possible value is "Restart" to restart the
container.
type: string
exitCodes:
description: Represents the exit codes to check on container exits.
properties:
operator:
description: |-
Represents the relationship between the container exit code(s) and the
specified values. Possible values are:
- In: the requirement is satisfied if the container exit code is in the
set of specified values.
- NotIn: the requirement is satisfied if the container exit code is
not in the set of specified values.
type: string
values:
description: |-
Specifies the set of values to check for container exit codes.
At most 255 elements are allowed.
items:
format: int32
type: integer
type: array
x-kubernetes-list-type: set
required:
- operator
type: object
required:
- action
type: object
type: array
x-kubernetes-list-type: atomic
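A sketch of a container using these rules, assuming the gating alpha feature is enabled (the image and exit codes are placeholders):

    containers:
      - name: runner
        image: example/runner:latest   # placeholder image
        restartPolicy: Never           # MUST be set explicitly when rules are specified
        restartPolicyRules:
          - action: Restart            # the only supported action
            exitCodes:
              operator: In             # In or NotIn
              values: [42, 43]         # int32 set, at most 255 elements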
securityContext:
description: |-
SecurityContext defines the security options the container should be run with.
@@ -5437,6 +5726,7 @@ spec:
- spec.hostPID
- spec.hostIPC
- spec.hostUsers
- spec.resources
- spec.securityContext.appArmorProfile
- spec.securityContext.seLinuxOptions
- spec.securityContext.seccompProfile
@@ -5588,7 +5878,7 @@ spec:
description: |-
Resources is the total amount of CPU and Memory resources required by all
containers in the pod. It supports specifying Requests and Limits for
"cpu" and "memory" resource names only. ResourceClaims are not supported.
"cpu", "memory" and "hugepages-" resource names only. ResourceClaims are not supported.
This field enables fine-grained control over resource allocation for the
entire pod, allowing resource sharing among containers in a pod.
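A sketch of pod-level resources as described (values are placeholders; this is gated, feature-dependent behavior):

    spec:
      resources:               # totals shared by all containers in the pod
        requests:
          cpu: "2"
          memory: 4Gi
        limits:
          cpu: "4"
          memory: 8Gi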
@@ -5601,7 +5891,7 @@ spec:
Claims lists the names of resources, defined in spec.resourceClaims,
that are used by this container.
This is an alpha field and requires enabling the
This field depends on the
DynamicResourceAllocation feature gate.
This field is immutable. It can only be set for containers.
@@ -6126,7 +6416,6 @@ spec:
- Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.
If this value is nil, the behavior is equivalent to the Honor policy.
This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
type: string
nodeTaintsPolicy:
description: |-
@@ -6137,7 +6426,6 @@ spec:
- Ignore: node taints are ignored. All nodes are included.
If this value is nil, the behavior is equivalent to the Ignore policy.
This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
type: string
topologyKey:
description: |-
@@ -6843,15 +7131,13 @@ spec:
volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim.
If specified, the CSI driver will create or update the volume with the attributes defined
in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName,
it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass
will be applied to the claim but it's not allowed to reset this field to empty string once it is set.
If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass
will be set by the persistentvolume controller if it exists.
it can be changed after the claim is created. An empty string or nil value indicates that no
VolumeAttributesClass will be applied to the claim. If the claim enters an Infeasible error state,
this field can be reset to its previous value (including nil) to cancel the modification.
If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource
exists.
More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
(Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).
type: string
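For reference, a sketch of a PVC using this field (the class name is a placeholder; the VolumeAttributesClass feature gate must be enabled):

    apiVersion: v1
    kind: PersistentVolumeClaim
    metadata:
      name: runner-work                 # placeholder name
    spec:
      accessModes: ["ReadWriteOnce"]
      resources:
        requests:
          storage: 10Gi
      volumeAttributesClassName: gold   # placeholder class; empty/nil means none is applied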
volumeMode:
description: |-
@@ -7025,12 +7311,9 @@ spec:
description: |-
glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.
Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported.
More info: https://examples.k8s.io/volumes/glusterfs/README.md
properties:
endpoints:
description: |-
endpoints is the endpoint name that details Glusterfs topology.
More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
description: endpoints is the endpoint name that details Glusterfs topology.
type: string
path:
description: |-
@@ -7084,7 +7367,7 @@ spec:
The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field.
The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images.
The volume will be mounted read-only (ro) and non-executable files (noexec).
Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath).
Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath) before 1.33.
The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type.
properties:
pullPolicy:
@@ -7109,7 +7392,7 @@ spec:
description: |-
iscsi represents an ISCSI Disk resource that is attached to a
kubelet's host machine and then exposed to the pod.
More info: https://examples.k8s.io/volumes/iscsi/README.md
More info: https://kubernetes.io/docs/concepts/storage/volumes/#iscsi
properties:
chapAuthDiscovery:
description: chapAuthDiscovery defines whether iSCSI Discovery CHAP authentication is supported
@@ -7499,6 +7782,110 @@ spec:
type: array
x-kubernetes-list-type: atomic
type: object
podCertificate:
description: |-
Projects an auto-rotating credential bundle (private key and certificate
chain) that the pod can use either as a TLS client or server.
Kubelet generates a private key and uses it to send a
PodCertificateRequest to the named signer. Once the signer approves the
request and issues a certificate chain, Kubelet writes the key and
certificate chain to the pod filesystem. The pod does not start until
certificates have been issued for each podCertificate projected volume
source in its spec.
Kubelet will begin trying to rotate the certificate at the time indicated
by the signer using the PodCertificateRequest.Status.BeginRefreshAt
timestamp.
Kubelet can write a single file, indicated by the credentialBundlePath
field, or separate files, indicated by the keyPath and
certificateChainPath fields.
The credential bundle is a single file in PEM format. The first PEM
entry is the private key (in PKCS#8 format), and the remaining PEM
entries are the certificate chain issued by the signer (typically,
signers will return their certificate chain in leaf-to-root order).
Prefer using the credential bundle format, since your application code
can read it atomically. If you use keyPath and certificateChainPath,
your application must make two separate file reads. If these coincide
with a certificate rotation, it is possible that the private key and leaf
certificate you read may not correspond to each other. Your application
will need to check for this condition, and re-read until they are
consistent.
The named signer chooses the format of the certificate it
issues; consult the signer implementation's documentation to learn how to
use the certificates it issues.
properties:
certificateChainPath:
description: |-
Write the certificate chain at this path in the projected volume.
Most applications should use credentialBundlePath. When using keyPath
and certificateChainPath, your application needs to check that the key
and leaf certificate are consistent, because it is possible to read the
files mid-rotation.
type: string
credentialBundlePath:
description: |-
Write the credential bundle at this path in the projected volume.
The credential bundle is a single file that contains multiple PEM blocks.
The first PEM block is a PRIVATE KEY block, containing a PKCS#8 private
key.
The remaining blocks are CERTIFICATE blocks, containing the issued
certificate chain from the signer (leaf and any intermediates).
Using credentialBundlePath lets your Pod's application code make a single
atomic read that retrieves a consistent key and certificate chain. If you
project them to separate files, your application code will need to
additionally check that the leaf certificate was issued to the key.
type: string
keyPath:
description: |-
Write the key at this path in the projected volume.
Most applications should use credentialBundlePath. When using keyPath
and certificateChainPath, your application needs to check that the key
and leaf certificate are consistent, because it is possible to read the
files mid-rotation.
type: string
keyType:
description: |-
The type of keypair Kubelet will generate for the pod.
Valid values are "RSA3072", "RSA4096", "ECDSAP256", "ECDSAP384",
"ECDSAP521", and "ED25519".
type: string
maxExpirationSeconds:
description: |-
maxExpirationSeconds is the maximum lifetime permitted for the
certificate.
Kubelet copies this value verbatim into the PodCertificateRequests it
generates for this projection.
If omitted, kube-apiserver will set it to 86400 (24 hours). kube-apiserver
will reject values shorter than 3600 (1 hour). The maximum allowable
value is 7862400 (91 days).
The signer implementation is then free to issue a certificate with any
lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600
seconds (1 hour). This constraint is enforced by kube-apiserver.
`kubernetes.io` signers will never issue certificates with a lifetime
longer than 24 hours.
format: int32
type: integer
signerName:
description: Kubelet's generated CSRs will be addressed to this signer.
type: string
required:
- keyType
- signerName
type: object
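A hedged sketch of a projected volume using this source (the signer name is a placeholder):

    volumes:
      - name: pod-certs
        projected:
          sources:
            - podCertificate:
                signerName: example.com/my-signer     # placeholder signer
                keyType: ECDSAP256                    # one of the listed key types
                credentialBundlePath: credbundle.pem  # single PEM file: key first, then chain
                maxExpirationSeconds: 86400           # default; min 3600, max 7862400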
secret:
description: secret information about the secret data to project
properties:
@@ -7628,7 +8015,6 @@ spec:
description: |-
rbd represents a Rados Block Device mount on the host that shares a pod's lifetime.
Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported.
More info: https://examples.k8s.io/volumes/rbd/README.md
properties:
fsType:
description: |-
@@ -8170,15 +8556,13 @@ spec:
volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim.
If specified, the CSI driver will create or update the volume with the attributes defined
in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName,
it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass
will be applied to the claim but it's not allowed to reset this field to empty string once it is set.
If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass
will be set by the persistentvolume controller if it exists.
it can be changed after the claim is created. An empty string or nil value indicates that no
VolumeAttributesClass will be applied to the claim. If the claim enters an Infeasible error state,
this field can be reset to its previous value (including nil) to cancel the modification.
If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource
exists.
More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
(Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).
type: string
volumeMode:
description: |-
@@ -8278,13 +8662,11 @@ spec:
description: |-
currentVolumeAttributesClassName is the current name of the VolumeAttributesClass the PVC is using.
When unset, there is no VolumeAttributeClass applied to this PersistentVolumeClaim
This is a beta field and requires enabling VolumeAttributesClass feature (off by default).
type: string
modifyVolumeStatus:
description: |-
ModifyVolumeStatus represents the status object of ControllerModifyVolume operation.
When this is unset, there is no ModifyVolume operation being attempted.
This is a beta field and requires enabling VolumeAttributesClass feature (off by default).
properties:
status:
description: "status is the status of the ControllerModifyVolume operation. It can be in any of following states:\n - Pending\n Pending indicates that the PersistentVolumeClaim cannot be modified due to unmet requirements, such as\n the specified VolumeAttributesClass not existing.\n - InProgress\n InProgress indicates that the volume is being modified.\n - Infeasible\n Infeasible indicates that the request has been rejected as invalid by the CSI driver. To\n\t resolve the error, a valid VolumeAttributesClass needs to be specified.\nNote: New statuses can be added in the future. Consumers should check for unknown statuses and fail appropriately."
@@ -8355,7 +8737,6 @@ spec:
type: object
required:
- selector
- serviceName
- template
type: object
status:
@@ -8389,4 +8770,3 @@ spec:
storage: true
subresources:
status: {}
preserveUnknownFields: false


@@ -15,13 +15,13 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.11.0
version: 0.13.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "0.11.0"
appVersion: "0.13.0"
home: https://github.com/actions/actions-runner-controller


@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.17.2
controller-gen.kubebuilder.io/version: v0.19.0
name: ephemeralrunners.actions.github.com
spec:
group: actions.github.com
@@ -36,6 +36,9 @@ spec:
- jsonPath: .status.jobDisplayName
name: JobDisplayName
type: string
- jsonPath: .status.jobId
name: JobId
type: string
- jsonPath: .status.message
name: Message
type: string
@@ -427,7 +430,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
Also, matchLabelKeys cannot be set when labelSelector isn't set.
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -442,7 +444,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -603,7 +604,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
Also, matchLabelKeys cannot be set when labelSelector isn't set.
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -618,7 +618,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -707,8 +706,8 @@ spec:
most preferred is the one with the greatest sum of weights, i.e.
for each node that meets all of the scheduling requirements (resource
request, requiredDuringScheduling anti-affinity expressions, etc.),
compute a sum by iterating through the elements of this field and adding
"weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the
compute a sum by iterating through the elements of this field and subtracting
"weight" from the sum if the node has pods which matches the corresponding podAffinityTerm; the
node(s) with the highest sum are the most preferred.
items:
description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)
@@ -772,7 +771,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
Also, matchLabelKeys cannot be set when labelSelector isn't set.
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -787,7 +785,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -948,7 +945,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
Also, matchLabelKeys cannot be set when labelSelector isn't set.
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -963,7 +959,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -1090,7 +1085,9 @@ spec:
description: EnvVar represents an environment variable present in a Container.
properties:
name:
description: Name of the environment variable. Must be a C_IDENTIFIER.
description: |-
Name of the environment variable.
May consist of any printable ASCII characters except '='.
type: string
value:
description: |-
@@ -1144,6 +1141,42 @@ spec:
- fieldPath
type: object
x-kubernetes-map-type: atomic
fileKeyRef:
description: |-
FileKeyRef selects a key of the env file.
Requires the EnvFiles feature gate to be enabled.
properties:
key:
description: |-
The key within the env file. An invalid key will prevent the pod from starting.
The keys defined within a source may consist of any printable ASCII characters except '='.
During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters.
type: string
optional:
default: false
description: |-
Specify whether the file or its key must be defined. If the file or key
does not exist, then the env var is not published.
If optional is set to true and the specified key does not exist,
the environment variable will not be set in the Pod's containers.
If optional is set to false and the specified key does not exist,
an error will be returned during Pod creation.
type: boolean
path:
description: |-
The path within the volume from which to select the file.
Must be relative and may not contain the '..' path or start with '..'.
type: string
volumeName:
description: The name of the volume mount containing the env file.
type: string
required:
- key
- path
- volumeName
type: object
x-kubernetes-map-type: atomic
resourceFieldRef:
description: |-
Selects a resource of the container: only resources limits and requests
@@ -1199,13 +1232,13 @@ spec:
envFrom:
description: |-
List of sources to populate environment variables in the container.
The keys defined within a source must be a C_IDENTIFIER. All invalid keys
will be reported as an event when the container is starting. When a key exists in multiple
The keys defined within a source may consist of any printable ASCII characters except '='.
When a key exists in multiple
sources, the value associated with the last source will take precedence.
Values defined by an Env with a duplicate key will take precedence.
Cannot be updated.
items:
description: EnvFromSource represents the source of a set of ConfigMaps
description: EnvFromSource represents the source of a set of ConfigMaps or Secrets
properties:
configMapRef:
description: The ConfigMap to select from
@@ -1225,7 +1258,9 @@ spec:
type: object
x-kubernetes-map-type: atomic
prefix:
description: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.
description: |-
Optional text to prepend to the name of each environment variable.
May consist of any printable ASCII characters except '='.
type: string
secretRef:
description: The Secret to select from
@@ -1474,6 +1509,12 @@ spec:
- port
type: object
type: object
stopSignal:
description: |-
StopSignal defines which signal will be sent to a container when it is being stopped.
If not specified, the default is defined by the container runtime in use.
StopSignal can only be set for Pods with a non-empty .spec.os.name
type: string
type: object
livenessProbe:
description: |-
@@ -1864,7 +1905,7 @@ spec:
Claims lists the names of resources, defined in spec.resourceClaims,
that are used by this container.
This is an alpha field and requires enabling the
This field depends on the
DynamicResourceAllocation feature gate.
This field is immutable. It can only be set for containers.
@@ -1915,10 +1956,10 @@ spec:
restartPolicy:
description: |-
RestartPolicy defines the restart behavior of individual containers in a pod.
This field may only be set for init containers, and the only allowed value is "Always".
For non-init containers or when this field is not specified,
This overrides the pod-level restart policy. When this field is not specified,
the restart behavior is defined by the Pod's restart policy and the container type.
Setting the RestartPolicy as "Always" for the init container will have the following effect:
Additionally, setting the RestartPolicy as "Always" for the init container will
have the following effect:
this init container will be continually restarted on
exit until all regular containers have terminated. Once all regular
containers have completed, all init containers with restartPolicy "Always"
@@ -1930,6 +1971,57 @@ spec:
init container is started, or after any startupProbe has successfully
completed.
type: string
restartPolicyRules:
description: |-
Represents a list of rules to be checked to determine if the
container should be restarted on exit. The rules are evaluated in
order. Once a rule matches a container exit condition, the remaining
rules are ignored. If no rule matches the container exit condition,
the Container-level restart policy determines whether the container
is restarted or not. Constraints on the rules:
- At most 20 rules are allowed.
- Rules can have the same action.
- Identical rules are not forbidden in validations.
When rules are specified, the container MUST set RestartPolicy explicitly
even if it matches the Pod's RestartPolicy.
items:
description: ContainerRestartRule describes how a container exit is handled.
properties:
action:
description: |-
Specifies the action taken on a container exit if the requirements
are satisfied. The only possible value is "Restart" to restart the
container.
type: string
exitCodes:
description: Represents the exit codes to check on container exits.
properties:
operator:
description: |-
Represents the relationship between the container exit code(s) and the
specified values. Possible values are:
- In: the requirement is satisfied if the container exit code is in the
set of specified values.
- NotIn: the requirement is satisfied if the container exit code is
not in the set of specified values.
type: string
values:
description: |-
Specifies the set of values to check for container exit codes.
At most 255 elements are allowed.
items:
format: int32
type: integer
type: array
x-kubernetes-list-type: set
required:
- operator
type: object
required:
- action
type: object
type: array
x-kubernetes-list-type: atomic
securityContext:
description: |-
SecurityContext defines the security options the container should be run with.
@@ -2527,7 +2619,9 @@ spec:
description: EnvVar represents an environment variable present in a Container.
properties:
name:
description: Name of the environment variable. Must be a C_IDENTIFIER.
description: |-
Name of the environment variable.
May consist of any printable ASCII characters except '='.
type: string
value:
description: |-
@@ -2581,6 +2675,42 @@ spec:
- fieldPath
type: object
x-kubernetes-map-type: atomic
fileKeyRef:
description: |-
FileKeyRef selects a key of the env file.
Requires the EnvFiles feature gate to be enabled.
properties:
key:
description: |-
The key within the env file. An invalid key will prevent the pod from starting.
The keys defined within a source may consist of any printable ASCII characters except '='.
During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters.
type: string
optional:
default: false
description: |-
Specify whether the file or its key must be defined. If the file or key
does not exist, then the env var is not published.
If optional is set to true and the specified key does not exist,
the environment variable will not be set in the Pod's containers.
If optional is set to false and the specified key does not exist,
an error will be returned during Pod creation.
type: boolean
path:
description: |-
The path within the volume from which to select the file.
Must be relative and may not contain the '..' path or start with '..'.
type: string
volumeName:
description: The name of the volume mount containing the env file.
type: string
required:
- key
- path
- volumeName
type: object
x-kubernetes-map-type: atomic
resourceFieldRef:
description: |-
Selects a resource of the container: only resources limits and requests
@@ -2636,13 +2766,13 @@ spec:
envFrom:
description: |-
List of sources to populate environment variables in the container.
The keys defined within a source must be a C_IDENTIFIER. All invalid keys
will be reported as an event when the container is starting. When a key exists in multiple
The keys defined within a source may consist of any printable ASCII characters except '='.
When a key exists in multiple
sources, the value associated with the last source will take precedence.
Values defined by an Env with a duplicate key will take precedence.
Cannot be updated.
items:
description: EnvFromSource represents the source of a set of ConfigMaps
description: EnvFromSource represents the source of a set of ConfigMaps or Secrets
properties:
configMapRef:
description: The ConfigMap to select from
@@ -2662,7 +2792,9 @@ spec:
type: object
x-kubernetes-map-type: atomic
prefix:
description: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.
description: |-
Optional text to prepend to the name of each environment variable.
May consist of any printable ASCII characters except '='.
type: string
secretRef:
description: The Secret to select from
@@ -2907,6 +3039,12 @@ spec:
- port
type: object
type: object
stopSignal:
description: |-
StopSignal defines which signal will be sent to a container when it is being stopped.
If not specified, the default is defined by the container runtime in use.
StopSignal can only be set for Pods with a non-empty .spec.os.name
type: string
type: object
livenessProbe:
description: Probes are not allowed for ephemeral containers.
@@ -3280,7 +3418,7 @@ spec:
Claims lists the names of resources, defined in spec.resourceClaims,
that are used by this container.
This is an alpha field and requires enabling the
This field depends on the
DynamicResourceAllocation feature gate.
This field is immutable. It can only be set for containers.
@@ -3332,9 +3470,51 @@ spec:
description: |-
Restart policy for the container to manage the restart behavior of each
container within a pod.
This may only be set for init containers. You cannot set this field on
ephemeral containers.
You cannot set this field on ephemeral containers.
type: string
restartPolicyRules:
description: |-
Represents a list of rules to be checked to determine if the
container should be restarted on exit. You cannot set this field on
ephemeral containers.
items:
description: ContainerRestartRule describes how a container exit is handled.
properties:
action:
description: |-
Specifies the action taken on a container exit if the requirements
are satisfied. The only possible value is "Restart" to restart the
container.
type: string
exitCodes:
description: Represents the exit codes to check on container exits.
properties:
operator:
description: |-
Represents the relationship between the container exit code(s) and the
specified values. Possible values are:
- In: the requirement is satisfied if the container exit code is in the
set of specified values.
- NotIn: the requirement is satisfied if the container exit code is
not in the set of specified values.
type: string
values:
description: |-
Specifies the set of values to check for container exit codes.
At most 255 elements are allowed.
items:
format: int32
type: integer
type: array
x-kubernetes-list-type: set
required:
- operator
type: object
required:
- action
type: object
type: array
x-kubernetes-list-type: atomic
securityContext:
description: |-
Optional: SecurityContext defines the security options the ephemeral container should be run with.
@@ -3853,7 +4033,9 @@ spec:
hostNetwork:
description: |-
Host networking requested for this pod. Use the host's network namespace.
If this option is set, the ports that will be used must be specified.
When using HostNetwork you should specify ports so the scheduler is aware.
When `hostNetwork` is true, specified `hostPort` fields in port definitions must match `containerPort`,
and unspecified `hostPort` fields in port definitions are defaulted to match `containerPort`.
Defaults to false.
type: boolean
hostPID:
@@ -3878,6 +4060,19 @@ spec:
Specifies the hostname of the Pod
If not specified, the pod's hostname will be set to a system-defined value.
type: string
hostnameOverride:
description: |-
HostnameOverride specifies an explicit override for the pod's hostname as perceived by the pod.
This field only specifies the pod's hostname and does not affect its DNS records.
When this field is set to a non-empty string:
- It takes precedence over the values set in `hostname` and `subdomain`.
- The Pod's hostname will be set to this value.
- `setHostnameAsFQDN` must be nil or set to false.
- `hostNetwork` must be set to false.
This field must be a valid DNS subdomain as defined in RFC 1123 and contain at most 64 characters.
Requires the HostnameOverride feature gate to be enabled.
type: string
imagePullSecrets:
description: |-
ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec.
@@ -3913,7 +4108,7 @@ spec:
Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes.
The resourceRequirements of an init container are taken into account during scheduling
by finding the highest request/limit for each resource type, and then using the max of
of that value or the sum of the normal containers. Limits are applied to init containers
that value or the sum of the normal containers. Limits are applied to init containers
in a similar fashion.
Init containers cannot currently be added or removed.
Cannot be updated.
@@ -3957,7 +4152,9 @@ spec:
description: EnvVar represents an environment variable present in a Container.
properties:
name:
description: Name of the environment variable. Must be a C_IDENTIFIER.
description: |-
Name of the environment variable.
May consist of any printable ASCII characters except '='.
type: string
value:
description: |-
@@ -4011,6 +4208,42 @@ spec:
- fieldPath
type: object
x-kubernetes-map-type: atomic
fileKeyRef:
description: |-
FileKeyRef selects a key of the env file.
Requires the EnvFiles feature gate to be enabled.
properties:
key:
description: |-
The key within the env file. An invalid key will prevent the pod from starting.
The keys defined within a source may consist of any printable ASCII characters except '='.
During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters.
type: string
optional:
default: false
description: |-
Specify whether the file or its key must be defined. If the file or key
does not exist, then the env var is not published.
If optional is set to true and the specified key does not exist,
the environment variable will not be set in the Pod's containers.
If optional is set to false and the specified key does not exist,
an error will be returned during Pod creation.
type: boolean
path:
description: |-
The path within the volume from which to select the file.
Must be relative and may not contain the '..' path or start with '..'.
type: string
volumeName:
description: The name of the volume mount containing the env file.
type: string
required:
- key
- path
- volumeName
type: object
x-kubernetes-map-type: atomic
resourceFieldRef:
description: |-
Selects a resource of the container: only resources limits and requests
@@ -4066,13 +4299,13 @@ spec:
envFrom:
description: |-
List of sources to populate environment variables in the container.
The keys defined within a source must be a C_IDENTIFIER. All invalid keys
will be reported as an event when the container is starting. When a key exists in multiple
The keys defined within a source may consist of any printable ASCII characters except '='.
When a key exists in multiple
sources, the value associated with the last source will take precedence.
Values defined by an Env with a duplicate key will take precedence.
Cannot be updated.
items:
description: EnvFromSource represents the source of a set of ConfigMaps
description: EnvFromSource represents the source of a set of ConfigMaps or Secrets
properties:
configMapRef:
description: The ConfigMap to select from
@@ -4092,7 +4325,9 @@ spec:
type: object
x-kubernetes-map-type: atomic
prefix:
description: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.
description: |-
Optional text to prepend to the name of each environment variable.
May consist of any printable ASCII characters except '='.
type: string
secretRef:
description: The Secret to select from
@@ -4341,6 +4576,12 @@ spec:
- port
type: object
type: object
stopSignal:
description: |-
StopSignal defines which signal will be sent to a container when it is being stopped.
If not specified, the default is defined by the container runtime in use.
StopSignal can only be set for Pods with a non-empty .spec.os.name
type: string
type: object
livenessProbe:
description: |-
@@ -4731,7 +4972,7 @@ spec:
Claims lists the names of resources, defined in spec.resourceClaims,
that are used by this container.
This is an alpha field and requires enabling the
This field depends on the
DynamicResourceAllocation feature gate.
This field is immutable. It can only be set for containers.
@@ -4782,10 +5023,10 @@ spec:
restartPolicy:
description: |-
RestartPolicy defines the restart behavior of individual containers in a pod.
This field may only be set for init containers, and the only allowed value is "Always".
For non-init containers or when this field is not specified,
This overrides the pod-level restart policy. When this field is not specified,
the restart behavior is defined by the Pod's restart policy and the container type.
Setting the RestartPolicy as "Always" for the init container will have the following effect:
Additionally, setting the RestartPolicy as "Always" for the init container will
have the following effect:
this init container will be continually restarted on
exit until all regular containers have terminated. Once all regular
containers have completed, all init containers with restartPolicy "Always"
@@ -4797,6 +5038,57 @@ spec:
init container is started, or after any startupProbe has successfully
completed.
type: string
restartPolicyRules:
description: |-
Represents a list of rules to be checked to determine if the
container should be restarted on exit. The rules are evaluated in
order. Once a rule matches a container exit condition, the remaining
rules are ignored. If no rule matches the container exit condition,
the Container-level restart policy determines whether the container
is restarted or not. Constraints on the rules:
- At most 20 rules are allowed.
- Rules can have the same action.
- Identical rules are not forbidden in validations.
When rules are specified, the container MUST set RestartPolicy explicitly
even if it matches the Pod's RestartPolicy.
items:
description: ContainerRestartRule describes how a container exit is handled.
properties:
action:
description: |-
Specifies the action taken on a container exit if the requirements
are satisfied. The only possible value is "Restart" to restart the
container.
type: string
exitCodes:
description: Represents the exit codes to check on container exits.
properties:
operator:
description: |-
Represents the relationship between the container exit code(s) and the
specified values. Possible values are:
- In: the requirement is satisfied if the container exit code is in the
set of specified values.
- NotIn: the requirement is satisfied if the container exit code is
not in the set of specified values.
type: string
values:
description: |-
Specifies the set of values to check for container exit codes.
At most 255 elements are allowed.
items:
format: int32
type: integer
type: array
x-kubernetes-list-type: set
required:
- operator
type: object
required:
- action
type: object
type: array
x-kubernetes-list-type: atomic
securityContext:
description: |-
SecurityContext defines the security options the container should be run with.
@@ -5310,6 +5602,7 @@ spec:
- spec.hostPID
- spec.hostIPC
- spec.hostUsers
- spec.resources
- spec.securityContext.appArmorProfile
- spec.securityContext.seLinuxOptions
- spec.securityContext.seccompProfile
@@ -5461,7 +5754,7 @@ spec:
description: |-
Resources is the total amount of CPU and Memory resources required by all
containers in the pod. It supports specifying Requests and Limits for
"cpu" and "memory" resource names only. ResourceClaims are not supported.
"cpu", "memory" and "hugepages-" resource names only. ResourceClaims are not supported.
This field enables fine-grained control over resource allocation for the
entire pod, allowing resource sharing among containers in a pod.
@@ -5474,7 +5767,7 @@ spec:
Claims lists the names of resources, defined in spec.resourceClaims,
that are used by this container.
This is an alpha field and requires enabling the
This field depends on the
DynamicResourceAllocation feature gate.
This field is immutable. It can only be set for containers.
@@ -6002,7 +6295,6 @@ spec:
- Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.
If this value is nil, the behavior is equivalent to the Honor policy.
This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
type: string
nodeTaintsPolicy:
description: |-
@@ -6013,7 +6305,6 @@ spec:
- Ignore: node taints are ignored. All nodes are included.
If this value is nil, the behavior is equivalent to the Ignore policy.
This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
type: string
topologyKey:
description: |-
@@ -6719,15 +7010,13 @@ spec:
volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim.
If specified, the CSI driver will create or update the volume with the attributes defined
in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName,
it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass
will be applied to the claim but it's not allowed to reset this field to empty string once it is set.
If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass
will be set by the persistentvolume controller if it exists.
it can be changed after the claim is created. An empty string or nil value indicates that no
VolumeAttributesClass will be applied to the claim. If the claim enters an Infeasible error state,
this field can be reset to its previous value (including nil) to cancel the modification.
If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource
exists.
More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
(Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).
type: string
volumeMode:
description: |-
@@ -6901,12 +7190,9 @@ spec:
description: |-
glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.
Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported.
More info: https://examples.k8s.io/volumes/glusterfs/README.md
properties:
endpoints:
description: |-
endpoints is the endpoint name that details Glusterfs topology.
More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
description: endpoints is the endpoint name that details Glusterfs topology.
type: string
path:
description: |-
@@ -6960,7 +7246,7 @@ spec:
The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field.
The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images.
The volume will be mounted read-only (ro) and non-executable files (noexec).
Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath).
Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath) before 1.33.
The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type.
properties:
pullPolicy:
@@ -6985,7 +7271,7 @@ spec:
description: |-
iscsi represents an ISCSI Disk resource that is attached to a
kubelet's host machine and then exposed to the pod.
More info: https://examples.k8s.io/volumes/iscsi/README.md
More info: https://kubernetes.io/docs/concepts/storage/volumes/#iscsi
properties:
chapAuthDiscovery:
description: chapAuthDiscovery defines whether iSCSI Discovery CHAP authentication is supported
@@ -7375,6 +7661,110 @@ spec:
type: array
x-kubernetes-list-type: atomic
type: object
podCertificate:
description: |-
Projects an auto-rotating credential bundle (private key and certificate
chain) that the pod can use either as a TLS client or server.
Kubelet generates a private key and uses it to send a
PodCertificateRequest to the named signer. Once the signer approves the
request and issues a certificate chain, Kubelet writes the key and
certificate chain to the pod filesystem. The pod does not start until
certificates have been issued for each podCertificate projected volume
source in its spec.
Kubelet will begin trying to rotate the certificate at the time indicated
by the signer using the PodCertificateRequest.Status.BeginRefreshAt
timestamp.
Kubelet can write a single file, indicated by the credentialBundlePath
field, or separate files, indicated by the keyPath and
certificateChainPath fields.
The credential bundle is a single file in PEM format. The first PEM
entry is the private key (in PKCS#8 format), and the remaining PEM
entries are the certificate chain issued by the signer (typically,
signers will return their certificate chain in leaf-to-root order).
Prefer using the credential bundle format, since your application code
can read it atomically. If you use keyPath and certificateChainPath,
your application must make two separate file reads. If these coincide
with a certificate rotation, it is possible that the private key and leaf
certificate you read may not correspond to each other. Your application
will need to check for this condition, and re-read until they are
consistent.
The named signer chooses the format of the certificate it
issues; consult the signer implementation's documentation to learn how to
use the certificates it issues.
properties:
certificateChainPath:
description: |-
Write the certificate chain at this path in the projected volume.
Most applications should use credentialBundlePath. When using keyPath
and certificateChainPath, your application needs to check that the key
and leaf certificate are consistent, because it is possible to read the
files mid-rotation.
type: string
credentialBundlePath:
description: |-
Write the credential bundle at this path in the projected volume.
The credential bundle is a single file that contains multiple PEM blocks.
The first PEM block is a PRIVATE KEY block, containing a PKCS#8 private
key.
The remaining blocks are CERTIFICATE blocks, containing the issued
certificate chain from the signer (leaf and any intermediates).
Using credentialBundlePath lets your Pod's application code make a single
atomic read that retrieves a consistent key and certificate chain. If you
project them to separate files, your application code will need to
additionally check that the leaf certificate was issued to the key.
type: string
keyPath:
description: |-
Write the key at this path in the projected volume.
Most applications should use credentialBundlePath. When using keyPath
and certificateChainPath, your application needs to check that the key
and leaf certificate are consistent, because it is possible to read the
files mid-rotation.
type: string
keyType:
description: |-
The type of keypair Kubelet will generate for the pod.
Valid values are "RSA3072", "RSA4096", "ECDSAP256", "ECDSAP384",
"ECDSAP521", and "ED25519".
type: string
maxExpirationSeconds:
description: |-
maxExpirationSeconds is the maximum lifetime permitted for the
certificate.
Kubelet copies this value verbatim into the PodCertificateRequests it
generates for this projection.
If omitted, kube-apiserver will set it to 86400 (24 hours). kube-apiserver
will reject values shorter than 3600 (1 hour). The maximum allowable
value is 7862400 (91 days).
The signer implementation is then free to issue a certificate with any
lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600
seconds (1 hour). This constraint is enforced by kube-apiserver.
`kubernetes.io` signers will never issue certificates with a lifetime
longer than 24 hours.
format: int32
type: integer
signerName:
description: Kubelet's generated CSRs will be addressed to this signer.
type: string
required:
- keyType
- signerName
type: object
secret:
description: secret information about the secret data to project
properties:
@@ -7504,7 +7894,6 @@ spec:
description: |-
rbd represents a Rados Block Device mount on the host that shares a pod's lifetime.
Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported.
More info: https://examples.k8s.io/volumes/rbd/README.md
properties:
fsType:
description: |-
@@ -7784,6 +8173,53 @@ spec:
required:
- containers
type: object
vaultConfig:
properties:
azureKeyVault:
properties:
certificatePath:
type: string
clientId:
type: string
tenantId:
type: string
url:
type: string
required:
- certificatePath
- clientId
- tenantId
- url
type: object
proxy:
properties:
http:
properties:
credentialSecretRef:
type: string
url:
description: Required
type: string
type: object
https:
properties:
credentialSecretRef:
type: string
url:
description: Required
type: string
type: object
noProxy:
items:
type: string
type: array
type: object
type:
description: |-
VaultType represents the type of vault that can be used in the application.
It is used to identify which vault integration should be used to resolve secrets.
type: string
type: object
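A hedged sketch of the new vaultConfig block based only on the schema above (the type value and all other values are placeholders, not documented ARC values):

    vaultConfig:
      type: azure_key_vault                    # placeholder; actual VaultType values are ARC-defined
      azureKeyVault:
        url: https://example.vault.azure.net   # placeholder vault URL
        tenantId: "00000000-0000-0000-0000-000000000000"
        clientId: "11111111-1111-1111-1111-111111111111"
        certificatePath: /vault/certs/client.pem
      proxy:
        https:
          url: https://proxy.example.com:8443
          credentialSecretRef: proxy-credentials   # name of a Kubernetes secret
        noProxy:
          - .internal.example.com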
required:
- githubConfigSecret
- githubConfigUrl
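To make the new vaultConfig block concrete, a hedged sketch of the corresponding spec fields, matching the schema above; every value is a placeholder:

vaultConfig:
  type: azure_key_vault                  # the type string the chart checks for (see the template changes below)
  azureKeyVault:
    url: https://example-vault.vault.azure.net        # placeholder
    tenantId: 00000000-0000-0000-0000-000000000000    # placeholder
    clientId: 11111111-1111-1111-1111-111111111111    # placeholder
    certificatePath: /akv/cert.pfx                    # placeholder
  proxy:
    https:
      url: https://proxy.example.internal:8443        # url is required per the schema
      credentialSecretRef: proxy-credentials          # placeholder secret name
    noProxy:
      - .example.internal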
@@ -7794,10 +8230,13 @@ spec:
properties:
failures:
additionalProperties:
type: boolean
format: date-time
type: string
type: object
jobDisplayName:
type: string
jobId:
type: string
jobRepositoryName:
type: string
jobRequestId:
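The status diff above changes failures from a map of booleans to a map of RFC 3339 timestamps. An illustrative status fragment under the new schema; the map key and all values are hypothetical:

status:
  failures:
    "runner-abc123": "2025-10-16T12:34:56Z"   # value is now a timestamp, not a boolean
  jobDisplayName: build-and-test              # hypothetical
  jobRepositoryName: my-org/my-repo           # hypothetical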
@@ -7826,8 +8265,6 @@ spec:
type: string
runnerId:
type: integer
runnerJITConfig:
type: string
runnerName:
type: string
workflowRunId:
@@ -7839,4 +8276,3 @@ spec:
storage: true
subresources:
status: {}
preserveUnknownFields: false

View File

@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.17.2
controller-gen.kubebuilder.io/version: v0.19.0
name: ephemeralrunnersets.actions.github.com
spec:
group: actions.github.com
@@ -421,7 +421,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
Also, matchLabelKeys cannot be set when labelSelector isn't set.
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -436,7 +435,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -597,7 +595,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
Also, matchLabelKeys cannot be set when labelSelector isn't set.
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -612,7 +609,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -701,8 +697,8 @@ spec:
most preferred is the one with the greatest sum of weights, i.e.
for each node that meets all of the scheduling requirements (resource
request, requiredDuringScheduling anti-affinity expressions, etc.),
compute a sum by iterating through the elements of this field and adding
"weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the
compute a sum by iterating through the elements of this field and subtracting
"weight" from the sum if the node has pods which matches the corresponding podAffinityTerm; the
node(s) with the highest sum are the most preferred.
items:
description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)
@@ -766,7 +762,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
Also, matchLabelKeys cannot be set when labelSelector isn't set.
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -781,7 +776,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -942,7 +936,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
Also, matchLabelKeys cannot be set when labelSelector isn't set.
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -957,7 +950,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -1084,7 +1076,9 @@ spec:
description: EnvVar represents an environment variable present in a Container.
properties:
name:
description: Name of the environment variable. Must be a C_IDENTIFIER.
description: |-
Name of the environment variable.
May consist of any printable ASCII characters except '='.
type: string
value:
description: |-
@@ -1138,6 +1132,42 @@ spec:
- fieldPath
type: object
x-kubernetes-map-type: atomic
fileKeyRef:
description: |-
FileKeyRef selects a key of the env file.
Requires the EnvFiles feature gate to be enabled.
properties:
key:
description: |-
The key within the env file. An invalid key will prevent the pod from starting.
The keys defined within a source may consist of any printable ASCII characters except '='.
During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters.
type: string
optional:
default: false
description: |-
Specify whether the file or its key must be defined. If the file or key
does not exist, then the env var is not published.
If optional is set to true and the specified key does not exist,
the environment variable will not be set in the Pod's containers.
If optional is set to false and the specified key does not exist,
an error will be returned during Pod creation.
type: boolean
path:
description: |-
The path within the volume from which to select the file.
Must be relative and may not contain the '..' path or start with '..'.
type: string
volumeName:
description: The name of the volume mount containing the env file.
type: string
required:
- key
- path
- volumeName
type: object
x-kubernetes-map-type: atomic
resourceFieldRef:
description: |-
Selects a resource of the container: only resources limits and requests
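A hedged sketch of the new fileKeyRef source in use, assuming the alpha EnvFiles feature gate is enabled; the volume, path, and key names are hypothetical:

env:
  - name: DB_PASSWORD                 # hypothetical variable
    valueFrom:
      fileKeyRef:
        volumeName: config-volume     # hypothetical volume already mounted in the pod
        path: app.env                 # relative path inside that volume
        key: db_password              # key within the env file
        optional: false               # a missing key fails pod creation, per the schema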
@@ -1193,13 +1223,13 @@ spec:
envFrom:
description: |-
List of sources to populate environment variables in the container.
The keys defined within a source must be a C_IDENTIFIER. All invalid keys
will be reported as an event when the container is starting. When a key exists in multiple
The keys defined within a source may consist of any printable ASCII characters except '='.
When a key exists in multiple
sources, the value associated with the last source will take precedence.
Values defined by an Env with a duplicate key will take precedence.
Cannot be updated.
items:
description: EnvFromSource represents the source of a set of ConfigMaps
description: EnvFromSource represents the source of a set of ConfigMaps or Secrets
properties:
configMapRef:
description: The ConfigMap to select from
@@ -1219,7 +1249,9 @@ spec:
type: object
x-kubernetes-map-type: atomic
prefix:
description: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.
description: |-
Optional text to prepend to the name of each environment variable.
May consist of any printable ASCII characters except '='.
type: string
secretRef:
description: The Secret to select from
@@ -1468,6 +1500,12 @@ spec:
- port
type: object
type: object
stopSignal:
description: |-
StopSignal defines which signal will be sent to a container when it is being stopped.
If not specified, the default is defined by the container runtime in use.
StopSignal can only be set for Pods with a non-empty .spec.os.name
type: string
type: object
livenessProbe:
description: |-
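For the new stopSignal field, a minimal sketch; note the schema's requirement that .spec.os.name be non-empty, and that the placement under lifecycle is an assumption based on where the field sits in this schema:

spec:
  os:
    name: linux                       # stopSignal requires a non-empty .spec.os.name
  containers:
    - name: app
      image: example.com/app:latest   # hypothetical
      lifecycle:
        stopSignal: SIGUSR1           # overrides the runtime default (typically SIGTERM)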
@@ -1858,7 +1896,7 @@ spec:
Claims lists the names of resources, defined in spec.resourceClaims,
that are used by this container.
This is an alpha field and requires enabling the
This field depends on the
DynamicResourceAllocation feature gate.
This field is immutable. It can only be set for containers.
@@ -1909,10 +1947,10 @@ spec:
restartPolicy:
description: |-
RestartPolicy defines the restart behavior of individual containers in a pod.
This field may only be set for init containers, and the only allowed value is "Always".
For non-init containers or when this field is not specified,
This overrides the pod-level restart policy. When this field is not specified,
the restart behavior is defined by the Pod's restart policy and the container type.
Setting the RestartPolicy as "Always" for the init container will have the following effect:
Additionally, setting the RestartPolicy as "Always" for the init container will
have the following effect:
this init container will be continually restarted on
exit until all regular containers have terminated. Once all regular
containers have completed, all init containers with restartPolicy "Always"
@@ -1924,6 +1962,57 @@ spec:
init container is started, or after any startupProbe has successfully
completed.
type: string
restartPolicyRules:
description: |-
Represents a list of rules to be checked to determine if the
container should be restarted on exit. The rules are evaluated in
order. Once a rule matches a container exit condition, the remaining
rules are ignored. If no rule matches the container exit condition,
the Container-level restart policy determines whether the container
is restarted or not. Constraints on the rules:
- At most 20 rules are allowed.
- Rules can have the same action.
- Identical rules are not forbidden in validations.
When rules are specified, container MUST set RestartPolicy explicitly
even if it matches the Pod's RestartPolicy.
items:
description: ContainerRestartRule describes how a container exit is handled.
properties:
action:
description: |-
Specifies the action taken on a container exit if the requirements
are satisfied. The only possible value is "Restart" to restart the
container.
type: string
exitCodes:
description: Represents the exit codes to check on container exits.
properties:
operator:
description: |-
Represents the relationship between the container exit code(s) and the
specified values. Possible values are:
- In: the requirement is satisfied if the container exit code is in the
set of specified values.
- NotIn: the requirement is satisfied if the container exit code is
not in the set of specified values.
type: string
values:
description: |-
Specifies the set of values to check for container exit codes.
At most 255 elements are allowed.
items:
format: int32
type: integer
type: array
x-kubernetes-list-type: set
required:
- operator
type: object
required:
- action
type: object
type: array
x-kubernetes-list-type: atomic
securityContext:
description: |-
SecurityContext defines the security options the container should be run with.
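A hedged sketch of restartPolicyRules under the constraints listed above (explicit container RestartPolicy, "Restart" as the only action); the exit codes are arbitrary:

containers:
  - name: worker
    image: example.com/worker:latest  # hypothetical
    restartPolicy: Never              # must be set explicitly when rules are present
    restartPolicyRules:
      - action: Restart               # the only possible value
        exitCodes:
          operator: In
          values: [42, 43]            # restart only for these exit codes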
@@ -2521,7 +2610,9 @@ spec:
description: EnvVar represents an environment variable present in a Container.
properties:
name:
description: Name of the environment variable. Must be a C_IDENTIFIER.
description: |-
Name of the environment variable.
May consist of any printable ASCII characters except '='.
type: string
value:
description: |-
@@ -2575,6 +2666,42 @@ spec:
- fieldPath
type: object
x-kubernetes-map-type: atomic
fileKeyRef:
description: |-
FileKeyRef selects a key of the env file.
Requires the EnvFiles feature gate to be enabled.
properties:
key:
description: |-
The key within the env file. An invalid key will prevent the pod from starting.
The keys defined within a source may consist of any printable ASCII characters except '='.
During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters.
type: string
optional:
default: false
description: |-
Specify whether the file or its key must be defined. If the file or key
does not exist, then the env var is not published.
If optional is set to true and the specified key does not exist,
the environment variable will not be set in the Pod's containers.
If optional is set to false and the specified key does not exist,
an error will be returned during Pod creation.
type: boolean
path:
description: |-
The path within the volume from which to select the file.
Must be relative and may not contain the '..' path or start with '..'.
type: string
volumeName:
description: The name of the volume mount containing the env file.
type: string
required:
- key
- path
- volumeName
type: object
x-kubernetes-map-type: atomic
resourceFieldRef:
description: |-
Selects a resource of the container: only resources limits and requests
@@ -2630,13 +2757,13 @@ spec:
envFrom:
description: |-
List of sources to populate environment variables in the container.
The keys defined within a source must be a C_IDENTIFIER. All invalid keys
will be reported as an event when the container is starting. When a key exists in multiple
The keys defined within a source may consist of any printable ASCII characters except '='.
When a key exists in multiple
sources, the value associated with the last source will take precedence.
Values defined by an Env with a duplicate key will take precedence.
Cannot be updated.
items:
description: EnvFromSource represents the source of a set of ConfigMaps
description: EnvFromSource represents the source of a set of ConfigMaps or Secrets
properties:
configMapRef:
description: The ConfigMap to select from
@@ -2656,7 +2783,9 @@ spec:
type: object
x-kubernetes-map-type: atomic
prefix:
description: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.
description: |-
Optional text to prepend to the name of each environment variable.
May consist of any printable ASCII characters except '='.
type: string
secretRef:
description: The Secret to select from
@@ -2901,6 +3030,12 @@ spec:
- port
type: object
type: object
stopSignal:
description: |-
StopSignal defines which signal will be sent to a container when it is being stopped.
If not specified, the default is defined by the container runtime in use.
StopSignal can only be set for Pods with a non-empty .spec.os.name
type: string
type: object
livenessProbe:
description: Probes are not allowed for ephemeral containers.
@@ -3274,7 +3409,7 @@ spec:
Claims lists the names of resources, defined in spec.resourceClaims,
that are used by this container.
This is an alpha field and requires enabling the
This field depends on the
DynamicResourceAllocation feature gate.
This field is immutable. It can only be set for containers.
@@ -3326,9 +3461,51 @@ spec:
description: |-
Restart policy for the container to manage the restart behavior of each
container within a pod.
This may only be set for init containers. You cannot set this field on
ephemeral containers.
You cannot set this field on ephemeral containers.
type: string
restartPolicyRules:
description: |-
Represents a list of rules to be checked to determine if the
container should be restarted on exit. You cannot set this field on
ephemeral containers.
items:
description: ContainerRestartRule describes how a container exit is handled.
properties:
action:
description: |-
Specifies the action taken on a container exit if the requirements
are satisfied. The only possible value is "Restart" to restart the
container.
type: string
exitCodes:
description: Represents the exit codes to check on container exits.
properties:
operator:
description: |-
Represents the relationship between the container exit code(s) and the
specified values. Possible values are:
- In: the requirement is satisfied if the container exit code is in the
set of specified values.
- NotIn: the requirement is satisfied if the container exit code is
not in the set of specified values.
type: string
values:
description: |-
Specifies the set of values to check for container exit codes.
At most 255 elements are allowed.
items:
format: int32
type: integer
type: array
x-kubernetes-list-type: set
required:
- operator
type: object
required:
- action
type: object
type: array
x-kubernetes-list-type: atomic
securityContext:
description: |-
Optional: SecurityContext defines the security options the ephemeral container should be run with.
@@ -3847,7 +4024,9 @@ spec:
hostNetwork:
description: |-
Host networking requested for this pod. Use the host's network namespace.
If this option is set, the ports that will be used must be specified.
When using HostNetwork you should specify ports so the scheduler is aware.
When `hostNetwork` is true, specified `hostPort` fields in port definitions must match `containerPort`,
and unspecified `hostPort` fields in port definitions are defaulted to match `containerPort`.
Default to false.
type: boolean
hostPID:
@@ -3872,6 +4051,19 @@ spec:
Specifies the hostname of the Pod
If not specified, the pod's hostname will be set to a system-defined value.
type: string
hostnameOverride:
description: |-
HostnameOverride specifies an explicit override for the pod's hostname as perceived by the pod.
This field only specifies the pod's hostname and does not affect its DNS records.
When this field is set to a non-empty string:
- It takes precedence over the values set in `hostname` and `subdomain`.
- The Pod's hostname will be set to this value.
- `setHostnameAsFQDN` must be nil or set to false.
- `hostNetwork` must be set to false.
This field must be a valid DNS subdomain as defined in RFC 1123 and contain at most 64 characters.
Requires the HostnameOverride feature gate to be enabled.
type: string
imagePullSecrets:
description: |-
ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec.
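The constraints on hostnameOverride read more easily as a manifest fragment; a sketch assuming the HostnameOverride feature gate, with a hypothetical hostname:

spec:
  hostnameOverride: runner-custom-host  # hypothetical; valid RFC 1123 subdomain, at most 64 chars
  setHostnameAsFQDN: false              # must be nil or false when hostnameOverride is set
  hostNetwork: false                    # must be false when hostnameOverride is set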
@@ -3907,7 +4099,7 @@ spec:
Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes.
The resourceRequirements of an init container are taken into account during scheduling
by finding the highest request/limit for each resource type, and then using the max of
of that value or the sum of the normal containers. Limits are applied to init containers
that value or the sum of the normal containers. Limits are applied to init containers
in a similar fashion.
Init containers cannot currently be added or removed.
Cannot be updated.
@@ -3951,7 +4143,9 @@ spec:
description: EnvVar represents an environment variable present in a Container.
properties:
name:
description: Name of the environment variable. Must be a C_IDENTIFIER.
description: |-
Name of the environment variable.
May consist of any printable ASCII characters except '='.
type: string
value:
description: |-
@@ -4005,6 +4199,42 @@ spec:
- fieldPath
type: object
x-kubernetes-map-type: atomic
fileKeyRef:
description: |-
FileKeyRef selects a key of the env file.
Requires the EnvFiles feature gate to be enabled.
properties:
key:
description: |-
The key within the env file. An invalid key will prevent the pod from starting.
The keys defined within a source may consist of any printable ASCII characters except '='.
During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters.
type: string
optional:
default: false
description: |-
Specify whether the file or its key must be defined. If the file or key
does not exist, then the env var is not published.
If optional is set to true and the specified key does not exist,
the environment variable will not be set in the Pod's containers.
If optional is set to false and the specified key does not exist,
an error will be returned during Pod creation.
type: boolean
path:
description: |-
The path within the volume from which to select the file.
Must be relative and may not contain the '..' path or start with '..'.
type: string
volumeName:
description: The name of the volume mount containing the env file.
type: string
required:
- key
- path
- volumeName
type: object
x-kubernetes-map-type: atomic
resourceFieldRef:
description: |-
Selects a resource of the container: only resources limits and requests
@@ -4060,13 +4290,13 @@ spec:
envFrom:
description: |-
List of sources to populate environment variables in the container.
The keys defined within a source must be a C_IDENTIFIER. All invalid keys
will be reported as an event when the container is starting. When a key exists in multiple
The keys defined within a source may consist of any printable ASCII characters except '='.
When a key exists in multiple
sources, the value associated with the last source will take precedence.
Values defined by an Env with a duplicate key will take precedence.
Cannot be updated.
items:
description: EnvFromSource represents the source of a set of ConfigMaps
description: EnvFromSource represents the source of a set of ConfigMaps or Secrets
properties:
configMapRef:
description: The ConfigMap to select from
@@ -4086,7 +4316,9 @@ spec:
type: object
x-kubernetes-map-type: atomic
prefix:
description: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.
description: |-
Optional text to prepend to the name of each environment variable.
May consist of any printable ASCII characters except '='.
type: string
secretRef:
description: The Secret to select from
@@ -4335,6 +4567,12 @@ spec:
- port
type: object
type: object
stopSignal:
description: |-
StopSignal defines which signal will be sent to a container when it is being stopped.
If not specified, the default is defined by the container runtime in use.
StopSignal can only be set for Pods with a non-empty .spec.os.name
type: string
type: object
livenessProbe:
description: |-
@@ -4725,7 +4963,7 @@ spec:
Claims lists the names of resources, defined in spec.resourceClaims,
that are used by this container.
This is an alpha field and requires enabling the
This field depends on the
DynamicResourceAllocation feature gate.
This field is immutable. It can only be set for containers.
@@ -4776,10 +5014,10 @@ spec:
restartPolicy:
description: |-
RestartPolicy defines the restart behavior of individual containers in a pod.
This field may only be set for init containers, and the only allowed value is "Always".
For non-init containers or when this field is not specified,
This overrides the pod-level restart policy. When this field is not specified,
the restart behavior is defined by the Pod's restart policy and the container type.
Setting the RestartPolicy as "Always" for the init container will have the following effect:
Additionally, setting the RestartPolicy as "Always" for the init container will
have the following effect:
this init container will be continually restarted on
exit until all regular containers have terminated. Once all regular
containers have completed, all init containers with restartPolicy "Always"
@@ -4791,6 +5029,57 @@ spec:
init container is started, or after any startupProbe has successfully
completed.
type: string
restartPolicyRules:
description: |-
Represents a list of rules to be checked to determine if the
container should be restarted on exit. The rules are evaluated in
order. Once a rule matches a container exit condition, the remaining
rules are ignored. If no rule matches the container exit condition,
the Container-level restart policy determines whether the container
is restarted or not. Constraints on the rules:
- At most 20 rules are allowed.
- Rules can have the same action.
- Identical rules are not forbidden in validations.
When rules are specified, container MUST set RestartPolicy explicitly
even if it matches the Pod's RestartPolicy.
items:
description: ContainerRestartRule describes how a container exit is handled.
properties:
action:
description: |-
Specifies the action taken on a container exit if the requirements
are satisfied. The only possible value is "Restart" to restart the
container.
type: string
exitCodes:
description: Represents the exit codes to check on container exits.
properties:
operator:
description: |-
Represents the relationship between the container exit code(s) and the
specified values. Possible values are:
- In: the requirement is satisfied if the container exit code is in the
set of specified values.
- NotIn: the requirement is satisfied if the container exit code is
not in the set of specified values.
type: string
values:
description: |-
Specifies the set of values to check for container exit codes.
At most 255 elements are allowed.
items:
format: int32
type: integer
type: array
x-kubernetes-list-type: set
required:
- operator
type: object
required:
- action
type: object
type: array
x-kubernetes-list-type: atomic
securityContext:
description: |-
SecurityContext defines the security options the container should be run with.
@@ -5304,6 +5593,7 @@ spec:
- spec.hostPID
- spec.hostIPC
- spec.hostUsers
- spec.resources
- spec.securityContext.appArmorProfile
- spec.securityContext.seLinuxOptions
- spec.securityContext.seccompProfile
@@ -5455,7 +5745,7 @@ spec:
description: |-
Resources is the total amount of CPU and Memory resources required by all
containers in the pod. It supports specifying Requests and Limits for
"cpu" and "memory" resource names only. ResourceClaims are not supported.
"cpu", "memory" and "hugepages-" resource names only. ResourceClaims are not supported.
This field enables fine-grained control over resource allocation for the
entire pod, allowing resource sharing among containers in a pod.
@@ -5468,7 +5758,7 @@ spec:
Claims lists the names of resources, defined in spec.resourceClaims,
that are used by this container.
This is an alpha field and requires enabling the
This field depends on the
DynamicResourceAllocation feature gate.
This field is immutable. It can only be set for containers.
@@ -5996,7 +6286,6 @@ spec:
- Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.
If this value is nil, the behavior is equivalent to the Honor policy.
This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
type: string
nodeTaintsPolicy:
description: |-
@@ -6007,7 +6296,6 @@ spec:
- Ignore: node taints are ignored. All nodes are included.
If this value is nil, the behavior is equivalent to the Ignore policy.
This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
type: string
topologyKey:
description: |-
@@ -6713,15 +7001,13 @@ spec:
volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim.
If specified, the CSI driver will create or update the volume with the attributes defined
in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName,
it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass
will be applied to the claim but it's not allowed to reset this field to empty string once it is set.
If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass
will be set by the persistentvolume controller if it exists.
it can be changed after the claim is created. An empty string or nil value indicates that no
VolumeAttributesClass will be applied to the claim. If the claim enters an Infeasible error state,
this field can be reset to its previous value (including nil) to cancel the modification.
If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource
exists.
More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
(Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).
type: string
volumeMode:
description: |-
@@ -6895,12 +7181,9 @@ spec:
description: |-
glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.
Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported.
More info: https://examples.k8s.io/volumes/glusterfs/README.md
properties:
endpoints:
description: |-
endpoints is the endpoint name that details Glusterfs topology.
More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
description: endpoints is the endpoint name that details Glusterfs topology.
type: string
path:
description: |-
@@ -6954,7 +7237,7 @@ spec:
The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field.
The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images.
The volume will be mounted read-only (ro) and non-executable files (noexec).
Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath).
Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath) before 1.33.
The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type.
properties:
pullPolicy:
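For the OCI image volume source described above, a minimal sketch; the artifact reference is hypothetical:

spec:
  containers:
    - name: app
      image: example.com/app:latest          # hypothetical
      volumeMounts:
        - name: oci-content
          mountPath: /data/artifacts         # mounted read-only and noexec
  volumes:
    - name: oci-content
      image:
        reference: example.com/artifacts:v1  # hypothetical OCI object
        pullPolicy: IfNotPresent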
@@ -6979,7 +7262,7 @@ spec:
description: |-
iscsi represents an ISCSI Disk resource that is attached to a
kubelet's host machine and then exposed to the pod.
More info: https://examples.k8s.io/volumes/iscsi/README.md
More info: https://kubernetes.io/docs/concepts/storage/volumes/#iscsi
properties:
chapAuthDiscovery:
description: chapAuthDiscovery defines whether support iSCSI Discovery CHAP authentication
@@ -7369,6 +7652,110 @@ spec:
type: array
x-kubernetes-list-type: atomic
type: object
podCertificate:
description: |-
Projects an auto-rotating credential bundle (private key and certificate
chain) that the pod can use either as a TLS client or server.
Kubelet generates a private key and uses it to send a
PodCertificateRequest to the named signer. Once the signer approves the
request and issues a certificate chain, Kubelet writes the key and
certificate chain to the pod filesystem. The pod does not start until
certificates have been issued for each podCertificate projected volume
source in its spec.
Kubelet will begin trying to rotate the certificate at the time indicated
by the signer using the PodCertificateRequest.Status.BeginRefreshAt
timestamp.
Kubelet can write a single file, indicated by the credentialBundlePath
field, or separate files, indicated by the keyPath and
certificateChainPath fields.
The credential bundle is a single file in PEM format. The first PEM
entry is the private key (in PKCS#8 format), and the remaining PEM
entries are the certificate chain issued by the signer (typically,
signers will return their certificate chain in leaf-to-root order).
Prefer using the credential bundle format, since your application code
can read it atomically. If you use keyPath and certificateChainPath,
your application must make two separate file reads. If these coincide
with a certificate rotation, it is possible that the private key and leaf
certificate you read may not correspond to each other. Your application
will need to check for this condition, and re-read until they are
consistent.
The named signer chooses the format of the certificate it
issues; consult the signer implementation's documentation to learn how to
use the certificates it issues.
properties:
certificateChainPath:
description: |-
Write the certificate chain at this path in the projected volume.
Most applications should use credentialBundlePath. When using keyPath
and certificateChainPath, your application needs to check that the key
and leaf certificate are consistent, because it is possible to read the
files mid-rotation.
type: string
credentialBundlePath:
description: |-
Write the credential bundle at this path in the projected volume.
The credential bundle is a single file that contains multiple PEM blocks.
The first PEM block is a PRIVATE KEY block, containing a PKCS#8 private
key.
The remaining blocks are CERTIFICATE blocks, containing the issued
certificate chain from the signer (leaf and any intermediates).
Using credentialBundlePath lets your Pod's application code make a single
atomic read that retrieves a consistent key and certificate chain. If you
project them to separate files, your application code will need to
additionally check that the leaf certificate was issued to the key.
type: string
keyPath:
description: |-
Write the key at this path in the projected volume.
Most applications should use credentialBundlePath. When using keyPath
and certificateChainPath, your application needs to check that the key
and leaf certificate are consistent, because it is possible to read the
files mid-rotation.
type: string
keyType:
description: |-
The type of keypair Kubelet will generate for the pod.
Valid values are "RSA3072", "RSA4096", "ECDSAP256", "ECDSAP384",
"ECDSAP521", and "ED25519".
type: string
maxExpirationSeconds:
description: |-
maxExpirationSeconds is the maximum lifetime permitted for the
certificate.
Kubelet copies this value verbatim into the PodCertificateRequests it
generates for this projection.
If omitted, kube-apiserver will set it to 86400 (24 hours). kube-apiserver
will reject values shorter than 3600 (1 hour). The maximum allowable
value is 7862400 (91 days).
The signer implementation is then free to issue a certificate with any
lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600
seconds (1 hour). This constraint is enforced by kube-apiserver.
`kubernetes.io` signers will never issue certificates with a lifetime
longer than 24 hours.
format: int32
type: integer
signerName:
description: Kubelet's generated CSRs will be addressed to this signer.
type: string
required:
- keyType
- signerName
type: object
secret:
description: secret information about the secret data to project
properties:
@@ -7498,7 +7885,6 @@ spec:
description: |-
rbd represents a Rados Block Device mount on the host that shares a pod's lifetime.
Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported.
More info: https://examples.k8s.io/volumes/rbd/README.md
properties:
fsType:
description: |-
@@ -7778,6 +8164,53 @@ spec:
required:
- containers
type: object
vaultConfig:
properties:
azureKeyVault:
properties:
certificatePath:
type: string
clientId:
type: string
tenantId:
type: string
url:
type: string
required:
- certificatePath
- clientId
- tenantId
- url
type: object
proxy:
properties:
http:
properties:
credentialSecretRef:
type: string
url:
description: Required
type: string
type: object
https:
properties:
credentialSecretRef:
type: string
url:
description: Required
type: string
type: object
noProxy:
items:
type: string
type: array
type: object
type:
description: |-
VaultType represents the type of vault that can be used in the application.
It is used to identify which vault integration should be used to resolve secrets.
type: string
type: object
required:
- githubConfigSecret
- githubConfigUrl
@@ -7812,4 +8245,3 @@ spec:
storage: true
subresources:
status: {}
preserveUnknownFields: false

View File

@@ -129,11 +129,3 @@ Create the name of the service account to use
{{- define "gha-runner-scale-set-controller.leaderElectionRoleBinding" -}}
{{- include "gha-runner-scale-set-controller.fullname" . }}-leader-election
{{- end }}
{{- define "gha-runner-scale-set-controller.imagePullSecretsNames" -}}
{{- $names := list }}
{{- range $k, $v := . }}
{{- $names = append $names $v.name }}
{{- end }}
{{- $names | join ","}}
{{- end }}

View File

@@ -54,7 +54,9 @@ spec:
- "--leader-election-id={{ include "gha-runner-scale-set-controller.fullname" . }}"
{{- end }}
{{- with .Values.imagePullSecrets }}
- "--auto-scaler-image-pull-secrets={{ include "gha-runner-scale-set-controller.imagePullSecretsNames" . }}"
{{- range . }}
- "--auto-scaler-image-pull-secrets={{- .name -}}"
{{- end }}
{{- end }}
{{- with .Values.flags.logLevel }}
- "--log-level={{ . }}"

View File

@@ -683,7 +683,8 @@ func TestTemplate_ControllerDeployment_ForwardImagePullSecrets(t *testing.T) {
expectedArgs := []string{
"--auto-scaling-runner-set-only",
"--auto-scaler-image-pull-secrets=dockerhub,ghcr",
"--auto-scaler-image-pull-secrets=dockerhub",
"--auto-scaler-image-pull-secrets=ghcr",
"--log-level=debug",
"--log-format=text",
"--update-strategy=immediate",
@@ -1079,6 +1080,7 @@ func TestDeployment_excludeLabelPropagationPrefixes(t *testing.T) {
assert.Contains(t, container.Args, "--exclude-label-propagation-prefix=prefix.com/")
assert.Contains(t, container.Args, "--exclude-label-propagation-prefix=complete.io/label")
}
func TestNamespaceOverride(t *testing.T) {
t.Parallel()

View File

@@ -15,13 +15,13 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.11.0
version: 0.13.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "0.11.0"
appVersion: "0.13.0"
home: https://github.com/actions/actions-runner-controller

View File

@@ -62,12 +62,12 @@ app.kubernetes.io/instance: {{ include "gha-runner-scale-set.scale-set-name" . }
{{- fail "Values.githubConfigSecret is required for setting auth with GitHub server." }}
{{- end }}
{{- else }}
{{- include "gha-runner-scale-set.fullname" . }}-github-secret
{{- include "gha-runner-scale-set.fullname" . | replace "_" "-" }}-github-secret
{{- end }}
{{- end }}
{{- define "gha-runner-scale-set.noPermissionServiceAccountName" -}}
{{- include "gha-runner-scale-set.fullname" . }}-no-permission
{{- include "gha-runner-scale-set.fullname" . | replace "_" "-" }}-no-permission
{{- end }}
{{- define "gha-runner-scale-set.kubeModeRoleName" -}}
@@ -79,7 +79,7 @@ app.kubernetes.io/instance: {{ include "gha-runner-scale-set.scale-set-name" . }
{{- end }}
{{- define "gha-runner-scale-set.kubeModeServiceAccountName" -}}
{{- include "gha-runner-scale-set.fullname" . }}-kube-mode
{{- include "gha-runner-scale-set.fullname" . | replace "_" "-" }}-kube-mode
{{- end }}
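The added `replace "_" "-"` calls sanitize derived resource names so they remain valid Kubernetes names. An illustrative before/after, assuming a release whose computed fullname contains an underscore:

# computed fullname (hypothetical): my_runners-gha-rs
# names rendered by the helpers above after sanitization:
#   my-runners-gha-rs-github-secret
#   my-runners-gha-rs-no-permission
#   my-runners-gha-rs-kube-mode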
{{- define "gha-runner-scale-set.dind-init-container" -}}
@@ -106,6 +106,17 @@ env:
value: "123"
securityContext:
privileged: true
{{- if (ge (.Capabilities.KubeVersion.Minor | int) 29) }}
restartPolicy: Always
startupProbe:
exec:
command:
- docker
- info
initialDelaySeconds: 0
failureThreshold: 24
periodSeconds: 5
{{- end }}
volumeMounts:
- name: work
mountPath: /home/runner/_work
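On clusters reporting a minor version of 29 or newer, the block above turns dind into a native sidecar: an init container with restartPolicy: Always, held back by a startup probe until the daemon answers. A sketch of the rendered shape; the image is illustrative, since the chart supplies its own:

initContainers:
  - name: dind
    image: docker:dind                # illustrative
    restartPolicy: Always             # native sidecar: keeps running for the pod's lifetime
    startupProbe:
      exec:
        command: ["docker", "info"]   # the runner starts only after the daemon responds
      initialDelaySeconds: 0
      failureThreshold: 24
      periodSeconds: 5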
@@ -366,6 +377,101 @@ volumeMounts:
{{- end }}
{{- end }}
{{- define "gha-runner-scale-set.kubernetes-novolume-mode-runner-container" -}}
{{- $tlsConfig := (default (dict) .Values.githubServerTLS) }}
{{- range $i, $container := .Values.template.spec.containers }}
{{- if eq $container.name "runner" }}
{{- $setRunnerImage := "" }}
{{- range $key, $val := $container }}
{{- if and (ne $key "env") (ne $key "volumeMounts") (ne $key "name") }}
{{- if eq $key "image" }}
{{- $setRunnerImage = $val }}
{{- end }}
{{ $key }}: {{ $val | toYaml | nindent 2 }}
{{- end }}
{{- end }}
{{- $setContainerHooks := 1 }}
{{- $setPodName := 1 }}
{{- $setRequireJobContainer := 1 }}
{{- $setActionsRunnerImage := 1 }}
{{- $setNodeExtraCaCerts := 0 }}
{{- $setRunnerUpdateCaCerts := 0 }}
{{- if $tlsConfig.runnerMountPath }}
{{- $setNodeExtraCaCerts = 1 }}
{{- $setRunnerUpdateCaCerts = 1 }}
{{- end }}
env:
{{- with $container.env }}
{{- range $i, $env := . }}
{{- if eq $env.name "ACTIONS_RUNNER_CONTAINER_HOOKS" }}
{{- $setContainerHooks = 0 }}
{{- end }}
{{- if eq $env.name "ACTIONS_RUNNER_IMAGE" }}
{{- $setActionsRunnerImage = 0 }}
{{- end }}
{{- if eq $env.name "ACTIONS_RUNNER_POD_NAME" }}
{{- $setPodName = 0 }}
{{- end }}
{{- if eq $env.name "ACTIONS_RUNNER_REQUIRE_JOB_CONTAINER" }}
{{- $setRequireJobContainer = 0 }}
{{- end }}
{{- if eq $env.name "NODE_EXTRA_CA_CERTS" }}
{{- $setNodeExtraCaCerts = 0 }}
{{- end }}
{{- if eq $env.name "RUNNER_UPDATE_CA_CERTS" }}
{{- $setRunnerUpdateCaCerts = 0 }}
{{- end }}
- {{ $env | toYaml | nindent 4 }}
{{- end }}
{{- end }}
{{- if $setContainerHooks }}
- name: ACTIONS_RUNNER_CONTAINER_HOOKS
value: /home/runner/k8s-novolume/index.js
{{- end }}
{{- if $setPodName }}
- name: ACTIONS_RUNNER_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
{{- end }}
{{- if $setRequireJobContainer }}
- name: ACTIONS_RUNNER_REQUIRE_JOB_CONTAINER
value: "true"
{{- end }}
{{- if $setActionsRunnerImage }}
- name: ACTIONS_RUNNER_IMAGE
value: "{{- $setRunnerImage -}}"
{{- end }}
{{- if $setNodeExtraCaCerts }}
- name: NODE_EXTRA_CA_CERTS
value: {{ clean (print $tlsConfig.runnerMountPath "/" $tlsConfig.certificateFrom.configMapKeyRef.key) }}
{{- end }}
{{- if $setRunnerUpdateCaCerts }}
- name: RUNNER_UPDATE_CA_CERTS
value: "1"
{{- end }}
{{- $mountGitHubServerTLS := 0 }}
{{- if $tlsConfig.runnerMountPath }}
{{- $mountGitHubServerTLS = 1 }}
{{- end }}
volumeMounts:
{{- with $container.volumeMounts }}
{{- range $i, $volMount := . }}
{{- if eq $volMount.name "github-server-tls-cert" }}
{{- $mountGitHubServerTLS = 0 }}
{{- end }}
- {{ $volMount | toYaml | nindent 4 }}
{{- end }}
{{- end }}
{{- if $mountGitHubServerTLS }}
- name: github-server-tls-cert
mountPath: {{ clean (print $tlsConfig.runnerMountPath "/" $tlsConfig.certificateFrom.configMapKeyRef.key) }}
subPath: {{ $tlsConfig.certificateFrom.configMapKeyRef.key }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}
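Opting into the new mode from a values file is a one-line change; a sketch, with the env the helper above injects noted as comments:

# values.yaml
containerMode:
  type: kubernetes-novolume

# env injected into the runner container unless already set by the user:
#   ACTIONS_RUNNER_CONTAINER_HOOKS=/home/runner/k8s-novolume/index.js
#   ACTIONS_RUNNER_POD_NAME              (from metadata.name)
#   ACTIONS_RUNNER_REQUIRE_JOB_CONTAINER=true
#   ACTIONS_RUNNER_IMAGE                 (from the runner container image)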
{{- define "gha-runner-scale-set.default-mode-runner-containers" -}}
{{- $tlsConfig := (default (dict) .Values.githubServerTLS) }}
{{- range $i, $container := .Values.template.spec.containers }}

View File

@@ -8,26 +8,45 @@ metadata:
{{- if gt (len (include "gha-runner-scale-set.namespace" .)) 63 }}
{{ fail "Namespace must have up to 63 characters" }}
{{- end }}
name: {{ include "gha-runner-scale-set.scale-set-name" . }}
name: {{ include "gha-runner-scale-set.scale-set-name" . | replace "_" "-" }}
namespace: {{ include "gha-runner-scale-set.namespace" . }}
labels:
{{- $base := include "gha-runner-scale-set.labels" . | fromYaml }}
{{- $extra := dict "app.kubernetes.io/component" "" }}
{{- $reserved := merge $base $extra }}
{{- with .Values.labels }}
{{- toYaml . | nindent 4 }}
{{- range $k, $v := . }}
{{- if not (or (hasKey $reserved $k) (hasPrefix "actions.github.com/" $k)) }}
{{ $k }}: {{ $v | quote }}
{{- end }}
{{- end }}
{{- end }}
{{- if $hasCustomResourceMeta }}
{{- with .Values.resourceMeta.autoscalingRunnerSet.labels }}
{{- toYaml . | nindent 4 }}
{{- range $k, $v := . }}
{{- if not (or (hasKey $reserved $k) (hasPrefix "actions.github.com/" $k)) }}
{{ $k }}: {{ $v | quote }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}
app.kubernetes.io/component: "autoscaling-runner-set"
{{- include "gha-runner-scale-set.labels" . | nindent 4 }}
annotations:
{{- with .Values.annotations }}
{{- toYaml . | nindent 4 }}
{{- range $k, $v := . }}
{{- if not (or (hasPrefix "actions.github.com/cleanup-" $k) (eq $k "actions.github.com/values-hash")) }}
{{ $k }}: {{ $v | quote }}
{{- end }}
{{- end }}
{{- end }}
{{- if $hasCustomResourceMeta }}
{{- with .Values.resourceMeta.autoscalingRunnerSet.annotations }}
{{- toYaml . | nindent 4 }}
{{- range $k, $v := . }}
{{- if not (or (hasPrefix "actions.github.com/cleanup-" $k) (eq $k "actions.github.com/values-hash")) }}
{{ $k }}: {{ $v | quote }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}
actions.github.com/values-hash: {{ toJson .Values | sha256sum | trunc 63 }}
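The loops above now drop user-supplied labels and annotations that collide with reserved keys. An illustrative values fragment and what survives rendering (all keys hypothetical except the reserved ones):

# values.yaml
labels:
  team: platform                           # kept: not reserved
  app.kubernetes.io/instance: my-override  # dropped: reserved chart label
  actions.github.com/scope: org            # dropped: reserved prefix
annotations:
  mycompany.com/owner: ci                  # kept
  actions.github.com/cleanup-x: "1"        # dropped: cleanup- prefix is reserved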
@@ -37,14 +56,15 @@ metadata:
{{- end }}
actions.github.com/cleanup-manager-role-binding: {{ include "gha-runner-scale-set.managerRoleBindingName" . }}
actions.github.com/cleanup-manager-role-name: {{ include "gha-runner-scale-set.managerRoleName" . }}
{{- if and $containerMode (eq $containerMode.type "kubernetes") (not .Values.template.spec.serviceAccountName) }}
{{- if and (or (eq $containerMode.type "kubernetes") (eq $containerMode.type "kubernetes-novolume")) (not .Values.template.spec.serviceAccountName) }}
actions.github.com/cleanup-kubernetes-mode-role-binding-name: {{ include "gha-runner-scale-set.kubeModeRoleBindingName" . }}
actions.github.com/cleanup-kubernetes-mode-role-name: {{ include "gha-runner-scale-set.kubeModeRoleName" . }}
actions.github.com/cleanup-kubernetes-mode-service-account-name: {{ include "gha-runner-scale-set.kubeModeServiceAccountName" . }}
{{- end }}
{{- if and (ne $containerMode.type "kubernetes") (not .Values.template.spec.serviceAccountName) }}
{{- if and (ne $containerMode.type "kubernetes") (ne $containerMode.type "kubernetes-novolume") (not .Values.template.spec.serviceAccountName) }}
actions.github.com/cleanup-no-permission-service-account-name: {{ include "gha-runner-scale-set.noPermissionServiceAccountName" . }}
{{- end }}
spec:
githubConfigUrl: {{ required ".Values.githubConfigUrl is required" (trimSuffix "/" .Values.githubConfigUrl) }}
githubConfigSecret: {{ include "gha-runner-scale-set.githubsecret" . }}
@@ -65,6 +85,24 @@ spec:
{{- end }}
{{- end }}
{{- if and .Values.keyVault .Values.keyVault.type }}
vaultConfig:
type: {{ .Values.keyVault.type }}
{{- if .Values.keyVault.proxy }}
proxy: {{- toYaml .Values.keyVault.proxy | nindent 6 }}
{{- end }}
{{- if eq .Values.keyVault.type "azure_key_vault" }}
azureKeyVault:
url: {{ .Values.keyVault.azureKeyVault.url }}
tenantId: {{ .Values.keyVault.azureKeyVault.tenantId }}
clientId: {{ .Values.keyVault.azureKeyVault.clientId }}
certificatePath: {{ .Values.keyVault.azureKeyVault.certificatePath }}
secretKey: {{ .Values.keyVault.azureKeyVault.secretKey }}
{{- else }}
{{- fail "Unsupported keyVault type: " .Values.keyVault.type }}
{{- end }}
{{- end }}
{{- if .Values.proxy }}
proxy:
{{- if .Values.proxy.http }}
@@ -138,7 +176,7 @@ spec:
restartPolicy: Never
{{- end }}
{{- $containerMode := .Values.containerMode }}
{{- if eq $containerMode.type "kubernetes" }}
{{- if or (eq $containerMode.type "kubernetes") (eq $containerMode.type "kubernetes-novolume") }}
serviceAccountName: {{ default (include "gha-runner-scale-set.kubeModeServiceAccountName" .) .Values.template.spec.serviceAccountName }}
{{- else }}
serviceAccountName: {{ default (include "gha-runner-scale-set.noPermissionServiceAccountName" .) .Values.template.spec.serviceAccountName }}
@@ -147,7 +185,11 @@ spec:
initContainers:
{{- if eq $containerMode.type "dind" }}
- name: init-dind-externals
{{- include "gha-runner-scale-set.dind-init-container" . | nindent 8 }}
{{- include "gha-runner-scale-set.dind-init-container" . | nindent 8 }}
{{- if (ge (.Capabilities.KubeVersion.Minor | int) 29) }}
- name: dind
{{- include "gha-runner-scale-set.dind-container" . | nindent 8 }}
{{- end }}
{{- end }}
{{- with .Values.template.spec.initContainers }}
{{- toYaml . | nindent 6 }}
@@ -157,18 +199,24 @@ spec:
{{- if eq $containerMode.type "dind" }}
- name: runner
{{- include "gha-runner-scale-set.dind-runner-container" . | nindent 8 }}
{{- if not (ge (.Capabilities.KubeVersion.Minor | int) 29) }}
- name: dind
{{- include "gha-runner-scale-set.dind-container" . | nindent 8 }}
{{- end }}
{{- include "gha-runner-scale-set.non-runner-non-dind-containers" . | nindent 6 }}
{{- else if eq $containerMode.type "kubernetes" }}
- name: runner
{{- include "gha-runner-scale-set.kubernetes-mode-runner-container" . | nindent 8 }}
{{- include "gha-runner-scale-set.non-runner-containers" . | nindent 6 }}
{{- else if eq $containerMode.type "kubernetes-novolume" }}
- name: runner
{{- include "gha-runner-scale-set.kubernetes-novolume-mode-runner-container" . | nindent 8 }}
{{- include "gha-runner-scale-set.non-runner-containers" . | nindent 6 }}
{{- else }}
{{- include "gha-runner-scale-set.default-mode-runner-containers" . | nindent 6 }}
{{- end }}
{{- $tlsConfig := (default (dict) .Values.githubServerTLS) }}
{{- if or .Values.template.spec.volumes (eq $containerMode.type "dind") (eq $containerMode.type "kubernetes") $tlsConfig.runnerMountPath }}
{{- if or .Values.template.spec.volumes (eq $containerMode.type "dind") (eq $containerMode.type "kubernetes") (eq $containerMode.type "kubernetes-novolume") $tlsConfig.runnerMountPath }}
volumes:
{{- if $tlsConfig.runnerMountPath }}
{{- include "gha-runner-scale-set.tls-volume" $tlsConfig | nindent 6 }}

View File

@@ -6,8 +6,15 @@ metadata:
name: {{ include "gha-runner-scale-set.githubsecret" . }}
namespace: {{ include "gha-runner-scale-set.namespace" . }}
labels:
{{- $base := include "gha-runner-scale-set.labels" . | fromYaml }}
{{- $extra := dict "app.kubernetes.io/component" "" }}
{{- $reserved := merge $base $extra }}
{{- with .Values.labels }}
{{- toYaml . | nindent 4 }}
{{- range $k, $v := . }}
{{- if not (or (hasKey $reserved $k) (hasPrefix "actions.github.com/" $k)) }}
{{ $k }}: {{ $v | quote }}
{{- end }}
{{- end }}
{{- end }}
{{- if $hasCustomResourceMeta }}
{{- with .Values.resourceMeta.githubConfigSecret.labels }}

View File

@@ -1,6 +1,6 @@
{{- $containerMode := .Values.containerMode }}
{{- $hasCustomResourceMeta := (and .Values.resourceMeta .Values.resourceMeta.kubernetesModeRole) }}
{{- if and (eq $containerMode.type "kubernetes") (not .Values.template.spec.serviceAccountName) }}
{{- if and (or (eq $containerMode.type "kubernetes") (eq $containerMode.type "kubernetes-novolume")) (not .Values.template.spec.serviceAccountName) }}
# default permission for runner pod service account in kubernetes mode (container hook)
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
@@ -8,8 +8,15 @@ metadata:
name: {{ include "gha-runner-scale-set.kubeModeRoleName" . }}
namespace: {{ include "gha-runner-scale-set.namespace" . }}
labels:
{{- $base := include "gha-runner-scale-set.labels" . | fromYaml }}
{{- $extra := dict "app.kubernetes.io/component" "" }}
{{- $reserved := merge $base $extra }}
{{- with .Values.labels }}
{{- toYaml . | nindent 4 }}
{{- range $k, $v := . }}
{{- if not (or (hasKey $reserved $k) (hasPrefix "actions.github.com/" $k)) }}
{{ $k }}: {{ $v | quote }}
{{- end }}
{{- end }}
{{- end }}
{{- if $hasCustomResourceMeta }}
{{- with .Values.resourceMeta.kubernetesModeRole.labels }}
@@ -38,9 +45,11 @@ rules:
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get", "list", "watch",]
{{- if ne $containerMode.type "kubernetes-novolume" }}
- apiGroups: ["batch"]
resources: ["jobs"]
verbs: ["get", "list", "create", "delete"]
{{- end }}
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list", "create", "delete"]

View File

@@ -1,14 +1,21 @@
{{- $containerMode := .Values.containerMode }}
{{- $hasCustomResourceMeta := (and .Values.resourceMeta .Values.resourceMeta.kubernetesModeRoleBinding) }}
{{- if and (eq $containerMode.type "kubernetes") (not .Values.template.spec.serviceAccountName) }}
{{- if and (or (eq $containerMode.type "kubernetes") (eq $containerMode.type "kubernetes-novolume")) (not .Values.template.spec.serviceAccountName) }}
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: {{ include "gha-runner-scale-set.kubeModeRoleBindingName" . }}
namespace: {{ include "gha-runner-scale-set.namespace" . }}
labels:
{{- $base := include "gha-runner-scale-set.labels" . | fromYaml }}
{{- $extra := dict "app.kubernetes.io/component" "" }}
{{- $reserved := merge $base $extra }}
{{- with .Values.labels }}
{{- toYaml . | nindent 4 }}
{{- range $k, $v := . }}
{{- if not (or (hasKey $reserved $k) (hasPrefix "actions.github.com/" $k)) }}
{{ $k }}: {{ $v | quote }}
{{- end }}
{{- end }}
{{- end }}
{{- if $hasCustomResourceMeta }}
{{- with .Values.resourceMeta.kubernetesModeRoleBinding.labels }}

View File

@@ -1,6 +1,6 @@
{{- $containerMode := .Values.containerMode }}
{{- $hasCustomResourceMeta := (and .Values.resourceMeta .Values.resourceMeta.kubernetesModeServiceAccount) }}
{{- if and (eq $containerMode.type "kubernetes") (not .Values.template.spec.serviceAccountName) }}
{{- if and (or (eq $containerMode.type "kubernetes") (eq $containerMode.type "kubernetes-novolume")) (not .Values.template.spec.serviceAccountName) }}
apiVersion: v1
kind: ServiceAccount
metadata:
@@ -18,8 +18,15 @@ metadata:
{{- end }}
{{- end }}
labels:
{{- $base := include "gha-runner-scale-set.labels" . | fromYaml }}
{{- $extra := dict "app.kubernetes.io/component" "" }}
{{- $reserved := merge $base $extra }}
{{- with .Values.labels }}
{{- toYaml . | nindent 4 }}
{{- range $k, $v := . }}
{{- if not (or (hasKey $reserved $k) (hasPrefix "actions.github.com/" $k)) }}
{{ $k }}: {{ $v | quote }}
{{- end }}
{{- end }}
{{- end }}
{{- if $hasCustomResourceMeta }}
{{- with .Values.resourceMeta.kubernetesModeServiceAccount.labels }}

View File

@@ -5,8 +5,15 @@ metadata:
name: {{ include "gha-runner-scale-set.managerRoleName" . }}
namespace: {{ include "gha-runner-scale-set.namespace" . }}
labels:
{{- $base := include "gha-runner-scale-set.labels" . | fromYaml }}
{{- $extra := dict "app.kubernetes.io/component" "manager-role" }}
{{- $reserved := merge $base $extra }}
{{- with .Values.labels }}
{{- toYaml . | nindent 4 }}
{{- range $k, $v := . }}
{{- if not (or (hasKey $reserved $k) (hasPrefix "actions.github.com/" $k)) }}
{{ $k }}: {{ $v | quote }}
{{- end }}
{{- end }}
{{- end }}
{{- if $hasCustomResourceMeta }}
{{- with .Values.resourceMeta.managerRole.labels }}

View File

@@ -5,8 +5,15 @@ metadata:
name: {{ include "gha-runner-scale-set.managerRoleBindingName" . }}
namespace: {{ include "gha-runner-scale-set.namespace" . }}
labels:
{{- $base := include "gha-runner-scale-set.labels" . | fromYaml }}
{{- $extra := dict "app.kubernetes.io/component" "manager-role-binding" }}
{{- $reserved := merge $base $extra }}
{{- with .Values.labels }}
{{- toYaml . | nindent 4 }}
{{- range $k, $v := . }}
{{- if not (or (hasKey $reserved $k) (hasPrefix "actions.github.com/" $k)) }}
{{ $k }}: {{ $v | quote }}
{{- end }}
{{- end }}
{{- end }}
{{- if $hasCustomResourceMeta }}
{{- with .Values.resourceMeta.managerRoleBinding.labels }}

View File

@@ -7,8 +7,15 @@ metadata:
name: {{ include "gha-runner-scale-set.noPermissionServiceAccountName" . }}
namespace: {{ include "gha-runner-scale-set.namespace" . }}
labels:
{{- $base := include "gha-runner-scale-set.labels" . | fromYaml }}
{{- $extra := dict "app.kubernetes.io/component" "" }}
{{- $reserved := merge $base $extra }}
{{- with .Values.labels }}
{{- toYaml . | nindent 4 }}
{{- range $k, $v := . }}
{{- if not (or (hasKey $reserved $k) (hasPrefix "actions.github.com/" $k)) }}
{{ $k }}: {{ $v | quote }}
{{- end }}
{{- end }}
{{- end }}
{{- if $hasCustomResourceMeta }}
{{- with .Values.resourceMeta.noPermissionServiceAccount.labels }}

View File

@@ -204,7 +204,6 @@ func TestTemplateRenderedSetServiceAccountToNoPermission(t *testing.T) {
func TestTemplateRenderedSetServiceAccountToKubeMode(t *testing.T) {
t.Parallel()
// Path to the helm chart we will test
helmChartPath, err := filepath.Abs("../../gha-runner-scale-set")
require.NoError(t, err)
@@ -270,6 +269,72 @@ func TestTemplateRenderedSetServiceAccountToKubeMode(t *testing.T) {
assert.Equal(t, expectedServiceAccountName, ars.Annotations[actionsgithubcom.AnnotationKeyKubernetesModeServiceAccountName])
}
func TestTemplateRenderedSetServiceAccountToKubeNoVolumeMode(t *testing.T) {
t.Parallel()
// Path to the helm chart we will test
helmChartPath, err := filepath.Abs("../../gha-runner-scale-set")
require.NoError(t, err)
releaseName := "test-runners"
namespaceName := "test-" + strings.ToLower(random.UniqueId())
options := &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345",
"containerMode.type": "kubernetes-novolume",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/kube_mode_serviceaccount.yaml"})
var serviceAccount corev1.ServiceAccount
helm.UnmarshalK8SYaml(t, output, &serviceAccount)
assert.Equal(t, namespaceName, serviceAccount.Namespace)
assert.Equal(t, "test-runners-gha-rs-kube-mode", serviceAccount.Name)
assert.Equal(t, "actions.github.com/cleanup-protection", serviceAccount.Finalizers[0])
output = helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/kube_mode_role.yaml"})
var role rbacv1.Role
helm.UnmarshalK8SYaml(t, output, &role)
assert.Equal(t, namespaceName, role.Namespace)
assert.Equal(t, "test-runners-gha-rs-kube-mode", role.Name)
assert.Equal(t, "actions.github.com/cleanup-protection", role.Finalizers[0])
assert.Len(t, role.Rules, 4, "kube mode role should have 4 rules")
assert.Equal(t, "pods", role.Rules[0].Resources[0])
assert.Equal(t, "pods/exec", role.Rules[1].Resources[0])
assert.Equal(t, "pods/log", role.Rules[2].Resources[0])
assert.Equal(t, "secrets", role.Rules[3].Resources[0])
output = helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/kube_mode_role_binding.yaml"})
var roleBinding rbacv1.RoleBinding
helm.UnmarshalK8SYaml(t, output, &roleBinding)
assert.Equal(t, namespaceName, roleBinding.Namespace)
assert.Equal(t, "test-runners-gha-rs-kube-mode", roleBinding.Name)
assert.Len(t, roleBinding.Subjects, 1)
assert.Equal(t, "test-runners-gha-rs-kube-mode", roleBinding.Subjects[0].Name)
assert.Equal(t, namespaceName, roleBinding.Subjects[0].Namespace)
assert.Equal(t, "test-runners-gha-rs-kube-mode", roleBinding.RoleRef.Name)
assert.Equal(t, "Role", roleBinding.RoleRef.Kind)
assert.Equal(t, "actions.github.com/cleanup-protection", serviceAccount.Finalizers[0])
output = helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"})
var ars v1alpha1.AutoscalingRunnerSet
helm.UnmarshalK8SYaml(t, output, &ars)
expectedServiceAccountName := "test-runners-gha-rs-kube-mode"
assert.Equal(t, expectedServiceAccountName, ars.Spec.Template.Spec.ServiceAccountName)
assert.Equal(t, expectedServiceAccountName, ars.Annotations[actionsgithubcom.AnnotationKeyKubernetesModeServiceAccountName])
}
func TestTemplateRenderedUserProvideSetServiceAccount(t *testing.T) {
t.Parallel()
@@ -728,20 +793,20 @@ func TestTemplateRenderedAutoScalingRunnerSet_DinD_ExtraInitContainers(t *testin
var ars v1alpha1.AutoscalingRunnerSet
helm.UnmarshalK8SYaml(t, output, &ars)
assert.Len(t, ars.Spec.Template.Spec.InitContainers, 3, "InitContainers should be 3")
assert.Equal(t, "kube-init", ars.Spec.Template.Spec.InitContainers[1].Name, "InitContainers[1] Name should be kube-init")
assert.Equal(t, "runner-image:latest", ars.Spec.Template.Spec.InitContainers[1].Image, "InitContainers[1] Image should be runner-image:latest")
assert.Equal(t, "sudo", ars.Spec.Template.Spec.InitContainers[1].Command[0], "InitContainers[1] Command[0] should be sudo")
assert.Equal(t, "chown", ars.Spec.Template.Spec.InitContainers[1].Command[1], "InitContainers[1] Command[1] should be chown")
assert.Equal(t, "-R", ars.Spec.Template.Spec.InitContainers[1].Command[2], "InitContainers[1] Command[2] should be -R")
assert.Equal(t, "1001:123", ars.Spec.Template.Spec.InitContainers[1].Command[3], "InitContainers[1] Command[3] should be 1001:123")
assert.Equal(t, "/home/runner/_work", ars.Spec.Template.Spec.InitContainers[1].Command[4], "InitContainers[1] Command[4] should be /home/runner/_work")
assert.Equal(t, "work", ars.Spec.Template.Spec.InitContainers[1].VolumeMounts[0].Name, "InitContainers[1] VolumeMounts[0] Name should be work")
assert.Equal(t, "/home/runner/_work", ars.Spec.Template.Spec.InitContainers[1].VolumeMounts[0].MountPath, "InitContainers[1] VolumeMounts[0] MountPath should be /home/runner/_work")
assert.Len(t, ars.Spec.Template.Spec.InitContainers, 4, "InitContainers should be 4")
assert.Equal(t, "kube-init", ars.Spec.Template.Spec.InitContainers[2].Name, "InitContainers[1] Name should be kube-init")
assert.Equal(t, "runner-image:latest", ars.Spec.Template.Spec.InitContainers[2].Image, "InitContainers[1] Image should be runner-image:latest")
assert.Equal(t, "sudo", ars.Spec.Template.Spec.InitContainers[2].Command[0], "InitContainers[1] Command[0] should be sudo")
assert.Equal(t, "chown", ars.Spec.Template.Spec.InitContainers[2].Command[1], "InitContainers[1] Command[1] should be chown")
assert.Equal(t, "-R", ars.Spec.Template.Spec.InitContainers[2].Command[2], "InitContainers[1] Command[2] should be -R")
assert.Equal(t, "1001:123", ars.Spec.Template.Spec.InitContainers[2].Command[3], "InitContainers[1] Command[3] should be 1001:123")
assert.Equal(t, "/home/runner/_work", ars.Spec.Template.Spec.InitContainers[2].Command[4], "InitContainers[1] Command[4] should be /home/runner/_work")
assert.Equal(t, "work", ars.Spec.Template.Spec.InitContainers[2].VolumeMounts[0].Name, "InitContainers[1] VolumeMounts[0] Name should be work")
assert.Equal(t, "/home/runner/_work", ars.Spec.Template.Spec.InitContainers[2].VolumeMounts[0].MountPath, "InitContainers[1] VolumeMounts[0] MountPath should be /home/runner/_work")
assert.Equal(t, "ls", ars.Spec.Template.Spec.InitContainers[2].Name, "InitContainers[2] Name should be ls")
assert.Equal(t, "ubuntu:latest", ars.Spec.Template.Spec.InitContainers[2].Image, "InitContainers[2] Image should be ubuntu:latest")
assert.Equal(t, "ls", ars.Spec.Template.Spec.InitContainers[2].Command[0], "InitContainers[2] Command[0] should be ls")
assert.Equal(t, "ls", ars.Spec.Template.Spec.InitContainers[3].Name, "InitContainers[2] Name should be ls")
assert.Equal(t, "ubuntu:latest", ars.Spec.Template.Spec.InitContainers[3].Image, "InitContainers[2] Image should be ubuntu:latest")
assert.Equal(t, "ls", ars.Spec.Template.Spec.InitContainers[3].Command[0], "InitContainers[2] Command[0] should be ls")
}
func TestTemplateRenderedAutoScalingRunnerSet_DinD_ExtraVolumes(t *testing.T) {
@@ -860,13 +925,26 @@ func TestTemplateRenderedAutoScalingRunnerSet_EnableDinD(t *testing.T) {
assert.NotNil(t, ars.Spec.Template.Spec, "Template.Spec should not be nil")
assert.Len(t, ars.Spec.Template.Spec.InitContainers, 1, "Template.Spec should have 1 init container")
assert.Len(t, ars.Spec.Template.Spec.InitContainers, 2, "Template.Spec should have 2 init containers")
assert.Equal(t, "init-dind-externals", ars.Spec.Template.Spec.InitContainers[0].Name)
assert.Equal(t, "ghcr.io/actions/actions-runner:latest", ars.Spec.Template.Spec.InitContainers[0].Image)
assert.Equal(t, "cp", ars.Spec.Template.Spec.InitContainers[0].Command[0])
assert.Equal(t, "-r /home/runner/externals/. /home/runner/tmpDir/", strings.Join(ars.Spec.Template.Spec.InitContainers[0].Args, " "))
assert.Len(t, ars.Spec.Template.Spec.Containers, 2, "Template.Spec should have 2 container")
assert.Equal(t, "dind", ars.Spec.Template.Spec.InitContainers[1].Name)
assert.Equal(t, "docker:dind", ars.Spec.Template.Spec.InitContainers[1].Image)
assert.True(t, *ars.Spec.Template.Spec.InitContainers[1].SecurityContext.Privileged)
assert.Len(t, ars.Spec.Template.Spec.InitContainers[1].VolumeMounts, 3, "The dind container should have 3 volume mounts, dind-sock, work and externals")
assert.Equal(t, "work", ars.Spec.Template.Spec.InitContainers[1].VolumeMounts[0].Name)
assert.Equal(t, "/home/runner/_work", ars.Spec.Template.Spec.InitContainers[1].VolumeMounts[0].MountPath)
assert.Equal(t, "dind-sock", ars.Spec.Template.Spec.InitContainers[1].VolumeMounts[1].Name)
assert.Equal(t, "/var/run", ars.Spec.Template.Spec.InitContainers[1].VolumeMounts[1].MountPath)
assert.Equal(t, "dind-externals", ars.Spec.Template.Spec.InitContainers[1].VolumeMounts[2].Name)
assert.Equal(t, "/home/runner/externals", ars.Spec.Template.Spec.InitContainers[1].VolumeMounts[2].MountPath)
assert.Len(t, ars.Spec.Template.Spec.Containers, 1, "Template.Spec should have 1 container")
assert.Equal(t, "runner", ars.Spec.Template.Spec.Containers[0].Name)
assert.Equal(t, "ghcr.io/actions/actions-runner:latest", ars.Spec.Template.Spec.Containers[0].Image)
assert.Len(t, ars.Spec.Template.Spec.Containers[0].Env, 2, "The runner container should have 2 env vars, DOCKER_HOST and RUNNER_WAIT_FOR_DOCKER_IN_SECONDS")
@@ -883,19 +961,6 @@ func TestTemplateRenderedAutoScalingRunnerSet_EnableDinD(t *testing.T) {
assert.Equal(t, "dind-sock", ars.Spec.Template.Spec.Containers[0].VolumeMounts[1].Name)
assert.Equal(t, "/var/run", ars.Spec.Template.Spec.Containers[0].VolumeMounts[1].MountPath)
assert.Equal(t, "dind", ars.Spec.Template.Spec.Containers[1].Name)
assert.Equal(t, "docker:dind", ars.Spec.Template.Spec.Containers[1].Image)
assert.True(t, *ars.Spec.Template.Spec.Containers[1].SecurityContext.Privileged)
assert.Len(t, ars.Spec.Template.Spec.Containers[1].VolumeMounts, 3, "The dind container should have 3 volume mounts, dind-sock, work and externals")
assert.Equal(t, "work", ars.Spec.Template.Spec.Containers[1].VolumeMounts[0].Name)
assert.Equal(t, "/home/runner/_work", ars.Spec.Template.Spec.Containers[1].VolumeMounts[0].MountPath)
assert.Equal(t, "dind-sock", ars.Spec.Template.Spec.Containers[1].VolumeMounts[1].Name)
assert.Equal(t, "/var/run", ars.Spec.Template.Spec.Containers[1].VolumeMounts[1].MountPath)
assert.Equal(t, "dind-externals", ars.Spec.Template.Spec.Containers[1].VolumeMounts[2].Name)
assert.Equal(t, "/home/runner/externals", ars.Spec.Template.Spec.Containers[1].VolumeMounts[2].MountPath)
assert.Len(t, ars.Spec.Template.Spec.Volumes, 3, "Volumes should be 3")
assert.Equal(t, "dind-sock", ars.Spec.Template.Spec.Volumes[0].Name, "Volume name should be dind-sock")
assert.Equal(t, "dind-externals", ars.Spec.Template.Spec.Volumes[1].Name, "Volume name should be dind-externals")
@@ -961,6 +1026,65 @@ func TestTemplateRenderedAutoScalingRunnerSet_EnableKubernetesMode(t *testing.T)
assert.NotNil(t, ars.Spec.Template.Spec.Volumes[0].Ephemeral, "Template.Spec should have 1 ephemeral volume")
}
func TestTemplateRenderedAutoScalingRunnerSet_EnableKubernetesModeNoVolume(t *testing.T) {
t.Parallel()
// Path to the helm chart we will test
helmChartPath, err := filepath.Abs("../../gha-runner-scale-set")
require.NoError(t, err)
releaseName := "test-runners"
namespaceName := "test-" + strings.ToLower(random.UniqueId())
options := &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345",
"containerMode.type": "kubernetes-novolume",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"})
var ars v1alpha1.AutoscalingRunnerSet
helm.UnmarshalK8SYaml(t, output, &ars)
assert.Equal(t, namespaceName, ars.Namespace)
assert.Equal(t, "test-runners", ars.Name)
assert.Equal(t, "test-runners", ars.Labels["app.kubernetes.io/name"])
assert.Equal(t, "test-runners", ars.Labels["app.kubernetes.io/instance"])
assert.Equal(t, "https://github.com/actions", ars.Spec.GitHubConfigUrl)
assert.Equal(t, "test-runners-gha-rs-github-secret", ars.Spec.GitHubConfigSecret)
assert.Empty(t, ars.Spec.RunnerGroup, "RunnerGroup should be empty")
assert.Nil(t, ars.Spec.MinRunners, "MinRunners should be nil")
assert.Nil(t, ars.Spec.MaxRunners, "MaxRunners should be nil")
assert.Nil(t, ars.Spec.Proxy, "Proxy should be nil")
assert.Nil(t, ars.Spec.GitHubServerTLS, "GitHubServerTLS should be nil")
assert.NotNil(t, ars.Spec.Template.Spec, "Template.Spec should not be nil")
assert.Len(t, ars.Spec.Template.Spec.Containers, 1, "Template.Spec should have 1 container")
assert.Equal(t, "runner", ars.Spec.Template.Spec.Containers[0].Name)
assert.Equal(t, "ghcr.io/actions/actions-runner:latest", ars.Spec.Template.Spec.Containers[0].Image)
require.Len(t, ars.Spec.Template.Spec.Containers[0].Env, 4, "The runner container should have 4 env vars")
assert.Equal(t, "ACTIONS_RUNNER_CONTAINER_HOOKS", ars.Spec.Template.Spec.Containers[0].Env[0].Name)
assert.Equal(t, "/home/runner/k8s-novolume/index.js", ars.Spec.Template.Spec.Containers[0].Env[0].Value)
assert.Equal(t, "ACTIONS_RUNNER_POD_NAME", ars.Spec.Template.Spec.Containers[0].Env[1].Name)
assert.Equal(t, "ACTIONS_RUNNER_REQUIRE_JOB_CONTAINER", ars.Spec.Template.Spec.Containers[0].Env[2].Name)
assert.Equal(t, "true", ars.Spec.Template.Spec.Containers[0].Env[2].Value)
assert.Equal(t, "ACTIONS_RUNNER_IMAGE", ars.Spec.Template.Spec.Containers[0].Env[3].Name)
assert.Equal(t, ars.Spec.Template.Spec.Containers[0].Image, ars.Spec.Template.Spec.Containers[0].Env[3].Value)
assert.Len(t, ars.Spec.Template.Spec.Volumes, 0, "Template.Spec should have 0 volumes")
}
func TestTemplateRenderedAutoscalingRunnerSet_ListenerPodTemplate(t *testing.T) {
t.Parallel()
@@ -1140,7 +1264,7 @@ func TestTemplateRenderedWithTLS(t *testing.T) {
}
t.Run("providing githubServerTLS.runnerMountPath", func(t *testing.T) {
t.Run("mode: default", func(t *testing.T) {
t.Run("mode default", func(t *testing.T) {
options := &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
@@ -1158,7 +1282,7 @@ func TestTemplateRenderedWithTLS(t *testing.T) {
ars := render(t, options)
require.NotNil(t, ars.Spec.GitHubServerTLS)
expected := &v1alpha1.GitHubServerTLSConfig{
expected := &v1alpha1.TLSConfig{
CertificateFrom: &v1alpha1.TLSCertificateSource{
ConfigMapKeyRef: &corev1.ConfigMapKeySelector{
LocalObjectReference: corev1.LocalObjectReference{
@@ -1178,7 +1302,7 @@ func TestTemplateRenderedWithTLS(t *testing.T) {
}
}
require.NotNil(t, volume)
assert.Equal(t, "certs-configmap", volume.ConfigMap.LocalObjectReference.Name)
assert.Equal(t, "certs-configmap", volume.ConfigMap.Name)
assert.Equal(t, "cert.pem", volume.ConfigMap.Items[0].Key)
assert.Equal(t, "cert.pem", volume.ConfigMap.Items[0].Path)
@@ -1199,7 +1323,7 @@ func TestTemplateRenderedWithTLS(t *testing.T) {
})
})
t.Run("mode: dind", func(t *testing.T) {
t.Run("mode dind", func(t *testing.T) {
options := &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
@@ -1218,7 +1342,7 @@ func TestTemplateRenderedWithTLS(t *testing.T) {
ars := render(t, options)
require.NotNil(t, ars.Spec.GitHubServerTLS)
expected := &v1alpha1.GitHubServerTLSConfig{
expected := &v1alpha1.TLSConfig{
CertificateFrom: &v1alpha1.TLSCertificateSource{
ConfigMapKeyRef: &corev1.ConfigMapKeySelector{
LocalObjectReference: corev1.LocalObjectReference{
@@ -1238,7 +1362,7 @@ func TestTemplateRenderedWithTLS(t *testing.T) {
}
}
require.NotNil(t, volume)
assert.Equal(t, "certs-configmap", volume.ConfigMap.LocalObjectReference.Name)
assert.Equal(t, "certs-configmap", volume.ConfigMap.Name)
assert.Equal(t, "cert.pem", volume.ConfigMap.Items[0].Key)
assert.Equal(t, "cert.pem", volume.ConfigMap.Items[0].Path)
@@ -1259,7 +1383,7 @@ func TestTemplateRenderedWithTLS(t *testing.T) {
})
})
t.Run("mode: kubernetes", func(t *testing.T) {
t.Run("mode kubernetes", func(t *testing.T) {
options := &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
@@ -1278,7 +1402,7 @@ func TestTemplateRenderedWithTLS(t *testing.T) {
ars := render(t, options)
require.NotNil(t, ars.Spec.GitHubServerTLS)
expected := &v1alpha1.GitHubServerTLSConfig{
expected := &v1alpha1.TLSConfig{
CertificateFrom: &v1alpha1.TLSCertificateSource{
ConfigMapKeyRef: &corev1.ConfigMapKeySelector{
LocalObjectReference: corev1.LocalObjectReference{
@@ -1298,7 +1422,67 @@ func TestTemplateRenderedWithTLS(t *testing.T) {
}
}
require.NotNil(t, volume)
assert.Equal(t, "certs-configmap", volume.ConfigMap.LocalObjectReference.Name)
assert.Equal(t, "certs-configmap", volume.ConfigMap.Name)
assert.Equal(t, "cert.pem", volume.ConfigMap.Items[0].Key)
assert.Equal(t, "cert.pem", volume.ConfigMap.Items[0].Path)
assert.Contains(t, ars.Spec.Template.Spec.Containers[0].VolumeMounts, corev1.VolumeMount{
Name: "github-server-tls-cert",
MountPath: "/runner/mount/path/cert.pem",
SubPath: "cert.pem",
})
assert.Contains(t, ars.Spec.Template.Spec.Containers[0].Env, corev1.EnvVar{
Name: "NODE_EXTRA_CA_CERTS",
Value: "/runner/mount/path/cert.pem",
})
assert.Contains(t, ars.Spec.Template.Spec.Containers[0].Env, corev1.EnvVar{
Name: "RUNNER_UPDATE_CA_CERTS",
Value: "1",
})
})
t.Run("mode kubernetes-novolume", func(t *testing.T) {
options := &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret": "pre-defined-secrets",
"githubServerTLS.certificateFrom.configMapKeyRef.name": "certs-configmap",
"githubServerTLS.certificateFrom.configMapKeyRef.key": "cert.pem",
"githubServerTLS.runnerMountPath": "/runner/mount/path",
"containerMode.type": "kubernetes-novolume",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
ars := render(t, options)
require.NotNil(t, ars.Spec.GitHubServerTLS)
expected := &v1alpha1.TLSConfig{
CertificateFrom: &v1alpha1.TLSCertificateSource{
ConfigMapKeyRef: &corev1.ConfigMapKeySelector{
LocalObjectReference: corev1.LocalObjectReference{
Name: "certs-configmap",
},
Key: "cert.pem",
},
},
}
assert.Equal(t, expected, ars.Spec.GitHubServerTLS)
var volume *corev1.Volume
for _, v := range ars.Spec.Template.Spec.Volumes {
if v.Name == "github-server-tls-cert" {
volume = &v
break
}
}
require.NotNil(t, volume)
assert.Equal(t, "certs-configmap", volume.ConfigMap.Name)
assert.Equal(t, "cert.pem", volume.ConfigMap.Items[0].Key)
assert.Equal(t, "cert.pem", volume.ConfigMap.Items[0].Path)
@@ -1321,7 +1505,7 @@ func TestTemplateRenderedWithTLS(t *testing.T) {
})
t.Run("without providing githubServerTLS.runnerMountPath", func(t *testing.T) {
t.Run("mode: default", func(t *testing.T) {
t.Run("mode default", func(t *testing.T) {
options := &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
@@ -1338,7 +1522,7 @@ func TestTemplateRenderedWithTLS(t *testing.T) {
ars := render(t, options)
require.NotNil(t, ars.Spec.GitHubServerTLS)
expected := &v1alpha1.GitHubServerTLSConfig{
expected := &v1alpha1.TLSConfig{
CertificateFrom: &v1alpha1.TLSCertificateSource{
ConfigMapKeyRef: &corev1.ConfigMapKeySelector{
LocalObjectReference: corev1.LocalObjectReference{
@@ -1376,7 +1560,7 @@ func TestTemplateRenderedWithTLS(t *testing.T) {
})
})
t.Run("mode: dind", func(t *testing.T) {
t.Run("mode dind", func(t *testing.T) {
options := &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
@@ -1394,7 +1578,7 @@ func TestTemplateRenderedWithTLS(t *testing.T) {
ars := render(t, options)
require.NotNil(t, ars.Spec.GitHubServerTLS)
expected := &v1alpha1.GitHubServerTLSConfig{
expected := &v1alpha1.TLSConfig{
CertificateFrom: &v1alpha1.TLSCertificateSource{
ConfigMapKeyRef: &corev1.ConfigMapKeySelector{
LocalObjectReference: corev1.LocalObjectReference{
@@ -1432,7 +1616,7 @@ func TestTemplateRenderedWithTLS(t *testing.T) {
})
})
t.Run("mode: kubernetes", func(t *testing.T) {
t.Run("mode kubernetes", func(t *testing.T) {
options := &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
@@ -1450,7 +1634,63 @@ func TestTemplateRenderedWithTLS(t *testing.T) {
ars := render(t, options)
require.NotNil(t, ars.Spec.GitHubServerTLS)
expected := &v1alpha1.GitHubServerTLSConfig{
expected := &v1alpha1.TLSConfig{
CertificateFrom: &v1alpha1.TLSCertificateSource{
ConfigMapKeyRef: &corev1.ConfigMapKeySelector{
LocalObjectReference: corev1.LocalObjectReference{
Name: "certs-configmap",
},
Key: "cert.pem",
},
},
}
assert.Equal(t, expected, ars.Spec.GitHubServerTLS)
var volume *corev1.Volume
for _, v := range ars.Spec.Template.Spec.Volumes {
if v.Name == "github-server-tls-cert" {
volume = &v
break
}
}
assert.Nil(t, volume)
assert.NotContains(t, ars.Spec.Template.Spec.Containers[0].VolumeMounts, corev1.VolumeMount{
Name: "github-server-tls-cert",
MountPath: "/runner/mount/path/cert.pem",
SubPath: "cert.pem",
})
assert.NotContains(t, ars.Spec.Template.Spec.Containers[0].Env, corev1.EnvVar{
Name: "NODE_EXTRA_CA_CERTS",
Value: "/runner/mount/path/cert.pem",
})
assert.NotContains(t, ars.Spec.Template.Spec.Containers[0].Env, corev1.EnvVar{
Name: "RUNNER_UPDATE_CA_CERTS",
Value: "1",
})
})
t.Run("mode kubernetes-novolume", func(t *testing.T) {
options := &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret": "pre-defined-secrets",
"githubServerTLS.certificateFrom.configMapKeyRef.name": "certs-configmap",
"githubServerTLS.certificateFrom.configMapKeyRef.key": "cert.pem",
"containerMode.type": "kubernetes-novolume",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
ars := render(t, options)
require.NotNil(t, ars.Spec.GitHubServerTLS)
expected := &v1alpha1.TLSConfig{
CertificateFrom: &v1alpha1.TLSCertificateSource{
ConfigMapKeyRef: &corev1.ConfigMapKeySelector{
LocalObjectReference: corev1.LocalObjectReference{
@@ -1826,7 +2066,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_DinDMergePodSpec(t *testing.T) {
var ars v1alpha1.AutoscalingRunnerSet
helm.UnmarshalK8SYaml(t, output, &ars)
assert.Len(t, ars.Spec.Template.Spec.Containers, 2, "There should be 2 containers")
assert.Len(t, ars.Spec.Template.Spec.Containers, 1, "There should be 1 container")
assert.Equal(t, "runner", ars.Spec.Template.Spec.Containers[0].Name, "Container name should be runner")
assert.Equal(t, "250m", ars.Spec.Template.Spec.Containers[0].Resources.Limits.Cpu().String(), "CPU Limit should be set")
assert.Equal(t, "64Mi", ars.Spec.Template.Spec.Containers[0].Resources.Limits.Memory().String(), "Memory Limit should be set")
@@ -1951,40 +2191,44 @@ func TestTemplateRenderedAutoscalingRunnerSetAnnotation_GitHubSecret(t *testing.
func TestTemplateRenderedAutoscalingRunnerSetAnnotation_KubernetesModeCleanup(t *testing.T) {
t.Parallel()
// Path to the helm chart we will test
helmChartPath, err := filepath.Abs("../../gha-runner-scale-set")
require.NoError(t, err)
for _, mode := range []string{"kubernetes", "kubernetes-novolume"} {
t.Run("containerMode "+mode, func(t *testing.T) {
// Path to the helm chart we will test
helmChartPath, err := filepath.Abs("../../gha-runner-scale-set")
require.NoError(t, err)
releaseName := "test-runners"
namespaceName := "test-" + strings.ToLower(random.UniqueId())
releaseName := "test-runners"
namespaceName := "test-" + strings.ToLower(random.UniqueId())
options := &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
"containerMode.type": "kubernetes",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
options := &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
"containerMode.type": mode,
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"})
var autoscalingRunnerSet v1alpha1.AutoscalingRunnerSet
helm.UnmarshalK8SYaml(t, output, &autoscalingRunnerSet)
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"})
var autoscalingRunnerSet v1alpha1.AutoscalingRunnerSet
helm.UnmarshalK8SYaml(t, output, &autoscalingRunnerSet)
annotationValues := map[string]string{
actionsgithubcom.AnnotationKeyGitHubSecretName: "test-runners-gha-rs-github-secret",
actionsgithubcom.AnnotationKeyManagerRoleName: "test-runners-gha-rs-manager",
actionsgithubcom.AnnotationKeyManagerRoleBindingName: "test-runners-gha-rs-manager",
actionsgithubcom.AnnotationKeyKubernetesModeServiceAccountName: "test-runners-gha-rs-kube-mode",
actionsgithubcom.AnnotationKeyKubernetesModeRoleName: "test-runners-gha-rs-kube-mode",
actionsgithubcom.AnnotationKeyKubernetesModeRoleBindingName: "test-runners-gha-rs-kube-mode",
}
annotationValues := map[string]string{
actionsgithubcom.AnnotationKeyGitHubSecretName: "test-runners-gha-rs-github-secret",
actionsgithubcom.AnnotationKeyManagerRoleName: "test-runners-gha-rs-manager",
actionsgithubcom.AnnotationKeyManagerRoleBindingName: "test-runners-gha-rs-manager",
actionsgithubcom.AnnotationKeyKubernetesModeServiceAccountName: "test-runners-gha-rs-kube-mode",
actionsgithubcom.AnnotationKeyKubernetesModeRoleName: "test-runners-gha-rs-kube-mode",
actionsgithubcom.AnnotationKeyKubernetesModeRoleBindingName: "test-runners-gha-rs-kube-mode",
}
for annotation, value := range annotationValues {
assert.Equal(t, value, autoscalingRunnerSet.Annotations[annotation], fmt.Sprintf("Annotation %q does not match the expected value", annotation))
for annotation, value := range annotationValues {
assert.Equal(t, value, autoscalingRunnerSet.Annotations[annotation], fmt.Sprintf("Annotation %q does not match the expected value", annotation))
}
})
}
}
@@ -2416,6 +2660,21 @@ func TestNamespaceOverride(t *testing.T) {
KubectlOptions: k8s.NewKubectlOptions("", "", releaseNamespace),
},
},
"kube_novolume_mode_role": {
file: "kube_mode_role.yaml",
options: &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"namespaceOverride": namespaceOverride,
"containerMode.type": "kubernetes-novolume",
"controllerServiceAccount.name": "foo",
"controllerServiceAccount.namespace": "bar",
"githubConfigSecret.github_token": "gh_token12345",
"githubConfigUrl": "https://github.com",
},
KubectlOptions: k8s.NewKubectlOptions("", "", releaseNamespace),
},
},
"kube_mode_role_binding": {
file: "kube_mode_role_binding.yaml",
options: &helm.Options{
@@ -2431,6 +2690,21 @@ func TestNamespaceOverride(t *testing.T) {
KubectlOptions: k8s.NewKubectlOptions("", "", releaseNamespace),
},
},
"kube_novolume_mode_role_binding": {
file: "kube_mode_role_binding.yaml",
options: &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"namespaceOverride": namespaceOverride,
"containerMode.type": "kubernetes-novolume",
"controllerServiceAccount.name": "foo",
"controllerServiceAccount.namespace": "bar",
"githubConfigSecret.github_token": "gh_token12345",
"githubConfigUrl": "https://github.com",
},
KubectlOptions: k8s.NewKubectlOptions("", "", releaseNamespace),
},
},
"kube_mode_serviceaccount": {
file: "kube_mode_serviceaccount.yaml",
options: &helm.Options{
@@ -2446,6 +2720,21 @@ func TestNamespaceOverride(t *testing.T) {
KubectlOptions: k8s.NewKubectlOptions("", "", releaseNamespace),
},
},
"kube_novolume_mode_serviceaccount": {
file: "kube_mode_serviceaccount.yaml",
options: &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"namespaceOverride": namespaceOverride,
"containerMode.type": "kubernetes-novolume",
"controllerServiceAccount.name": "foo",
"controllerServiceAccount.namespace": "bar",
"githubConfigSecret.github_token": "gh_token12345",
"githubConfigUrl": "https://github.com",
},
KubectlOptions: k8s.NewKubectlOptions("", "", releaseNamespace),
},
},
}
for name, tc := range tt {
@@ -2468,3 +2757,43 @@ func TestNamespaceOverride(t *testing.T) {
})
}
}
func TestAutoscalingRunnerSetCustomAnnotationsAndLabelsApplied(t *testing.T) {
t.Parallel()
// Path to the helm chart we will test
helmChartPath, err := filepath.Abs("../../gha-runner-scale-set")
require.NoError(t, err)
releaseName := "test-runners"
namespaceName := "test-" + strings.ToLower(random.UniqueId())
options := &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
"annotations.actions\\.github\\.com/vault": "azure_key_vault",
"annotations.actions\\.github\\.com/cleanup-manager-role-name": "not-propagated",
"labels.custom": "custom",
"labels.app\\.kubernetes\\.io/component": "not-propagated",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"})
var autoscalingRunnerSet v1alpha1.AutoscalingRunnerSet
helm.UnmarshalK8SYaml(t, output, &autoscalingRunnerSet)
vault := autoscalingRunnerSet.Annotations["actions.github.com/vault"]
assert.Equal(t, "azure_key_vault", vault)
custom := autoscalingRunnerSet.Labels["custom"]
assert.Equal(t, "custom", custom)
assert.NotEqual(t, "not-propagated", autoscalingRunnerSet.Annotations["actions.github.com/cleanup-manager-role-name"])
assert.NotEqual(t, "not-propagated", autoscalingRunnerSet.Labels["app.kubernetes.io/component"])
}

View File

@@ -1,12 +1,12 @@
## githubConfigUrl is the GitHub url for where you want to configure runners
## ex: https://github.com/myorg/myrepo or https://github.com/myorg
## ex: https://github.com/myorg/myrepo or https://github.com/myorg or https://github.com/enterprises/myenterprise
githubConfigUrl: ""
## githubConfigSecret is the k8s secret information to use when authenticating via the GitHub API.
## You can choose to supply:
## A) a PAT token,
## B) a GitHub App, or
## C) a pre-defined Kubernetes secret.
## C) a pre-defined secret.
## The syntax for each of these variations is documented below.
## (Variation A) When using a PAT token, the syntax is as follows:
githubConfigSecret:
@@ -17,6 +17,7 @@ githubConfigSecret:
## (Variation B) When using a GitHub App, the syntax is as follows:
# githubConfigSecret:
# # NOTE: IDs MUST be strings, use quotes
# # The github_app_id can be either an app_id or a client_id
# github_app_id: ""
# github_app_installation_id: ""
# github_app_private_key: |
@@ -27,8 +28,11 @@ githubConfigSecret:
# .
# private key line N
#
## (Variation C) When using a pre-defined Kubernetes secret in the same namespace that the gha-runner-scale-set is going to deploy,
## the syntax is as follows:
## (Variation C) When using a pre-defined secret.
## Depending on configuration, the secret is resolved either directly from Kubernetes or from a vault.
## Without a vault, the value names a Kubernetes secret in the same namespace where the gha-runner-scale-set is deployed.
## If a vault is configured, the secret name is instead used as the lookup key to fetch the app configuration from the vault.
## The syntax is as follows:
# githubConfigSecret: pre-defined-secret
## Notes on using pre-defined Kubernetes secrets:
## You need to make sure your predefined secret has all the required secret data set properly.
@@ -84,6 +88,26 @@ githubConfigSecret:
# key: ca.crt
# runnerMountPath: /usr/local/share/ca-certificates/
# keyVault:
# Available values: "azure_key_vault"
# type: ""
# Configuration for Azure Key Vault
# azure_key_vault:
# url: ""
# client_id: ""
# tenant_id: ""
# certificate_path: ""
# proxy:
# http:
# url: http://proxy.com:1234
# credentialSecretRef: proxy-auth # a secret with `username` and `password` keys
# https:
# url: http://proxy.com:1234
# credentialSecretRef: proxy-auth # a secret with `username` and `password` keys
# noProxy:
# - example.com
# - example.org
## Container mode is an object that provides out-of-box configuration
## for dind and kubernetes mode. Template will be modified as documented under the
## template object.
@@ -91,7 +115,7 @@ githubConfigSecret:
## If any customization is required for dind or kubernetes mode, containerMode should remain
## empty, and configuration should be applied to the template.
# containerMode:
# type: "dind" ## type can be set to dind or kubernetes
# type: "dind" ## type can be set to "dind", "kubernetes", or "kubernetes-novolume"
# ## the following is required when containerMode.type=kubernetes
# kubernetesModeWorkVolumeClaim:
# accessModes: ["ReadWriteOnce"]
@@ -130,7 +154,7 @@ githubConfigSecret:
# counters:
# gha_started_jobs_total:
# labels:
# ["repository", "organization", "enterprise", "job_name", "event_name"]
# ["repository", "organization", "enterprise", "job_name", "event_name", "job_workflow_ref", "job_workflow_name", "job_workflow_target"]
# gha_completed_jobs_total:
# labels:
# [
@@ -140,6 +164,9 @@ githubConfigSecret:
# "job_name",
# "event_name",
# "job_result",
# "job_workflow_ref",
# "job_workflow_name",
# "job_workflow_target",
# ]
# gauges:
# gha_assigned_jobs:
@@ -161,7 +188,7 @@ githubConfigSecret:
# histograms:
# gha_job_startup_duration_seconds:
# labels:
# ["repository", "organization", "enterprise", "job_name", "event_name"]
# ["repository", "organization", "enterprise", "job_name", "event_name","job_workflow_ref", "job_workflow_name", "job_workflow_target"]
# buckets:
# [
# 0.01,
@@ -219,6 +246,9 @@ githubConfigSecret:
# "job_name",
# "event_name",
# "job_result",
# "job_workflow_ref",
# "job_workflow_name",
# "job_workflow_target"
# ]
# buckets:
# [
@@ -283,18 +313,6 @@ template:
## volumeMounts:
## - name: dind-externals
## mountPath: /home/runner/tmpDir
## containers:
## - name: runner
## image: ghcr.io/actions/actions-runner:latest
## command: ["/home/runner/run.sh"]
## env:
## - name: DOCKER_HOST
## value: unix:///var/run/docker.sock
## volumeMounts:
## - name: work
## mountPath: /home/runner/_work
## - name: dind-sock
## mountPath: /var/run
## - name: dind
## image: docker:dind
## args:
@@ -306,6 +324,15 @@ template:
## value: "123"
## securityContext:
## privileged: true
## restartPolicy: Always
## startupProbe:
## exec:
## command:
## - docker
## - info
## initialDelaySeconds: 0
## failureThreshold: 24
## periodSeconds: 5
## volumeMounts:
## - name: work
## mountPath: /home/runner/_work
@@ -313,6 +340,20 @@ template:
## mountPath: /var/run
## - name: dind-externals
## mountPath: /home/runner/externals
## containers:
## - name: runner
## image: ghcr.io/actions/actions-runner:latest
## command: ["/home/runner/run.sh"]
## env:
## - name: DOCKER_HOST
## value: unix:///var/run/docker.sock
## - name: RUNNER_WAIT_FOR_DOCKER_IN_SECONDS
## value: "120"
## volumeMounts:
## - name: work
## mountPath: /home/runner/_work
## - name: dind-sock
## mountPath: /var/run
## volumes:
## - name: work
## emptyDir: {}
@@ -350,6 +391,25 @@ template:
## resources:
## requests:
## storage: 1Gi
######################################################################################################
## with containerMode.type=kubernetes-novolume, we will populate the template.spec with the following pod spec
## template:
## spec:
## containers:
## - name: runner
## image: ghcr.io/actions/actions-runner:latest
## command: ["/home/runner/run.sh"]
## env:
## - name: ACTIONS_RUNNER_CONTAINER_HOOKS
## value: /home/runner/k8s-novolume/index.js
## - name: ACTIONS_RUNNER_POD_NAME
## valueFrom:
## fieldRef:
## fieldPath: metadata.name
## - name: ACTIONS_RUNNER_IMAGE
## value: ghcr.io/actions/actions-runner:latest # should match the runner image
## - name: ACTIONS_RUNNER_REQUIRE_JOB_CONTAINER
## value: "true"
spec:
containers:
- name: runner

View File

@@ -17,7 +17,7 @@ import (
// App is responsible for initializing required components and running the app.
type App struct {
// configured fields
config config.Config
config *config.Config
logger logr.Logger
// initialized fields
@@ -38,8 +38,12 @@ type Worker interface {
}
func New(config config.Config) (*App, error) {
if err := config.Validate(); err != nil {
return nil, fmt.Errorf("failed to validate config: %w", err)
}
app := &App{
config: config,
config: &config,
}
ghConfig, err := actions.ParseGitHubConfigFromURL(config.ConfigureUrl)
@@ -69,8 +73,8 @@ func New(config config.Config) (*App, error) {
Repository: ghConfig.Repository,
ServerAddr: config.MetricsAddr,
ServerEndpoint: config.MetricsEndpoint,
Metrics: config.Metrics,
Logger: app.logger.WithName("metrics exporter"),
Metrics: *config.Metrics,
})
}

View File

@@ -1,6 +1,7 @@
package config
import (
"context"
"crypto/x509"
"encoding/json"
"fmt"
@@ -9,19 +10,26 @@ import (
"os"
"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1/appconfig"
"github.com/actions/actions-runner-controller/build"
"github.com/actions/actions-runner-controller/github/actions"
"github.com/actions/actions-runner-controller/logging"
"github.com/actions/actions-runner-controller/vault"
"github.com/actions/actions-runner-controller/vault/azurekeyvault"
"github.com/go-logr/logr"
"golang.org/x/net/http/httpproxy"
)
type Config struct {
ConfigureUrl string `json:"configure_url"`
AppID int64 `json:"app_id"`
AppInstallationID int64 `json:"app_installation_id"`
AppPrivateKey string `json:"app_private_key"`
Token string `json:"token"`
ConfigureUrl string `json:"configure_url"`
VaultType vault.VaultType `json:"vault_type"`
VaultLookupKey string `json:"vault_lookup_key"`
// If the VaultType is set to "azure_key_vault", this field must be populated.
AzureKeyVaultConfig *azurekeyvault.Config `json:"azure_key_vault,omitempty"`
// AppConfig contains the GitHub App configuration.
// When VaultType is set, it starts out nil and is later populated from the vault.
// Otherwise, it is populated with the GitHub App credentials from the GitHub secret.
*appconfig.AppConfig
EphemeralRunnerSetNamespace string `json:"ephemeral_runner_set_namespace"`
EphemeralRunnerSetName string `json:"ephemeral_runner_set_name"`
MaxRunners int `json:"max_runners"`
@@ -36,23 +44,58 @@ type Config struct {
Metrics *v1alpha1.MetricsConfig `json:"metrics"`
}
func Read(path string) (Config, error) {
f, err := os.Open(path)
func Read(ctx context.Context, configPath string) (*Config, error) {
f, err := os.Open(configPath)
if err != nil {
return Config{}, err
return nil, err
}
defer f.Close()
var config Config
if err := json.NewDecoder(f).Decode(&config); err != nil {
return Config{}, fmt.Errorf("failed to decode config: %w", err)
return nil, fmt.Errorf("failed to decode config: %w", err)
}
var vault vault.Vault
switch config.VaultType {
case "":
if err := config.Validate(); err != nil {
return nil, fmt.Errorf("failed to validate configuration: %v", err)
}
return &config, nil
case "azure_key_vault":
akv, err := azurekeyvault.New(*config.AzureKeyVaultConfig)
if err != nil {
return nil, fmt.Errorf("failed to create Azure Key Vault client: %w", err)
}
vault = akv
default:
return nil, fmt.Errorf("unsupported vault type: %s", config.VaultType)
}
appConfigRaw, err := vault.GetSecret(ctx, config.VaultLookupKey)
if err != nil {
return nil, fmt.Errorf("failed to get app config from vault: %w", err)
}
appConfig, err := appconfig.FromJSONString(appConfigRaw)
if err != nil {
return nil, fmt.Errorf("failed to read app config from string: %v", err)
}
config.AppConfig = appConfig
if err := config.Validate(); err != nil {
return Config{}, fmt.Errorf("failed to validate config: %w", err)
return nil, fmt.Errorf("config validation failed: %w", err)
}
return config, nil
if ctx.Err() != nil {
return nil, ctx.Err()
}
return &config, nil
}
// Validate checks the configuration for errors.
@@ -62,26 +105,30 @@ func (c *Config) Validate() error {
}
if len(c.EphemeralRunnerSetNamespace) == 0 || len(c.EphemeralRunnerSetName) == 0 {
return fmt.Errorf("EphemeralRunnerSetNamespace '%s' or EphemeralRunnerSetName '%s' is missing", c.EphemeralRunnerSetNamespace, c.EphemeralRunnerSetName)
return fmt.Errorf("EphemeralRunnerSetNamespace %q or EphemeralRunnerSetName %q is missing", c.EphemeralRunnerSetNamespace, c.EphemeralRunnerSetName)
}
if c.RunnerScaleSetId == 0 {
return fmt.Errorf("RunnerScaleSetId '%d' is missing", c.RunnerScaleSetId)
return fmt.Errorf(`RunnerScaleSetId "%d" is missing`, c.RunnerScaleSetId)
}
if c.MaxRunners < c.MinRunners {
return fmt.Errorf("MinRunners '%d' cannot be greater than MaxRunners '%d'", c.MinRunners, c.MaxRunners)
return fmt.Errorf(`MinRunners "%d" cannot be greater than MaxRunners "%d"`, c.MinRunners, c.MaxRunners)
}
hasToken := len(c.Token) > 0
hasPrivateKeyConfig := c.AppID > 0 && c.AppPrivateKey != ""
if !hasToken && !hasPrivateKeyConfig {
return fmt.Errorf("GitHub auth credential is missing, token length: '%d', appId: '%d', installationId: '%d', private key length: '%d", len(c.Token), c.AppID, c.AppInstallationID, len(c.AppPrivateKey))
if c.VaultType != "" {
if err := c.VaultType.Validate(); err != nil {
return fmt.Errorf("VaultType validation failed: %w", err)
}
if c.VaultLookupKey == "" {
return fmt.Errorf("VaultLookupKey is required when VaultType is set to %q", c.VaultType)
}
}
if hasToken && hasPrivateKeyConfig {
return fmt.Errorf("only one GitHub auth method supported at a time. Have both PAT and App auth: token length: '%d', appId: '%d', installationId: '%d', private key length: '%d", len(c.Token), c.AppID, c.AppInstallationID, len(c.AppPrivateKey))
if c.VaultType == "" && c.VaultLookupKey == "" {
if err := c.AppConfig.Validate(); err != nil {
return fmt.Errorf("AppConfig validation failed: %w", err)
}
}
return nil
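Condensed, the new vault branch of Read reduces to the helper below. This is a sketch, not code from the diff; the signatures of azurekeyvault.New, GetSecret, and appconfig.FromJSONString are inferred from their uses above:

package config

import (
	"context"
	"fmt"

	"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1/appconfig"
	"github.com/actions/actions-runner-controller/vault/azurekeyvault"
)

// resolveAppConfig distills the vault path of Read: build the Azure Key
// Vault client, fetch the raw app config by lookup key, and decode it.
func resolveAppConfig(ctx context.Context, kvCfg azurekeyvault.Config, lookupKey string) (*appconfig.AppConfig, error) {
	akv, err := azurekeyvault.New(kvCfg)
	if err != nil {
		return nil, fmt.Errorf("failed to create Azure Key Vault client: %w", err)
	}
	raw, err := akv.GetSecret(ctx, lookupKey)
	if err != nil {
		return nil, fmt.Errorf("failed to get app config from vault: %w", err)
	}
	return appconfig.FromJSONString(raw)
}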

View File

@@ -9,6 +9,7 @@ import (
"path/filepath"
"testing"
"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1/appconfig"
"github.com/actions/actions-runner-controller/cmd/ghalistener/config"
"github.com/actions/actions-runner-controller/github/actions"
"github.com/actions/actions-runner-controller/github/actions/testserver"
@@ -53,7 +54,9 @@ func TestCustomerServerRootCA(t *testing.T) {
config := config.Config{
ConfigureUrl: server.ConfigURLForOrg("myorg"),
ServerRootCA: certsString,
Token: "token",
AppConfig: &appconfig.AppConfig{
Token: "token",
},
}
client, err := config.ActionsClient(logr.Discard())
@@ -80,7 +83,9 @@ func TestProxySettings(t *testing.T) {
config := config.Config{
ConfigureUrl: "https://github.com/org/repo",
Token: "token",
AppConfig: &appconfig.AppConfig{
Token: "token",
},
}
client, err := config.ActionsClient(logr.Discard())
@@ -110,7 +115,9 @@ func TestProxySettings(t *testing.T) {
config := config.Config{
ConfigureUrl: "https://github.com/org/repo",
Token: "token",
AppConfig: &appconfig.AppConfig{
Token: "token",
},
}
client, err := config.ActionsClient(logr.Discard(), actions.WithRetryMax(0))
@@ -145,7 +152,9 @@ func TestProxySettings(t *testing.T) {
config := config.Config{
ConfigureUrl: "https://github.com/org/repo",
Token: "token",
AppConfig: &appconfig.AppConfig{
Token: "token",
},
}
client, err := config.ActionsClient(logr.Discard())

View File

@@ -1,92 +0,0 @@
package config
import (
"fmt"
"testing"
"github.com/stretchr/testify/assert"
)
func TestConfigValidationMinMax(t *testing.T) {
config := &Config{
ConfigureUrl: "github.com/some_org/some_repo",
EphemeralRunnerSetNamespace: "namespace",
EphemeralRunnerSetName: "deployment",
RunnerScaleSetId: 1,
MinRunners: 5,
MaxRunners: 2,
Token: "token",
}
err := config.Validate()
assert.ErrorContains(t, err, "MinRunners '5' cannot be greater than MaxRunners '2", "Expected error about MinRunners > MaxRunners")
}
func TestConfigValidationMissingToken(t *testing.T) {
config := &Config{
ConfigureUrl: "github.com/some_org/some_repo",
EphemeralRunnerSetNamespace: "namespace",
EphemeralRunnerSetName: "deployment",
RunnerScaleSetId: 1,
}
err := config.Validate()
expectedError := fmt.Sprintf("GitHub auth credential is missing, token length: '%d', appId: '%d', installationId: '%d', private key length: '%d", len(config.Token), config.AppID, config.AppInstallationID, len(config.AppPrivateKey))
assert.ErrorContains(t, err, expectedError, "Expected error about missing auth")
}
func TestConfigValidationAppKey(t *testing.T) {
config := &Config{
AppID: 1,
AppInstallationID: 10,
ConfigureUrl: "github.com/some_org/some_repo",
EphemeralRunnerSetNamespace: "namespace",
EphemeralRunnerSetName: "deployment",
RunnerScaleSetId: 1,
}
err := config.Validate()
expectedError := fmt.Sprintf("GitHub auth credential is missing, token length: '%d', appId: '%d', installationId: '%d', private key length: '%d", len(config.Token), config.AppID, config.AppInstallationID, len(config.AppPrivateKey))
assert.ErrorContains(t, err, expectedError, "Expected error about missing auth")
}
func TestConfigValidationOnlyOneTypeOfCredentials(t *testing.T) {
config := &Config{
AppID: 1,
AppInstallationID: 10,
AppPrivateKey: "asdf",
Token: "asdf",
ConfigureUrl: "github.com/some_org/some_repo",
EphemeralRunnerSetNamespace: "namespace",
EphemeralRunnerSetName: "deployment",
RunnerScaleSetId: 1,
}
err := config.Validate()
expectedError := fmt.Sprintf("only one GitHub auth method supported at a time. Have both PAT and App auth: token length: '%d', appId: '%d', installationId: '%d', private key length: '%d", len(config.Token), config.AppID, config.AppInstallationID, len(config.AppPrivateKey))
assert.ErrorContains(t, err, expectedError, "Expected error about missing auth")
}
func TestConfigValidation(t *testing.T) {
config := &Config{
ConfigureUrl: "https://github.com/actions",
EphemeralRunnerSetNamespace: "namespace",
EphemeralRunnerSetName: "deployment",
RunnerScaleSetId: 1,
MinRunners: 1,
MaxRunners: 5,
Token: "asdf",
}
err := config.Validate()
assert.NoError(t, err, "Expected no error")
}
func TestConfigValidationConfigUrl(t *testing.T) {
config := &Config{
EphemeralRunnerSetNamespace: "namespace",
EphemeralRunnerSetName: "deployment",
RunnerScaleSetId: 1,
}
err := config.Validate()
assert.ErrorContains(t, err, "GitHubConfigUrl is not provided", "Expected error about missing ConfigureUrl")
}

View File

@@ -0,0 +1,170 @@
package config
import (
"testing"
"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1/appconfig"
"github.com/actions/actions-runner-controller/vault"
"github.com/stretchr/testify/assert"
)
func TestConfigValidationMinMax(t *testing.T) {
config := &Config{
ConfigureUrl: "github.com/some_org/some_repo",
EphemeralRunnerSetNamespace: "namespace",
EphemeralRunnerSetName: "deployment",
RunnerScaleSetId: 1,
MinRunners: 5,
MaxRunners: 2,
AppConfig: &appconfig.AppConfig{
Token: "token",
},
}
err := config.Validate()
assert.ErrorContains(t, err, `MinRunners "5" cannot be greater than MaxRunners "2"`, "Expected error about MinRunners > MaxRunners")
}
func TestConfigValidationMissingToken(t *testing.T) {
config := &Config{
ConfigureUrl: "github.com/some_org/some_repo",
EphemeralRunnerSetNamespace: "namespace",
EphemeralRunnerSetName: "deployment",
RunnerScaleSetId: 1,
}
err := config.Validate()
expectedError := "AppConfig validation failed: missing app config"
assert.ErrorContains(t, err, expectedError, "Expected error about missing auth")
}
func TestConfigValidationAppKey(t *testing.T) {
t.Parallel()
t.Run("app id integer", func(t *testing.T) {
t.Parallel()
config := &Config{
AppConfig: &appconfig.AppConfig{
AppID: "1",
AppInstallationID: 10,
},
ConfigureUrl: "github.com/some_org/some_repo",
EphemeralRunnerSetNamespace: "namespace",
EphemeralRunnerSetName: "deployment",
RunnerScaleSetId: 1,
}
err := config.Validate()
expectedError := "AppConfig validation failed: no credentials provided: either a PAT or GitHub App credentials should be provided"
assert.ErrorContains(t, err, expectedError, "Expected error about missing auth")
})
t.Run("app id as client id", func(t *testing.T) {
t.Parallel()
config := &Config{
AppConfig: &appconfig.AppConfig{
AppID: "Iv23f8doAlphaNumer1c",
AppInstallationID: 10,
},
ConfigureUrl: "github.com/some_org/some_repo",
EphemeralRunnerSetNamespace: "namespace",
EphemeralRunnerSetName: "deployment",
RunnerScaleSetId: 1,
}
err := config.Validate()
expectedError := "AppConfig validation failed: no credentials provided: either a PAT or GitHub App credentials should be provided"
assert.ErrorContains(t, err, expectedError, "Expected error about missing auth")
})
}
func TestConfigValidationOnlyOneTypeOfCredentials(t *testing.T) {
config := &Config{
AppConfig: &appconfig.AppConfig{
AppID: "1",
AppInstallationID: 10,
AppPrivateKey: "asdf",
Token: "asdf",
},
ConfigureUrl: "github.com/some_org/some_repo",
EphemeralRunnerSetNamespace: "namespace",
EphemeralRunnerSetName: "deployment",
RunnerScaleSetId: 1,
}
err := config.Validate()
expectedError := "AppConfig validation failed: both PAT and GitHub App credentials provided. should only provide one"
assert.ErrorContains(t, err, expectedError, "Expected error about missing auth")
}
func TestConfigValidation(t *testing.T) {
config := &Config{
ConfigureUrl: "https://github.com/actions",
EphemeralRunnerSetNamespace: "namespace",
EphemeralRunnerSetName: "deployment",
RunnerScaleSetId: 1,
MinRunners: 1,
MaxRunners: 5,
AppConfig: &appconfig.AppConfig{
Token: "asdf",
},
}
err := config.Validate()
assert.NoError(t, err, "Expected no error")
}
func TestConfigValidationConfigUrl(t *testing.T) {
config := &Config{
EphemeralRunnerSetNamespace: "namespace",
EphemeralRunnerSetName: "deployment",
RunnerScaleSetId: 1,
}
err := config.Validate()
assert.ErrorContains(t, err, "GitHubConfigUrl is not provided", "Expected error about missing ConfigureUrl")
}
func TestConfigValidationWithVaultConfig(t *testing.T) {
t.Run("valid", func(t *testing.T) {
config := &Config{
ConfigureUrl: "https://github.com/actions",
EphemeralRunnerSetNamespace: "namespace",
EphemeralRunnerSetName: "deployment",
RunnerScaleSetId: 1,
MinRunners: 1,
MaxRunners: 5,
VaultType: vault.VaultTypeAzureKeyVault,
VaultLookupKey: "testkey",
}
err := config.Validate()
assert.NoError(t, err, "Expected no error for valid vault type")
})
t.Run("invalid vault type", func(t *testing.T) {
config := &Config{
ConfigureUrl: "https://github.com/actions",
EphemeralRunnerSetNamespace: "namespace",
EphemeralRunnerSetName: "deployment",
RunnerScaleSetId: 1,
MinRunners: 1,
MaxRunners: 5,
VaultType: vault.VaultType("invalid_vault_type"),
VaultLookupKey: "testkey",
}
err := config.Validate()
assert.ErrorContains(t, err, `unknown vault type: "invalid_vault_type"`, "Expected error for invalid vault type")
})
t.Run("vault type set without lookup key", func(t *testing.T) {
config := &Config{
ConfigureUrl: "https://github.com/actions",
EphemeralRunnerSetNamespace: "namespace",
EphemeralRunnerSetName: "deployment",
RunnerScaleSetId: 1,
MinRunners: 1,
MaxRunners: 5,
VaultType: vault.VaultTypeAzureKeyVault,
VaultLookupKey: "",
}
err := config.Validate()
assert.ErrorContains(t, err, `VaultLookupKey is required when VaultType is set to "azure_key_vault"`, "Expected error for vault type without lookup key")
})
}

View File

@@ -361,7 +361,7 @@ func (l *Listener) parseMessage(ctx context.Context, msg *actions.RunnerScaleSet
return nil, fmt.Errorf("failed to decode job available: %w", err)
}
l.logger.Info("Job available message received", "jobId", jobAvailable.RunnerRequestId)
l.logger.Info("Job available message received", "jobId", jobAvailable.JobID)
parsedMsg.jobsAvailable = append(parsedMsg.jobsAvailable, &jobAvailable)
case messageTypeJobAssigned:
@@ -370,14 +370,14 @@ func (l *Listener) parseMessage(ctx context.Context, msg *actions.RunnerScaleSet
return nil, fmt.Errorf("failed to decode job assigned: %w", err)
}
l.logger.Info("Job assigned message received", "jobId", jobAssigned.RunnerRequestId)
l.logger.Info("Job assigned message received", "jobId", jobAssigned.JobID)
case messageTypeJobStarted:
var jobStarted actions.JobStarted
if err := json.Unmarshal(msg, &jobStarted); err != nil {
return nil, fmt.Errorf("could not decode job started message. %w", err)
}
l.logger.Info("Job started message received.", "RequestId", jobStarted.RunnerRequestId, "RunnerId", jobStarted.RunnerId)
l.logger.Info("Job started message received.", "JobID", jobStarted.JobID, "RunnerId", jobStarted.RunnerID)
parsedMsg.jobsStarted = append(parsedMsg.jobsStarted, &jobStarted)
case messageTypeJobCompleted:
@@ -386,7 +386,13 @@ func (l *Listener) parseMessage(ctx context.Context, msg *actions.RunnerScaleSet
return nil, fmt.Errorf("failed to decode job completed: %w", err)
}
l.logger.Info("Job completed message received.", "RequestId", jobCompleted.RunnerRequestId, "Result", jobCompleted.Result, "RunnerId", jobCompleted.RunnerId, "RunnerName", jobCompleted.RunnerName)
l.logger.Info(
"Job completed message received.",
"JobID", jobCompleted.JobID,
"Result", jobCompleted.Result,
"RunnerId", jobCompleted.RunnerId,
"RunnerName", jobCompleted.RunnerName,
)
parsedMsg.jobsCompleted = append(parsedMsg.jobsCompleted, &jobCompleted)
default:
@@ -400,7 +406,7 @@ func (l *Listener) parseMessage(ctx context.Context, msg *actions.RunnerScaleSet
func (l *Listener) acquireAvailableJobs(ctx context.Context, jobsAvailable []*actions.JobAvailable) ([]int64, error) {
ids := make([]int64, 0, len(jobsAvailable))
for _, job := range jobsAvailable {
ids = append(ids, job.RunnerRequestId)
ids = append(ids, job.RunnerRequestID)
}
l.logger.Info("Acquiring jobs", "count", len(ids), "requestIds", fmt.Sprint(ids))

View File

@@ -627,17 +627,17 @@ func TestListener_acquireAvailableJobs(t *testing.T) {
availableJobs := []*actions.JobAvailable{
{
JobMessageBase: actions.JobMessageBase{
RunnerRequestId: 1,
RunnerRequestID: 1,
},
},
{
JobMessageBase: actions.JobMessageBase{
RunnerRequestId: 2,
RunnerRequestID: 2,
},
},
{
JobMessageBase: actions.JobMessageBase{
RunnerRequestId: 3,
RunnerRequestID: 3,
},
},
}
@@ -678,17 +678,17 @@ func TestListener_acquireAvailableJobs(t *testing.T) {
availableJobs := []*actions.JobAvailable{
{
JobMessageBase: actions.JobMessageBase{
RunnerRequestId: 1,
RunnerRequestID: 1,
},
},
{
JobMessageBase: actions.JobMessageBase{
RunnerRequestId: 2,
RunnerRequestID: 2,
},
},
{
JobMessageBase: actions.JobMessageBase{
RunnerRequestId: 3,
RunnerRequestID: 3,
},
},
}
@@ -724,17 +724,17 @@ func TestListener_acquireAvailableJobs(t *testing.T) {
availableJobs := []*actions.JobAvailable{
{
JobMessageBase: actions.JobMessageBase{
RunnerRequestId: 1,
RunnerRequestID: 1,
},
},
{
JobMessageBase: actions.JobMessageBase{
RunnerRequestId: 2,
RunnerRequestID: 2,
},
},
{
JobMessageBase: actions.JobMessageBase{
RunnerRequestId: 3,
RunnerRequestID: 3,
},
},
}
@@ -809,17 +809,17 @@ func TestListener_acquireAvailableJobs(t *testing.T) {
availableJobs := []*actions.JobAvailable{
{
JobMessageBase: actions.JobMessageBase{
RunnerRequestId: 1,
RunnerRequestID: 1,
},
},
{
JobMessageBase: actions.JobMessageBase{
RunnerRequestId: 2,
RunnerRequestID: 2,
},
},
{
JobMessageBase: actions.JobMessageBase{
RunnerRequestId: 3,
RunnerRequestID: 3,
},
},
}
@@ -881,7 +881,7 @@ func TestListener_parseMessage(t *testing.T) {
JobMessageType: actions.JobMessageType{
MessageType: messageTypeJobAvailable,
},
RunnerRequestId: 1,
RunnerRequestID: 1,
},
},
{
@@ -890,7 +890,7 @@ func TestListener_parseMessage(t *testing.T) {
JobMessageType: actions.JobMessageType{
MessageType: messageTypeJobAvailable,
},
RunnerRequestId: 2,
RunnerRequestID: 2,
},
},
}
@@ -904,7 +904,7 @@ func TestListener_parseMessage(t *testing.T) {
JobMessageType: actions.JobMessageType{
MessageType: messageTypeJobAssigned,
},
RunnerRequestId: 3,
RunnerRequestID: 3,
},
},
{
@@ -912,7 +912,7 @@ func TestListener_parseMessage(t *testing.T) {
JobMessageType: actions.JobMessageType{
MessageType: messageTypeJobAssigned,
},
RunnerRequestId: 4,
RunnerRequestID: 4,
},
},
}
@@ -926,9 +926,9 @@ func TestListener_parseMessage(t *testing.T) {
JobMessageType: actions.JobMessageType{
MessageType: messageTypeJobStarted,
},
RunnerRequestId: 5,
RunnerRequestID: 5,
},
RunnerId: 2,
RunnerID: 2,
RunnerName: "runner2",
},
}
@@ -942,7 +942,7 @@ func TestListener_parseMessage(t *testing.T) {
JobMessageType: actions.JobMessageType{
MessageType: messageTypeJobCompleted,
},
RunnerRequestId: 6,
RunnerRequestID: 6,
},
Result: "success",
RunnerId: 1,

View File

@@ -123,9 +123,9 @@ func TestHandleMessageMetrics(t *testing.T) {
JobMessageType: actions.JobMessageType{
MessageType: messageTypeJobStarted,
},
RunnerRequestId: 8,
RunnerRequestID: 8,
},
RunnerId: 3,
RunnerID: 3,
RunnerName: "runner3",
},
}
@@ -139,7 +139,7 @@ func TestHandleMessageMetrics(t *testing.T) {
JobMessageType: actions.JobMessageType{
MessageType: messageTypeJobCompleted,
},
RunnerRequestId: 6,
RunnerRequestID: 6,
},
Result: "success",
RunnerId: 1,
@@ -150,7 +150,7 @@ func TestHandleMessageMetrics(t *testing.T) {
JobMessageType: actions.JobMessageType{
MessageType: messageTypeJobCompleted,
},
RunnerRequestId: 7,
RunnerRequestID: 7,
},
Result: "success",
RunnerId: 2,

View File

@@ -13,26 +13,27 @@ import (
)
func main() {
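// Create the signal-aware context before reading config: config.Read now takes ctx, so vault lookups inherit SIGINT/SIGTERM cancellation.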
ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
defer stop()
configPath, ok := os.LookupEnv("LISTENER_CONFIG_PATH")
if !ok {
fmt.Fprintf(os.Stderr, "Error: LISTENER_CONFIG_PATH environment variable is not set\n")
os.Exit(1)
}
config, err := config.Read(configPath)
config, err := config.Read(ctx, configPath)
if err != nil {
log.Printf("Failed to read config: %v", err)
os.Exit(1)
}
app, err := app.New(config)
app, err := app.New(*config)
if err != nil {
log.Printf("Failed to initialize app: %v", err)
os.Exit(1)
}
ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
defer stop()
if err := app.Run(ctx); err != nil {
log.Printf("Application returned an error: %v", err)
os.Exit(1)

View File

@@ -21,6 +21,9 @@ const (
labelKeyOrganization = "organization"
labelKeyRepository = "repository"
labelKeyJobName = "job_name"
labelKeyJobWorkflowRef = "job_workflow_ref"
labelKeyJobWorkflowName = "job_workflow_name"
labelKeyJobWorkflowTarget = "job_workflow_target"
labelKeyEventName = "event_name"
labelKeyJobResult = "job_result"
)
@@ -74,12 +77,16 @@ var metricsHelp = metricsHelpRegistry{
}
func (e *exporter) jobLabels(jobBase *actions.JobMessageBase) prometheus.Labels {
workflowRefInfo := ParseWorkflowRef(jobBase.JobWorkflowRef)
return prometheus.Labels{
labelKeyEnterprise: e.scaleSetLabels[labelKeyEnterprise],
labelKeyOrganization: jobBase.OwnerName,
labelKeyRepository: jobBase.RepositoryName,
labelKeyJobName: jobBase.JobDisplayName,
labelKeyEventName: jobBase.EventName,
labelKeyEnterprise: e.scaleSetLabels[labelKeyEnterprise],
labelKeyOrganization: jobBase.OwnerName,
labelKeyRepository: jobBase.RepositoryName,
labelKeyJobName: jobBase.JobDisplayName,
labelKeyJobWorkflowRef: jobBase.JobWorkflowRef,
labelKeyJobWorkflowName: workflowRefInfo.Name,
labelKeyJobWorkflowTarget: workflowRefInfo.Target,
labelKeyEventName: jobBase.EventName,
}
}
@@ -152,13 +159,148 @@ type ExporterConfig struct {
ServerAddr string
ServerEndpoint string
Logger logr.Logger
Metrics v1alpha1.MetricsConfig
Metrics *v1alpha1.MetricsConfig
}
var defaultMetrics = v1alpha1.MetricsConfig{
Counters: map[string]*v1alpha1.CounterMetric{
MetricStartedJobsTotal: {
Labels: []string{
labelKeyEnterprise,
labelKeyOrganization,
labelKeyRepository,
labelKeyJobName,
labelKeyEventName,
},
},
MetricCompletedJobsTotal: {
Labels: []string{
labelKeyEnterprise,
labelKeyOrganization,
labelKeyRepository,
labelKeyJobName,
labelKeyEventName,
labelKeyJobResult,
},
},
},
Gauges: map[string]*v1alpha1.GaugeMetric{
MetricAssignedJobs: {
Labels: []string{
labelKeyEnterprise,
labelKeyOrganization,
labelKeyRepository,
labelKeyRunnerScaleSetName,
labelKeyRunnerScaleSetNamespace,
},
},
MetricRunningJobs: {
Labels: []string{
labelKeyEnterprise,
labelKeyOrganization,
labelKeyRepository,
labelKeyRunnerScaleSetName,
labelKeyRunnerScaleSetNamespace,
},
},
MetricRegisteredRunners: {
Labels: []string{
labelKeyEnterprise,
labelKeyOrganization,
labelKeyRepository,
labelKeyRunnerScaleSetName,
labelKeyRunnerScaleSetNamespace,
},
},
MetricBusyRunners: {
Labels: []string{
labelKeyEnterprise,
labelKeyOrganization,
labelKeyRepository,
labelKeyRunnerScaleSetName,
labelKeyRunnerScaleSetNamespace,
},
},
MetricMinRunners: {
Labels: []string{
labelKeyEnterprise,
labelKeyOrganization,
labelKeyRepository,
labelKeyRunnerScaleSetName,
labelKeyRunnerScaleSetNamespace,
},
},
MetricMaxRunners: {
Labels: []string{
labelKeyEnterprise,
labelKeyOrganization,
labelKeyRepository,
labelKeyRunnerScaleSetName,
labelKeyRunnerScaleSetNamespace,
},
},
MetricDesiredRunners: {
Labels: []string{
labelKeyEnterprise,
labelKeyOrganization,
labelKeyRepository,
labelKeyRunnerScaleSetName,
labelKeyRunnerScaleSetNamespace,
},
},
MetricIdleRunners: {
Labels: []string{
labelKeyEnterprise,
labelKeyOrganization,
labelKeyRepository,
labelKeyRunnerScaleSetName,
labelKeyRunnerScaleSetNamespace,
},
},
},
Histograms: map[string]*v1alpha1.HistogramMetric{
MetricJobStartupDurationSeconds: {
Labels: []string{
labelKeyEnterprise,
labelKeyOrganization,
labelKeyRepository,
labelKeyJobName,
labelKeyEventName,
},
Buckets: defaultRuntimeBuckets,
},
MetricJobExecutionDurationSeconds: {
Labels: []string{
labelKeyEnterprise,
labelKeyOrganization,
labelKeyRepository,
labelKeyJobName,
labelKeyEventName,
labelKeyJobResult,
},
Buckets: defaultRuntimeBuckets,
},
},
}
func (e *ExporterConfig) defaults() {
if e.ServerAddr == "" {
e.ServerAddr = ":8080"
}
if e.ServerEndpoint == "" {
e.ServerEndpoint = "/metrics"
}
if e.Metrics == nil {
defaultMetrics := defaultMetrics
e.Metrics = &defaultMetrics
}
}
func NewExporter(config ExporterConfig) ServerExporter {
config.defaults()
reg := prometheus.NewRegistry()
metrics := installMetrics(config.Metrics, reg, config.Logger)
metrics := installMetrics(*config.Metrics, reg, config.Logger)
mux := http.NewServeMux()
mux.Handle(
@@ -287,7 +429,7 @@ func (e *exporter) ListenAndServe(ctx context.Context) error {
}
func (e *exporter) setGauge(name string, allLabels prometheus.Labels, val float64) {
m, ok := e.metrics.gauges[name]
m, ok := e.gauges[name]
if !ok {
return
}
@@ -299,7 +441,7 @@ func (e *exporter) setGauge(name string, allLabels prometheus.Labels, val float6
}
func (e *exporter) incCounter(name string, allLabels prometheus.Labels) {
m, ok := e.metrics.counters[name]
m, ok := e.counters[name]
if !ok {
return
}
@@ -311,7 +453,7 @@ func (e *exporter) incCounter(name string, allLabels prometheus.Labels) {
}
func (e *exporter) observeHistogram(name string, allLabels prometheus.Labels, val float64) {
m, ok := e.metrics.histograms[name]
m, ok := e.histograms[name]
if !ok {
return
}
@@ -331,7 +473,7 @@ func (e *exporter) PublishStatistics(stats *actions.RunnerScaleSetStatistic) {
e.setGauge(MetricAssignedJobs, e.scaleSetLabels, float64(stats.TotalAssignedJobs))
e.setGauge(MetricRunningJobs, e.scaleSetLabels, float64(stats.TotalRunningJobs))
e.setGauge(MetricRegisteredRunners, e.scaleSetLabels, float64(stats.TotalRegisteredRunners))
e.setGauge(MetricBusyRunners, e.scaleSetLabels, float64(float64(stats.TotalRegisteredRunners)))
e.setGauge(MetricBusyRunners, e.scaleSetLabels, float64(stats.TotalBusyRunners))
e.setGauge(MetricIdleRunners, e.scaleSetLabels, float64(stats.TotalIdleRunners))
}
@@ -339,7 +481,7 @@ func (e *exporter) PublishJobStarted(msg *actions.JobStarted) {
l := e.startedJobLabels(msg)
e.incCounter(MetricStartedJobsTotal, l)
startupDuration := msg.JobMessageBase.RunnerAssignTime.Unix() - msg.JobMessageBase.ScaleSetAssignTime.Unix()
startupDuration := msg.RunnerAssignTime.Unix() - msg.ScaleSetAssignTime.Unix()
e.observeHistogram(MetricJobStartupDurationSeconds, l, float64(startupDuration))
}
@@ -347,7 +489,7 @@ func (e *exporter) PublishJobCompleted(msg *actions.JobCompleted) {
l := e.completedJobLabels(msg)
e.incCounter(MetricCompletedJobsTotal, l)
executionDuration := msg.JobMessageBase.FinishTime.Unix() - msg.JobMessageBase.RunnerAssignTime.Unix()
executionDuration := msg.FinishTime.Unix() - msg.RunnerAssignTime.Unix()
e.observeHistogram(MetricJobExecutionDurationSeconds, l, float64(executionDuration))
}

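One subtlety in defaults() above: Metrics is now a pointer, and when it is nil the method copies the package-level defaultMetrics before taking its address, so the caller's pointer does not alias the package-level struct (though nested maps are still shared). A stripped-down, self-contained illustration; the types, metric name, and label below are placeholders for the real v1alpha1 shapes:

package main

import "fmt"

// Stand-ins for the real v1alpha1.MetricsConfig / ExporterConfig shapes.
type MetricsConfig struct {
	Counters map[string][]string
}

var defaultMetrics = MetricsConfig{
	Counters: map[string][]string{"started_jobs_total": {"repository"}},
}

type ExporterConfig struct {
	ServerAddr     string
	ServerEndpoint string
	Metrics        *MetricsConfig
}

func (e *ExporterConfig) defaults() {
	if e.ServerAddr == "" {
		e.ServerAddr = ":8080"
	}
	if e.ServerEndpoint == "" {
		e.ServerEndpoint = "/metrics"
	}
	if e.Metrics == nil {
		m := defaultMetrics // copy the struct so e.Metrics does not point at the package-level var
		e.Metrics = &m
	}
}

func main() {
	cfg := ExporterConfig{} // nil Metrics: all default metrics get registered
	cfg.defaults()
	fmt.Println(cfg.ServerAddr, cfg.ServerEndpoint, len(cfg.Metrics.Counters))
}
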
View File

@@ -0,0 +1,99 @@
package metrics
import (
"testing"
"github.com/actions/actions-runner-controller/github/actions"
"github.com/prometheus/client_golang/prometheus"
"github.com/stretchr/testify/assert"
)
func TestMetricsWithWorkflowRefParsing(t *testing.T) {
// Create a test exporter
exporter := &exporter{
scaleSetLabels: prometheus.Labels{
labelKeyEnterprise: "test-enterprise",
labelKeyOrganization: "test-org",
labelKeyRepository: "test-repo",
labelKeyRunnerScaleSetName: "test-scale-set",
labelKeyRunnerScaleSetNamespace: "test-namespace",
},
}
tests := []struct {
name string
jobBase actions.JobMessageBase
wantName string
wantTarget string
}{
{
name: "main branch workflow",
jobBase: actions.JobMessageBase{
OwnerName: "actions",
RepositoryName: "runner",
JobDisplayName: "Build and Test",
JobWorkflowRef: "actions/runner/.github/workflows/build.yml@refs/heads/main",
EventName: "push",
},
wantName: "build",
wantTarget: "heads/main",
},
{
name: "feature branch workflow",
jobBase: actions.JobMessageBase{
OwnerName: "myorg",
RepositoryName: "myrepo",
JobDisplayName: "CI/CD Pipeline",
JobWorkflowRef: "myorg/myrepo/.github/workflows/ci-cd-pipeline.yml@refs/heads/feature/new-metrics",
EventName: "push",
},
wantName: "ci-cd-pipeline",
wantTarget: "heads/feature/new-metrics",
},
{
name: "pull request workflow",
jobBase: actions.JobMessageBase{
OwnerName: "actions",
RepositoryName: "runner",
JobDisplayName: "PR Checks",
JobWorkflowRef: "actions/runner/.github/workflows/pr-checks.yml@refs/pull/123/merge",
EventName: "pull_request",
},
wantName: "pr-checks",
wantTarget: "pull/123",
},
{
name: "tag workflow",
jobBase: actions.JobMessageBase{
OwnerName: "actions",
RepositoryName: "runner",
JobDisplayName: "Release",
JobWorkflowRef: "actions/runner/.github/workflows/release.yml@refs/tags/v1.2.3",
EventName: "release",
},
wantName: "release",
wantTarget: "tags/v1.2.3",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
labels := exporter.jobLabels(&tt.jobBase)
// Build expected labels
expectedLabels := prometheus.Labels{
labelKeyEnterprise: "test-enterprise",
labelKeyOrganization: tt.jobBase.OwnerName,
labelKeyRepository: tt.jobBase.RepositoryName,
labelKeyJobName: tt.jobBase.JobDisplayName,
labelKeyJobWorkflowRef: tt.jobBase.JobWorkflowRef,
labelKeyJobWorkflowName: tt.wantName,
labelKeyJobWorkflowTarget: tt.wantTarget,
labelKeyEventName: tt.jobBase.EventName,
}
// Assert all expected labels match
assert.Equal(t, expectedLabels, labels, "jobLabels() returned unexpected labels for %s", tt.name)
})
}
}

View File

@@ -7,6 +7,7 @@ import (
"github.com/go-logr/logr"
"github.com/prometheus/client_golang/prometheus"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestInstallMetrics(t *testing.T) {
@@ -86,3 +87,179 @@ func TestInstallMetrics(t *testing.T) {
assert.Equal(t, duration.config.Labels, metricsConfig.Histograms[MetricJobStartupDurationSeconds].Labels)
assert.Equal(t, duration.config.Buckets, defaultRuntimeBuckets)
}
func TestNewExporter(t *testing.T) {
t.Run("with defaults metrics applied", func(t *testing.T) {
config := ExporterConfig{
ScaleSetName: "test-scale-set",
ScaleSetNamespace: "test-namespace",
Enterprise: "",
Organization: "org",
Repository: "repo",
ServerAddr: ":6060",
ServerEndpoint: "/metrics",
Logger: logr.Discard(),
Metrics: nil, // when metrics is nil, all default metrics should be registered
}
exporter, ok := NewExporter(config).(*exporter)
require.True(t, ok, "expected exporter to be of type *exporter")
require.NotNil(t, exporter)
reg := prometheus.NewRegistry()
wantMetrics := installMetrics(defaultMetrics, reg, config.Logger)
assert.Equal(t, len(wantMetrics.counters), len(exporter.counters))
for k, v := range wantMetrics.counters {
assert.Contains(t, exporter.counters, k)
assert.Equal(t, v.config, exporter.counters[k].config)
}
assert.Equal(t, len(wantMetrics.gauges), len(exporter.gauges))
for k, v := range wantMetrics.gauges {
assert.Contains(t, exporter.gauges, k)
assert.Equal(t, v.config, exporter.gauges[k].config)
}
assert.Equal(t, len(wantMetrics.histograms), len(exporter.histograms))
for k, v := range wantMetrics.histograms {
assert.Contains(t, exporter.histograms, k)
assert.Equal(t, v.config, exporter.histograms[k].config)
}
require.NotNil(t, exporter.srv)
assert.Equal(t, config.ServerAddr, exporter.srv.Addr)
})
t.Run("with default server URL", func(t *testing.T) {
config := ExporterConfig{
ScaleSetName: "test-scale-set",
ScaleSetNamespace: "test-namespace",
Enterprise: "",
Organization: "org",
Repository: "repo",
ServerAddr: "", // empty ServerAddr should default to ":8080"
ServerEndpoint: "",
Logger: logr.Discard(),
Metrics: nil, // when metrics is nil, all default metrics should be registered
}
exporter, ok := NewExporter(config).(*exporter)
require.True(t, ok, "expected exporter to be of type *exporter")
require.NotNil(t, exporter)
reg := prometheus.NewRegistry()
wantMetrics := installMetrics(defaultMetrics, reg, config.Logger)
assert.Equal(t, len(wantMetrics.counters), len(exporter.counters))
for k, v := range wantMetrics.counters {
assert.Contains(t, exporter.counters, k)
assert.Equal(t, v.config, exporter.counters[k].config)
}
assert.Equal(t, len(wantMetrics.gauges), len(exporter.gauges))
for k, v := range wantMetrics.gauges {
assert.Contains(t, exporter.gauges, k)
assert.Equal(t, v.config, exporter.gauges[k].config)
}
assert.Equal(t, len(wantMetrics.histograms), len(exporter.histograms))
for k, v := range wantMetrics.histograms {
assert.Contains(t, exporter.histograms, k)
assert.Equal(t, v.config, exporter.histograms[k].config)
}
require.NotNil(t, exporter.srv)
assert.Equal(t, exporter.srv.Addr, ":8080")
})
t.Run("with metrics configured", func(t *testing.T) {
metricsConfig := v1alpha1.MetricsConfig{
Counters: map[string]*v1alpha1.CounterMetric{
MetricStartedJobsTotal: {
Labels: []string{labelKeyRepository},
},
},
Gauges: map[string]*v1alpha1.GaugeMetric{
MetricAssignedJobs: {
Labels: []string{labelKeyRepository},
},
},
Histograms: map[string]*v1alpha1.HistogramMetric{
MetricJobExecutionDurationSeconds: {
Labels: []string{labelKeyRepository},
Buckets: []float64{0.1, 1},
},
},
}
config := ExporterConfig{
ScaleSetName: "test-scale-set",
ScaleSetNamespace: "test-namespace",
Enterprise: "",
Organization: "org",
Repository: "repo",
ServerAddr: ":6060",
ServerEndpoint: "/metrics",
Logger: logr.Discard(),
Metrics: &metricsConfig,
}
exporter, ok := NewExporter(config).(*exporter)
require.True(t, ok, "expected exporter to be of type *exporter")
require.NotNil(t, exporter)
reg := prometheus.NewRegistry()
wantMetrics := installMetrics(metricsConfig, reg, config.Logger)
assert.Equal(t, len(wantMetrics.counters), len(exporter.counters))
for k, v := range wantMetrics.counters {
assert.Contains(t, exporter.counters, k)
assert.Equal(t, v.config, exporter.counters[k].config)
}
assert.Equal(t, len(wantMetrics.gauges), len(exporter.gauges))
for k, v := range wantMetrics.gauges {
assert.Contains(t, exporter.gauges, k)
assert.Equal(t, v.config, exporter.gauges[k].config)
}
assert.Equal(t, len(wantMetrics.histograms), len(exporter.histograms))
for k, v := range wantMetrics.histograms {
assert.Contains(t, exporter.histograms, k)
assert.Equal(t, v.config, exporter.histograms[k].config)
}
require.NotNil(t, exporter.srv)
assert.Equal(t, config.ServerAddr, exporter.srv.Addr)
})
}
func TestExporterConfigDefaults(t *testing.T) {
config := ExporterConfig{
ScaleSetName: "test-scale-set",
ScaleSetNamespace: "test-namespace",
Enterprise: "",
Organization: "org",
Repository: "repo",
ServerAddr: "",
ServerEndpoint: "",
Logger: logr.Discard(),
Metrics: nil, // when metrics is nil, all default metrics should be registered
}
config.defaults()
want := ExporterConfig{
ScaleSetName: "test-scale-set",
ScaleSetNamespace: "test-namespace",
Enterprise: "",
Organization: "org",
Repository: "repo",
ServerAddr: ":8080", // default server address
ServerEndpoint: "/metrics", // default server endpoint
Logger: logr.Discard(),
Metrics: &defaultMetrics, // when metrics is nil, all default metrics should be registered
}
assert.Equal(t, want, config)
}

View File

@@ -0,0 +1,78 @@
package metrics
import (
"path"
"strings"
)
// WorkflowRefInfo contains parsed information from a job_workflow_ref
type WorkflowRefInfo struct {
// Name is the workflow file name without extension
Name string
// Target is the target ref with type prefix retained for clarity
// Examples:
// - heads/main (branch)
// - heads/feature/new-feature (branch)
// - tags/v1.2.3 (tag)
// - pull/123 (pull request)
Target string
}
// ParseWorkflowRef parses a job_workflow_ref string to extract workflow name and target
// Format: {owner}/{repo}/.github/workflows/{workflow_file}@{ref}
// Example: mygithuborg/myrepo/.github/workflows/blank.yml@refs/heads/main
//
// The target field preserves type prefixes to differentiate between:
// - Branch references: "heads/{branch}" (from refs/heads/{branch})
// - Tag references: "tags/{tag}" (from refs/tags/{tag})
// - Pull requests: "pull/{number}" (from refs/pull/{number}/merge)
func ParseWorkflowRef(workflowRef string) WorkflowRefInfo {
info := WorkflowRefInfo{}
if workflowRef == "" {
return info
}
// Split by @ to separate path and ref
parts := strings.Split(workflowRef, "@")
if len(parts) != 2 {
return info
}
workflowPath := parts[0]
ref := parts[1]
// Extract workflow name from path
// The path format is: {owner}/{repo}/.github/workflows/{workflow_file}
workflowFile := path.Base(workflowPath)
// Remove .yml or .yaml extension
info.Name = strings.TrimSuffix(strings.TrimSuffix(workflowFile, ".yml"), ".yaml")
// Extract target from ref based on type
// Branch refs: refs/heads/{branch}
// Tag refs: refs/tags/{tag}
// PR refs: refs/pull/{number}/merge
const (
branchPrefix = "refs/heads/"
tagPrefix = "refs/tags/"
prPrefix = "refs/pull/"
)
switch {
case strings.HasPrefix(ref, branchPrefix):
// Keep "heads/" prefix to indicate branch
info.Target = "heads/" + strings.TrimPrefix(ref, branchPrefix)
case strings.HasPrefix(ref, tagPrefix):
// Keep "tags/" prefix to indicate tag
info.Target = "tags/" + strings.TrimPrefix(ref, tagPrefix)
case strings.HasPrefix(ref, prPrefix):
// Extract PR number from refs/pull/{number}/merge
// Keep "pull/" prefix to indicate pull request
prPart := strings.TrimPrefix(ref, prPrefix)
if idx := strings.Index(prPart, "/"); idx > 0 {
info.Target = "pull/" + prPart[:idx]
}
}
return info
}

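A small usage example, runnable as a Go example test inside the same metrics package; the inputs mirror the ref formats the function documents above:

package metrics

import "fmt"

func ExampleParseWorkflowRef() {
	for _, ref := range []string{
		"owner/repo/.github/workflows/build.yml@refs/heads/main",
		"owner/repo/.github/workflows/release.yml@refs/tags/v1.2.3",
		"owner/repo/.github/workflows/test.yml@refs/pull/123/merge",
	} {
		info := ParseWorkflowRef(ref)
		fmt.Println(info.Name, info.Target)
	}
	// Output:
	// build heads/main
	// release tags/v1.2.3
	// test pull/123
}
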
View File

@@ -0,0 +1,82 @@
package metrics
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestParseWorkflowRef(t *testing.T) {
tests := []struct {
name string
workflowRef string
wantName string
wantTarget string
}{
{
name: "standard branch reference with yml",
workflowRef: "actions-runner-controller-sandbox/mumoshu-orgrunner-test-01/.github/workflows/blank.yml@refs/heads/main",
wantName: "blank",
wantTarget: "heads/main",
},
{
name: "branch with special characters",
workflowRef: "owner/repo/.github/workflows/ci-cd.yml@refs/heads/feature/new-feature",
wantName: "ci-cd",
wantTarget: "heads/feature/new-feature",
},
{
name: "yaml extension",
workflowRef: "owner/repo/.github/workflows/deploy.yaml@refs/heads/develop",
wantName: "deploy",
wantTarget: "heads/develop",
},
{
name: "tag reference",
workflowRef: "owner/repo/.github/workflows/release.yml@refs/tags/v1.0.0",
wantName: "release",
wantTarget: "tags/v1.0.0",
},
{
name: "pull request reference",
workflowRef: "owner/repo/.github/workflows/test.yml@refs/pull/123/merge",
wantName: "test",
wantTarget: "pull/123",
},
{
name: "empty workflow ref",
workflowRef: "",
wantName: "",
wantTarget: "",
},
{
name: "invalid format - no @ separator",
workflowRef: "owner/repo/.github/workflows/test.yml",
wantName: "",
wantTarget: "",
},
{
name: "workflow with dots in name",
workflowRef: "owner/repo/.github/workflows/build.test.yml@refs/heads/main",
wantName: "build.test",
wantTarget: "heads/main",
},
{
name: "workflow with hyphen and underscore",
workflowRef: "owner/repo/.github/workflows/build-test_deploy.yml@refs/heads/main",
wantName: "build-test_deploy",
wantTarget: "heads/main",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got := ParseWorkflowRef(tt.workflowRef)
expected := WorkflowRefInfo{
Name: tt.wantName,
Target: tt.wantTarget,
}
assert.Equal(t, expected, got, "ParseWorkflowRef(%q) returned unexpected result", tt.workflowRef)
})
}
}

View File

@@ -100,10 +100,11 @@ func (w *Worker) HandleJobStarted(ctx context.Context, jobInfo *actions.JobStart
"runnerName", jobInfo.RunnerName,
"ownerName", jobInfo.OwnerName,
"repoName", jobInfo.RepositoryName,
"jobId", jobInfo.JobID,
"workflowRef", jobInfo.JobWorkflowRef,
"workflowRunId", jobInfo.WorkflowRunId,
"workflowRunId", jobInfo.WorkflowRunID,
"jobDisplayName", jobInfo.JobDisplayName,
"requestId", jobInfo.RunnerRequestId)
"requestId", jobInfo.RunnerRequestID)
original, err := json.Marshal(&v1alpha1.EphemeralRunner{})
if err != nil {
@@ -113,9 +114,10 @@ func (w *Worker) HandleJobStarted(ctx context.Context, jobInfo *actions.JobStart
patch, err := json.Marshal(
&v1alpha1.EphemeralRunner{
Status: v1alpha1.EphemeralRunnerStatus{
JobRequestId: jobInfo.RunnerRequestId,
JobRequestId: jobInfo.RunnerRequestID,
JobRepositoryName: fmt.Sprintf("%s/%s", jobInfo.OwnerName, jobInfo.RepositoryName),
WorkflowRunId: jobInfo.WorkflowRunId,
JobID: jobInfo.JobID,
WorkflowRunId: jobInfo.WorkflowRunID,
JobWorkflowRef: jobInfo.JobWorkflowRef,
JobDisplayName: jobInfo.JobDisplayName,
},

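For reference, the merge patch produced by the snippet above now carries the job ID. A self-contained sketch that prints an equivalent payload; the struct is a reduced stand-in for v1alpha1.EphemeralRunnerStatus, the json tags are taken from the status properties in the CRD further down where visible (jobWorkflowRef is assumed), and the values are made up:

package main

import (
	"encoding/json"
	"fmt"
)

// Reduced stand-in for the patched status fields; tags mirror the CRD's
// jobRequestId / jobRepositoryName / jobId / workflowRunId / jobDisplayName.
type status struct {
	JobRequestID      int64  `json:"jobRequestId,omitempty"`
	JobRepositoryName string `json:"jobRepositoryName,omitempty"`
	JobID             string `json:"jobId,omitempty"`
	WorkflowRunID     int64  `json:"workflowRunId,omitempty"`
	JobWorkflowRef    string `json:"jobWorkflowRef,omitempty"`
	JobDisplayName    string `json:"jobDisplayName,omitempty"`
}

func main() {
	patch, err := json.Marshal(map[string]status{"status": {
		JobRequestID:      42, // illustrative values only
		JobRepositoryName: "octo-org/octo-repo",
		JobID:             "example-job-id",
		WorkflowRunID:     1234,
		JobWorkflowRef:    "octo-org/octo-repo/.github/workflows/ci.yml@refs/heads/main",
		JobDisplayName:    "build",
	}})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(patch))
}
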
File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.17.2
controller-gen.kubebuilder.io/version: v0.19.0
name: ephemeralrunners.actions.github.com
spec:
group: actions.github.com
@@ -36,6 +36,9 @@ spec:
- jsonPath: .status.jobDisplayName
name: JobDisplayName
type: string
- jsonPath: .status.jobId
name: JobId
type: string
- jsonPath: .status.message
name: Message
type: string
@@ -427,7 +430,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
Also, matchLabelKeys cannot be set when labelSelector isn't set.
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -442,7 +444,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -603,7 +604,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
Also, matchLabelKeys cannot be set when labelSelector isn't set.
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -618,7 +618,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -707,8 +706,8 @@ spec:
most preferred is the one with the greatest sum of weights, i.e.
for each node that meets all of the scheduling requirements (resource
request, requiredDuringScheduling anti-affinity expressions, etc.),
compute a sum by iterating through the elements of this field and adding
"weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the
compute a sum by iterating through the elements of this field and subtracting
"weight" from the sum if the node has pods which matches the corresponding podAffinityTerm; the
node(s) with the highest sum are the most preferred.
items:
description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)
@@ -772,7 +771,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
Also, matchLabelKeys cannot be set when labelSelector isn't set.
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -787,7 +785,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -948,7 +945,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
Also, matchLabelKeys cannot be set when labelSelector isn't set.
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -963,7 +959,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -1090,7 +1085,9 @@ spec:
description: EnvVar represents an environment variable present in a Container.
properties:
name:
description: Name of the environment variable. Must be a C_IDENTIFIER.
description: |-
Name of the environment variable.
May consist of any printable ASCII characters except '='.
type: string
value:
description: |-
@@ -1144,6 +1141,42 @@ spec:
- fieldPath
type: object
x-kubernetes-map-type: atomic
fileKeyRef:
description: |-
FileKeyRef selects a key of the env file.
Requires the EnvFiles feature gate to be enabled.
properties:
key:
description: |-
The key within the env file. An invalid key will prevent the pod from starting.
The keys defined within a source may consist of any printable ASCII characters except '='.
During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters.
type: string
optional:
default: false
description: |-
Specify whether the file or its key must be defined. If the file or key
does not exist, then the env var is not published.
If optional is set to true and the specified key does not exist,
the environment variable will not be set in the Pod's containers.
If optional is set to false and the specified key does not exist,
an error will be returned during Pod creation.
type: boolean
path:
description: |-
The path within the volume from which to select the file.
Must be relative and may not contain the '..' path or start with '..'.
type: string
volumeName:
description: The name of the volume mount containing the env file.
type: string
required:
- key
- path
- volumeName
type: object
x-kubernetes-map-type: atomic
resourceFieldRef:
description: |-
Selects a resource of the container: only resources limits and requests
@@ -1199,13 +1232,13 @@ spec:
envFrom:
description: |-
List of sources to populate environment variables in the container.
The keys defined within a source must be a C_IDENTIFIER. All invalid keys
will be reported as an event when the container is starting. When a key exists in multiple
The keys defined within a source may consist of any printable ASCII characters except '='.
When a key exists in multiple
sources, the value associated with the last source will take precedence.
Values defined by an Env with a duplicate key will take precedence.
Cannot be updated.
items:
description: EnvFromSource represents the source of a set of ConfigMaps
description: EnvFromSource represents the source of a set of ConfigMaps or Secrets
properties:
configMapRef:
description: The ConfigMap to select from
@@ -1225,7 +1258,9 @@ spec:
type: object
x-kubernetes-map-type: atomic
prefix:
description: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.
description: |-
Optional text to prepend to the name of each environment variable.
May consist of any printable ASCII characters except '='.
type: string
secretRef:
description: The Secret to select from
@@ -1474,6 +1509,12 @@ spec:
- port
type: object
type: object
stopSignal:
description: |-
StopSignal defines which signal will be sent to a container when it is being stopped.
If not specified, the default is defined by the container runtime in use.
StopSignal can only be set for Pods with a non-empty .spec.os.name
type: string
type: object
livenessProbe:
description: |-
@@ -1864,7 +1905,7 @@ spec:
Claims lists the names of resources, defined in spec.resourceClaims,
that are used by this container.
This is an alpha field and requires enabling the
This field depends on the
DynamicResourceAllocation feature gate.
This field is immutable. It can only be set for containers.
@@ -1915,10 +1956,10 @@ spec:
restartPolicy:
description: |-
RestartPolicy defines the restart behavior of individual containers in a pod.
This field may only be set for init containers, and the only allowed value is "Always".
For non-init containers or when this field is not specified,
This overrides the pod-level restart policy. When this field is not specified,
the restart behavior is defined by the Pod's restart policy and the container type.
Setting the RestartPolicy as "Always" for the init container will have the following effect:
Additionally, setting the RestartPolicy as "Always" for the init container will
have the following effect:
this init container will be continually restarted on
exit until all regular containers have terminated. Once all regular
containers have completed, all init containers with restartPolicy "Always"
@@ -1930,6 +1971,57 @@ spec:
init container is started, or after any startupProbe has successfully
completed.
type: string
restartPolicyRules:
description: |-
Represents a list of rules to be checked to determine if the
container should be restarted on exit. The rules are evaluated in
order. Once a rule matches a container exit condition, the remaining
rules are ignored. If no rule matches the container exit condition,
the Container-level restart policy determines whether the container
is restarted or not. Constraints on the rules:
- At most 20 rules are allowed.
- Rules can have the same action.
- Identical rules are not forbidden in validations.
When rules are specified, container MUST set RestartPolicy explicitly
even if it matches the Pod's RestartPolicy.
items:
description: ContainerRestartRule describes how a container exit is handled.
properties:
action:
description: |-
Specifies the action taken on a container exit if the requirements
are satisfied. The only possible value is "Restart" to restart the
container.
type: string
exitCodes:
description: Represents the exit codes to check on container exits.
properties:
operator:
description: |-
Represents the relationship between the container exit code(s) and the
specified values. Possible values are:
- In: the requirement is satisfied if the container exit code is in the
set of specified values.
- NotIn: the requirement is satisfied if the container exit code is
not in the set of specified values.
type: string
values:
description: |-
Specifies the set of values to check for container exit codes.
At most 255 elements are allowed.
items:
format: int32
type: integer
type: array
x-kubernetes-list-type: set
required:
- operator
type: object
required:
- action
type: object
type: array
x-kubernetes-list-type: atomic
securityContext:
description: |-
SecurityContext defines the security options the container should be run with.
@@ -2527,7 +2619,9 @@ spec:
description: EnvVar represents an environment variable present in a Container.
properties:
name:
description: Name of the environment variable. Must be a C_IDENTIFIER.
description: |-
Name of the environment variable.
May consist of any printable ASCII characters except '='.
type: string
value:
description: |-
@@ -2581,6 +2675,42 @@ spec:
- fieldPath
type: object
x-kubernetes-map-type: atomic
fileKeyRef:
description: |-
FileKeyRef selects a key of the env file.
Requires the EnvFiles feature gate to be enabled.
properties:
key:
description: |-
The key within the env file. An invalid key will prevent the pod from starting.
The keys defined within a source may consist of any printable ASCII characters except '='.
During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters.
type: string
optional:
default: false
description: |-
Specify whether the file or its key must be defined. If the file or key
does not exist, then the env var is not published.
If optional is set to true and the specified key does not exist,
the environment variable will not be set in the Pod's containers.
If optional is set to false and the specified key does not exist,
an error will be returned during Pod creation.
type: boolean
path:
description: |-
The path within the volume from which to select the file.
Must be relative and may not contain the '..' path or start with '..'.
type: string
volumeName:
description: The name of the volume mount containing the env file.
type: string
required:
- key
- path
- volumeName
type: object
x-kubernetes-map-type: atomic
resourceFieldRef:
description: |-
Selects a resource of the container: only resources limits and requests
@@ -2636,13 +2766,13 @@ spec:
envFrom:
description: |-
List of sources to populate environment variables in the container.
The keys defined within a source must be a C_IDENTIFIER. All invalid keys
will be reported as an event when the container is starting. When a key exists in multiple
The keys defined within a source may consist of any printable ASCII characters except '='.
When a key exists in multiple
sources, the value associated with the last source will take precedence.
Values defined by an Env with a duplicate key will take precedence.
Cannot be updated.
items:
description: EnvFromSource represents the source of a set of ConfigMaps
description: EnvFromSource represents the source of a set of ConfigMaps or Secrets
properties:
configMapRef:
description: The ConfigMap to select from
@@ -2662,7 +2792,9 @@ spec:
type: object
x-kubernetes-map-type: atomic
prefix:
description: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.
description: |-
Optional text to prepend to the name of each environment variable.
May consist of any printable ASCII characters except '='.
type: string
secretRef:
description: The Secret to select from
@@ -2907,6 +3039,12 @@ spec:
- port
type: object
type: object
stopSignal:
description: |-
StopSignal defines which signal will be sent to a container when it is being stopped.
If not specified, the default is defined by the container runtime in use.
StopSignal can only be set for Pods with a non-empty .spec.os.name
type: string
type: object
livenessProbe:
description: Probes are not allowed for ephemeral containers.
@@ -3280,7 +3418,7 @@ spec:
Claims lists the names of resources, defined in spec.resourceClaims,
that are used by this container.
This is an alpha field and requires enabling the
This field depends on the
DynamicResourceAllocation feature gate.
This field is immutable. It can only be set for containers.
@@ -3332,9 +3470,51 @@ spec:
description: |-
Restart policy for the container to manage the restart behavior of each
container within a pod.
This may only be set for init containers. You cannot set this field on
ephemeral containers.
You cannot set this field on ephemeral containers.
type: string
restartPolicyRules:
description: |-
Represents a list of rules to be checked to determine if the
container should be restarted on exit. You cannot set this field on
ephemeral containers.
items:
description: ContainerRestartRule describes how a container exit is handled.
properties:
action:
description: |-
Specifies the action taken on a container exit if the requirements
are satisfied. The only possible value is "Restart" to restart the
container.
type: string
exitCodes:
description: Represents the exit codes to check on container exits.
properties:
operator:
description: |-
Represents the relationship between the container exit code(s) and the
specified values. Possible values are:
- In: the requirement is satisfied if the container exit code is in the
set of specified values.
- NotIn: the requirement is satisfied if the container exit code is
not in the set of specified values.
type: string
values:
description: |-
Specifies the set of values to check for container exit codes.
At most 255 elements are allowed.
items:
format: int32
type: integer
type: array
x-kubernetes-list-type: set
required:
- operator
type: object
required:
- action
type: object
type: array
x-kubernetes-list-type: atomic
securityContext:
description: |-
Optional: SecurityContext defines the security options the ephemeral container should be run with.
@@ -3853,7 +4033,9 @@ spec:
hostNetwork:
description: |-
Host networking requested for this pod. Use the host's network namespace.
If this option is set, the ports that will be used must be specified.
When using HostNetwork you should specify ports so the scheduler is aware.
When `hostNetwork` is true, specified `hostPort` fields in port definitions must match `containerPort`,
and unspecified `hostPort` fields in port definitions are defaulted to match `containerPort`.
Default to false.
type: boolean
hostPID:
@@ -3878,6 +4060,19 @@ spec:
Specifies the hostname of the Pod
If not specified, the pod's hostname will be set to a system-defined value.
type: string
hostnameOverride:
description: |-
HostnameOverride specifies an explicit override for the pod's hostname as perceived by the pod.
This field only specifies the pod's hostname and does not affect its DNS records.
When this field is set to a non-empty string:
- It takes precedence over the values set in `hostname` and `subdomain`.
- The Pod's hostname will be set to this value.
- `setHostnameAsFQDN` must be nil or set to false.
- `hostNetwork` must be set to false.
This field must be a valid DNS subdomain as defined in RFC 1123 and contain at most 64 characters.
Requires the HostnameOverride feature gate to be enabled.
type: string
imagePullSecrets:
description: |-
ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec.
@@ -3913,7 +4108,7 @@ spec:
Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes.
The resourceRequirements of an init container are taken into account during scheduling
by finding the highest request/limit for each resource type, and then using the max of
of that value or the sum of the normal containers. Limits are applied to init containers
that value or the sum of the normal containers. Limits are applied to init containers
in a similar fashion.
Init containers cannot currently be added or removed.
Cannot be updated.
@@ -3957,7 +4152,9 @@ spec:
description: EnvVar represents an environment variable present in a Container.
properties:
name:
description: Name of the environment variable. Must be a C_IDENTIFIER.
description: |-
Name of the environment variable.
May consist of any printable ASCII characters except '='.
type: string
value:
description: |-
@@ -4011,6 +4208,42 @@ spec:
- fieldPath
type: object
x-kubernetes-map-type: atomic
fileKeyRef:
description: |-
FileKeyRef selects a key of the env file.
Requires the EnvFiles feature gate to be enabled.
properties:
key:
description: |-
The key within the env file. An invalid key will prevent the pod from starting.
The keys defined within a source may consist of any printable ASCII characters except '='.
During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters.
type: string
optional:
default: false
description: |-
Specify whether the file or its key must be defined. If the file or key
does not exist, then the env var is not published.
If optional is set to true and the specified key does not exist,
the environment variable will not be set in the Pod's containers.
If optional is set to false and the specified key does not exist,
an error will be returned during Pod creation.
type: boolean
path:
description: |-
The path within the volume from which to select the file.
Must be relative and may not contain the '..' path or start with '..'.
type: string
volumeName:
description: The name of the volume mount containing the env file.
type: string
required:
- key
- path
- volumeName
type: object
x-kubernetes-map-type: atomic
resourceFieldRef:
description: |-
Selects a resource of the container: only resources limits and requests
@@ -4066,13 +4299,13 @@ spec:
envFrom:
description: |-
List of sources to populate environment variables in the container.
The keys defined within a source must be a C_IDENTIFIER. All invalid keys
will be reported as an event when the container is starting. When a key exists in multiple
The keys defined within a source may consist of any printable ASCII characters except '='.
When a key exists in multiple
sources, the value associated with the last source will take precedence.
Values defined by an Env with a duplicate key will take precedence.
Cannot be updated.
items:
description: EnvFromSource represents the source of a set of ConfigMaps
description: EnvFromSource represents the source of a set of ConfigMaps or Secrets
properties:
configMapRef:
description: The ConfigMap to select from
@@ -4092,7 +4325,9 @@ spec:
type: object
x-kubernetes-map-type: atomic
prefix:
description: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.
description: |-
Optional text to prepend to the name of each environment variable.
May consist of any printable ASCII characters except '='.
type: string
secretRef:
description: The Secret to select from
@@ -4341,6 +4576,12 @@ spec:
- port
type: object
type: object
stopSignal:
description: |-
StopSignal defines which signal will be sent to a container when it is being stopped.
If not specified, the default is defined by the container runtime in use.
StopSignal can only be set for Pods with a non-empty .spec.os.name
type: string
type: object
livenessProbe:
description: |-
@@ -4731,7 +4972,7 @@ spec:
Claims lists the names of resources, defined in spec.resourceClaims,
that are used by this container.
This is an alpha field and requires enabling the
This field depends on the
DynamicResourceAllocation feature gate.
This field is immutable. It can only be set for containers.
@@ -4782,10 +5023,10 @@ spec:
restartPolicy:
description: |-
RestartPolicy defines the restart behavior of individual containers in a pod.
This field may only be set for init containers, and the only allowed value is "Always".
For non-init containers or when this field is not specified,
This overrides the pod-level restart policy. When this field is not specified,
the restart behavior is defined by the Pod's restart policy and the container type.
Setting the RestartPolicy as "Always" for the init container will have the following effect:
Additionally, setting the RestartPolicy as "Always" for the init container will
have the following effect:
this init container will be continually restarted on
exit until all regular containers have terminated. Once all regular
containers have completed, all init containers with restartPolicy "Always"
@@ -4797,6 +5038,57 @@ spec:
init container is started, or after any startupProbe has successfully
completed.
type: string
restartPolicyRules:
description: |-
Represents a list of rules to be checked to determine if the
container should be restarted on exit. The rules are evaluated in
order. Once a rule matches a container exit condition, the remaining
rules are ignored. If no rule matches the container exit condition,
the Container-level restart policy determines whether the container
is restarted or not. Constraints on the rules:
- At most 20 rules are allowed.
- Rules can have the same action.
- Identical rules are not forbidden in validations.
When rules are specified, container MUST set RestartPolicy explicitly
even if it matches the Pod's RestartPolicy.
items:
description: ContainerRestartRule describes how a container exit is handled.
properties:
action:
description: |-
Specifies the action taken on a container exit if the requirements
are satisfied. The only possible value is "Restart" to restart the
container.
type: string
exitCodes:
description: Represents the exit codes to check on container exits.
properties:
operator:
description: |-
Represents the relationship between the container exit code(s) and the
specified values. Possible values are:
- In: the requirement is satisfied if the container exit code is in the
set of specified values.
- NotIn: the requirement is satisfied if the container exit code is
not in the set of specified values.
type: string
values:
description: |-
Specifies the set of values to check for container exit codes.
At most 255 elements are allowed.
items:
format: int32
type: integer
type: array
x-kubernetes-list-type: set
required:
- operator
type: object
required:
- action
type: object
type: array
x-kubernetes-list-type: atomic
securityContext:
description: |-
SecurityContext defines the security options the container should be run with.
@@ -5310,6 +5602,7 @@ spec:
- spec.hostPID
- spec.hostIPC
- spec.hostUsers
- spec.resources
- spec.securityContext.appArmorProfile
- spec.securityContext.seLinuxOptions
- spec.securityContext.seccompProfile
@@ -5461,7 +5754,7 @@ spec:
description: |-
Resources is the total amount of CPU and Memory resources required by all
containers in the pod. It supports specifying Requests and Limits for
"cpu" and "memory" resource names only. ResourceClaims are not supported.
"cpu", "memory" and "hugepages-" resource names only. ResourceClaims are not supported.
This field enables fine-grained control over resource allocation for the
entire pod, allowing resource sharing among containers in a pod.
@@ -5474,7 +5767,7 @@ spec:
Claims lists the names of resources, defined in spec.resourceClaims,
that are used by this container.
This is an alpha field and requires enabling the
This field depends on the
DynamicResourceAllocation feature gate.
This field is immutable. It can only be set for containers.
@@ -6002,7 +6295,6 @@ spec:
- Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.
If this value is nil, the behavior is equivalent to the Honor policy.
This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
type: string
nodeTaintsPolicy:
description: |-
@@ -6013,7 +6305,6 @@ spec:
- Ignore: node taints are ignored. All nodes are included.
If this value is nil, the behavior is equivalent to the Ignore policy.
This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
type: string
topologyKey:
description: |-
@@ -6719,15 +7010,13 @@ spec:
volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim.
If specified, the CSI driver will create or update the volume with the attributes defined
in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName,
it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass
will be applied to the claim but it's not allowed to reset this field to empty string once it is set.
If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass
will be set by the persistentvolume controller if it exists.
it can be changed after the claim is created. An empty string or nil value indicates that no
VolumeAttributesClass will be applied to the claim. If the claim enters an Infeasible error state,
this field can be reset to its previous value (including nil) to cancel the modification.
If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource
exists.
More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
(Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).
type: string
volumeMode:
description: |-
@@ -6901,12 +7190,9 @@ spec:
description: |-
glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.
Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported.
More info: https://examples.k8s.io/volumes/glusterfs/README.md
properties:
endpoints:
description: |-
endpoints is the endpoint name that details Glusterfs topology.
More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
description: endpoints is the endpoint name that details Glusterfs topology.
type: string
path:
description: |-
@@ -6960,7 +7246,7 @@ spec:
The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field.
The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images.
The volume will be mounted read-only (ro) and non-executable files (noexec).
Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath).
Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath) before 1.33.
The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type.
properties:
pullPolicy:
@@ -6985,7 +7271,7 @@ spec:
description: |-
iscsi represents an ISCSI Disk resource that is attached to a
kubelet's host machine and then exposed to the pod.
More info: https://examples.k8s.io/volumes/iscsi/README.md
More info: https://kubernetes.io/docs/concepts/storage/volumes/#iscsi
properties:
chapAuthDiscovery:
description: chapAuthDiscovery defines whether support iSCSI Discovery CHAP authentication
@@ -7375,6 +7661,110 @@ spec:
type: array
x-kubernetes-list-type: atomic
type: object
podCertificate:
description: |-
Projects an auto-rotating credential bundle (private key and certificate
chain) that the pod can use either as a TLS client or server.
Kubelet generates a private key and uses it to send a
PodCertificateRequest to the named signer. Once the signer approves the
request and issues a certificate chain, Kubelet writes the key and
certificate chain to the pod filesystem. The pod does not start until
certificates have been issued for each podCertificate projected volume
source in its spec.
Kubelet will begin trying to rotate the certificate at the time indicated
by the signer using the PodCertificateRequest.Status.BeginRefreshAt
timestamp.
Kubelet can write a single file, indicated by the credentialBundlePath
field, or separate files, indicated by the keyPath and
certificateChainPath fields.
The credential bundle is a single file in PEM format. The first PEM
entry is the private key (in PKCS#8 format), and the remaining PEM
entries are the certificate chain issued by the signer (typically,
signers will return their certificate chain in leaf-to-root order).
Prefer using the credential bundle format, since your application code
can read it atomically. If you use keyPath and certificateChainPath,
your application must make two separate file reads. If these coincide
with a certificate rotation, it is possible that the private key and leaf
certificate you read may not correspond to each other. Your application
will need to check for this condition, and re-read until they are
consistent.
The named signer chooses the format of the certificate it
issues; consult the signer implementation's documentation to learn how to
use the certificates it issues.
properties:
certificateChainPath:
description: |-
Write the certificate chain at this path in the projected volume.
Most applications should use credentialBundlePath. When using keyPath
and certificateChainPath, your application needs to check that the key
and leaf certificate are consistent, because it is possible to read the
files mid-rotation.
type: string
credentialBundlePath:
description: |-
Write the credential bundle at this path in the projected volume.
The credential bundle is a single file that contains multiple PEM blocks.
The first PEM block is a PRIVATE KEY block, containing a PKCS#8 private
key.
The remaining blocks are CERTIFICATE blocks, containing the issued
certificate chain from the signer (leaf and any intermediates).
Using credentialBundlePath lets your Pod's application code make a single
atomic read that retrieves a consistent key and certificate chain. If you
project them to separate files, your application code will need to
additionally check that the leaf certificate was issued to the key.
type: string
keyPath:
description: |-
Write the key at this path in the projected volume.
Most applications should use credentialBundlePath. When using keyPath
and certificateChainPath, your application needs to check that the key
and leaf certificate are consistent, because it is possible to read the
files mid-rotation.
type: string
keyType:
description: |-
The type of keypair Kubelet will generate for the pod.
Valid values are "RSA3072", "RSA4096", "ECDSAP256", "ECDSAP384",
"ECDSAP521", and "ED25519".
type: string
maxExpirationSeconds:
description: |-
maxExpirationSeconds is the maximum lifetime permitted for the
certificate.
Kubelet copies this value verbatim into the PodCertificateRequests it
generates for this projection.
If omitted, kube-apiserver will set it to 86400 (24 hours). kube-apiserver
will reject values shorter than 3600 (1 hour). The maximum allowable
value is 7862400 (91 days).
The signer implementation is then free to issue a certificate with any
lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600
seconds (1 hour). This constraint is enforced by kube-apiserver.
`kubernetes.io` signers will never issue certificates with a lifetime
longer than 24 hours.
format: int32
type: integer
signerName:
description: Kubelet's generated CSRs will be addressed to this signer.
type: string
required:
- keyType
- signerName
type: object
secret:
description: secret information about the secret data to project
properties:
@@ -7504,7 +7894,6 @@ spec:
description: |-
rbd represents a Rados Block Device mount on the host that shares a pod's lifetime.
Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported.
More info: https://examples.k8s.io/volumes/rbd/README.md
properties:
fsType:
description: |-
@@ -7784,6 +8173,53 @@ spec:
required:
- containers
type: object
vaultConfig:
properties:
azureKeyVault:
properties:
certificatePath:
type: string
clientId:
type: string
tenantId:
type: string
url:
type: string
required:
- certificatePath
- clientId
- tenantId
- url
type: object
proxy:
properties:
http:
properties:
credentialSecretRef:
type: string
url:
description: Required
type: string
type: object
https:
properties:
credentialSecretRef:
type: string
url:
description: Required
type: string
type: object
noProxy:
items:
type: string
type: array
type: object
type:
description: |-
VaultType represents the type of vault that can be used in the application.
It is used to identify which vault integration should be used to resolve secrets.
type: string
type: object
required:
- githubConfigSecret
- githubConfigUrl
@@ -7794,10 +8230,13 @@ spec:
properties:
failures:
additionalProperties:
type: boolean
format: date-time
type: string
type: object
jobDisplayName:
type: string
jobId:
type: string
jobRepositoryName:
type: string
jobRequestId:
@@ -7826,8 +8265,6 @@ spec:
type: string
runnerId:
type: integer
runnerJITConfig:
type: string
runnerName:
type: string
workflowRunId:
@@ -7839,4 +8276,3 @@ spec:
storage: true
subresources:
status: {}
preserveUnknownFields: false

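Three status-schema changes in this CRD are worth flagging: status.failures values switch from booleans to RFC 3339 date-time strings, status.jobId is new (also surfaced as a printer column), and runnerJITConfig is dropped from status. The first two plausibly correspond to Go fields like the sketch below, since Kubernetes serializes metav1.Time as a date-time string; this is an inference from the schema, not taken from this diff:

package v1alpha1

import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

// Inferred shape only: failure timestamps keyed by some runner identifier
// (the key semantics are not visible in this schema), plus the new job ID.
type inferredStatusDelta struct {
	Failures map[string]metav1.Time `json:"failures,omitempty"`
	JobID    string                 `json:"jobId,omitempty"`
}
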
View File

@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.17.2
controller-gen.kubebuilder.io/version: v0.19.0
name: ephemeralrunnersets.actions.github.com
spec:
group: actions.github.com
@@ -421,7 +421,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
Also, matchLabelKeys cannot be set when labelSelector isn't set.
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -436,7 +435,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -597,7 +595,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
Also, matchLabelKeys cannot be set when labelSelector isn't set.
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -612,7 +609,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -701,8 +697,8 @@ spec:
most preferred is the one with the greatest sum of weights, i.e.
for each node that meets all of the scheduling requirements (resource
request, requiredDuringScheduling anti-affinity expressions, etc.),
compute a sum by iterating through the elements of this field and adding
"weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the
compute a sum by iterating through the elements of this field and subtracting
"weight" from the sum if the node has pods which matches the corresponding podAffinityTerm; the
node(s) with the highest sum are the most preferred.
items:
description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)
@@ -766,7 +762,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
Also, matchLabelKeys cannot be set when labelSelector isn't set.
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -781,7 +776,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -942,7 +936,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
Also, matchLabelKeys cannot be set when labelSelector isn't set.
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -957,7 +950,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -1084,7 +1076,9 @@ spec:
description: EnvVar represents an environment variable present in a Container.
properties:
name:
description: Name of the environment variable. Must be a C_IDENTIFIER.
description: |-
Name of the environment variable.
May consist of any printable ASCII characters except '='.
type: string
value:
description: |-
@@ -1138,6 +1132,42 @@ spec:
- fieldPath
type: object
x-kubernetes-map-type: atomic
fileKeyRef:
description: |-
FileKeyRef selects a key of the env file.
Requires the EnvFiles feature gate to be enabled.
properties:
key:
description: |-
The key within the env file. An invalid key will prevent the pod from starting.
The keys defined within a source may consist of any printable ASCII characters except '='.
During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters.
type: string
optional:
default: false
description: |-
Specify whether the file or its key must be defined. If the file or key
does not exist, then the env var is not published.
If optional is set to true and the specified key does not exist,
the environment variable will not be set in the Pod's containers.
If optional is set to false and the specified key does not exist,
an error will be returned during Pod creation.
type: boolean
path:
description: |-
The path within the volume from which to select the file.
Must be relative and may not contain the '..' path or start with '..'.
type: string
volumeName:
description: The name of the volume mount containing the env file.
type: string
required:
- key
- path
- volumeName
type: object
x-kubernetes-map-type: atomic
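For orientation, a minimal sketch of how the new fileKeyRef source could be consumed, assuming the alpha EnvFiles feature gate is enabled; the pod, volume, image, and key names below are hypothetical and not taken from this diff:

# Hypothetical sketch only; requires the alpha EnvFiles feature gate.
apiVersion: v1
kind: Pod
metadata:
  name: filekeyref-demo              # hypothetical name
spec:
  restartPolicy: Never
  volumes:
  - name: env-file-vol               # volume expected to contain config.env (populated elsewhere, e.g. by an init container)
    emptyDir: {}
  containers:
  - name: app
    image: busybox                   # hypothetical image
    command: ["sh", "-c", "echo $DB_PASSWORD"]
    volumeMounts:
    - name: env-file-vol
      mountPath: /etc/envfiles
    env:
    - name: DB_PASSWORD
      valueFrom:
        fileKeyRef:
          volumeName: env-file-vol   # name of the volume holding the env file
          path: config.env           # relative path inside the volume
          key: DB_PASSWORD           # key within the env file; '=' is not allowed
          optional: false            # pod creation fails if the key is missing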
resourceFieldRef:
description: |-
Selects a resource of the container: only resources limits and requests
@@ -1193,13 +1223,13 @@ spec:
envFrom:
description: |-
List of sources to populate environment variables in the container.
The keys defined within a source must be a C_IDENTIFIER. All invalid keys
will be reported as an event when the container is starting. When a key exists in multiple
The keys defined within a source may consist of any printable ASCII characters except '='.
When a key exists in multiple
sources, the value associated with the last source will take precedence.
Values defined by an Env with a duplicate key will take precedence.
Cannot be updated.
items:
description: EnvFromSource represents the source of a set of ConfigMaps
description: EnvFromSource represents the source of a set of ConfigMaps or Secrets
properties:
configMapRef:
description: The ConfigMap to select from
@@ -1219,7 +1249,9 @@ spec:
type: object
x-kubernetes-map-type: atomic
prefix:
description: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.
description: |-
Optional text to prepend to the name of each environment variable.
May consist of any printable ASCII characters except '='.
type: string
secretRef:
description: The Secret to select from
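A short sketch of envFrom with a prefix under the relaxed naming rule described above (any printable ASCII character except '='); the ConfigMap and image names are hypothetical:

# Hypothetical fragment of a pod spec.
containers:
- name: app
  image: busybox               # hypothetical image
  envFrom:
  - prefix: CONF_              # prepended to every key imported from the source
    configMapRef:
      name: app-settings       # hypothetical ConfigMap
      optional: true           # tolerate a missing ConfigMap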
@@ -1468,6 +1500,12 @@ spec:
- port
type: object
type: object
stopSignal:
description: |-
StopSignal defines which signal will be sent to a container when it is being stopped.
If not specified, the default is defined by the container runtime in use.
StopSignal can only be set for Pods with a non-empty .spec.os.name
type: string
type: object
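A minimal sketch of the new stopSignal field; per the schema above it requires a non-empty spec.os.name, and the pod and image names are hypothetical:

# Hypothetical sketch only.
apiVersion: v1
kind: Pod
metadata:
  name: stopsignal-demo        # hypothetical name
spec:
  os:
    name: linux                # stopSignal requires spec.os.name to be set
  containers:
  - name: web
    image: nginx               # hypothetical image
    lifecycle:
      stopSignal: SIGQUIT      # sent on stop instead of the runtime default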
livenessProbe:
description: |-
@@ -1858,7 +1896,7 @@ spec:
Claims lists the names of resources, defined in spec.resourceClaims,
that are used by this container.
This is an alpha field and requires enabling the
This field depends on the
DynamicResourceAllocation feature gate.
This field is immutable. It can only be set for containers.
@@ -1909,10 +1947,10 @@ spec:
restartPolicy:
description: |-
RestartPolicy defines the restart behavior of individual containers in a pod.
This field may only be set for init containers, and the only allowed value is "Always".
For non-init containers or when this field is not specified,
This overrides the pod-level restart policy. When this field is not specified,
the restart behavior is defined by the Pod's restart policy and the container type.
Setting the RestartPolicy as "Always" for the init container will have the following effect:
Additionally, setting the RestartPolicy as "Always" for the init container will
have the following effect:
this init container will be continually restarted on
exit until all regular containers have terminated. Once all regular
containers have completed, all init containers with restartPolicy "Always"
@@ -1924,6 +1962,57 @@ spec:
init container is started, or after any startupProbe has successfully
completed.
type: string
restartPolicyRules:
description: |-
Represents a list of rules to be checked to determine if the
container should be restarted on exit. The rules are evaluated in
order. Once a rule matches a container exit condition, the remaining
rules are ignored. If no rule matches the container exit condition,
the Container-level restart policy determines whether the container
is restarted or not. Constraints on the rules:
- At most 20 rules are allowed.
- Rules can have the same action.
- Identical rules are not forbidden in validations.
When rules are specified, container MUST set RestartPolicy explicitly
even if it matches the Pod's RestartPolicy.
items:
description: ContainerRestartRule describes how a container exit is handled.
properties:
action:
description: |-
Specifies the action taken on a container exit if the requirements
are satisfied. The only possible value is "Restart" to restart the
container.
type: string
exitCodes:
description: Represents the exit codes to check on container exits.
properties:
operator:
description: |-
Represents the relationship between the container exit code(s) and the
specified values. Possible values are:
- In: the requirement is satisfied if the container exit code is in the
set of specified values.
- NotIn: the requirement is satisfied if the container exit code is
not in the set of specified values.
type: string
values:
description: |-
Specifies the set of values to check for container exit codes.
At most 255 elements are allowed.
items:
format: int32
type: integer
type: array
x-kubernetes-list-type: set
required:
- operator
type: object
required:
- action
type: object
type: array
x-kubernetes-list-type: atomic
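A sketch of exit-code-driven restarts using the fields above; note the schema requires an explicit container-level restartPolicy when rules are present. The names are hypothetical, and the feature is assumed to be gated upstream:

# Hypothetical fragment of a pod spec.
containers:
- name: worker
  image: busybox               # hypothetical image
  command: ["sh", "-c", "exit 42"]
  restartPolicy: Never         # must be set explicitly when rules are specified
  restartPolicyRules:
  - action: Restart            # the only possible value
    exitCodes:
      operator: In             # restart only when the exit code is listed
      values: [42]             # at most 255 values are allowed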
securityContext:
description: |-
SecurityContext defines the security options the container should be run with.
@@ -2521,7 +2610,9 @@ spec:
description: EnvVar represents an environment variable present in a Container.
properties:
name:
description: Name of the environment variable. Must be a C_IDENTIFIER.
description: |-
Name of the environment variable.
May consist of any printable ASCII characters except '='.
type: string
value:
description: |-
@@ -2575,6 +2666,42 @@ spec:
- fieldPath
type: object
x-kubernetes-map-type: atomic
fileKeyRef:
description: |-
FileKeyRef selects a key of the env file.
Requires the EnvFiles feature gate to be enabled.
properties:
key:
description: |-
The key within the env file. An invalid key will prevent the pod from starting.
The keys defined within a source may consist of any printable ASCII characters except '='.
During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters.
type: string
optional:
default: false
description: |-
Specify whether the file or its key must be defined. If the file or key
does not exist, then the env var is not published.
If optional is set to true and the specified key does not exist,
the environment variable will not be set in the Pod's containers.
If optional is set to false and the specified key does not exist,
an error will be returned during Pod creation.
type: boolean
path:
description: |-
The path within the volume from which to select the file.
Must be relative and may not contain the '..' path or start with '..'.
type: string
volumeName:
description: The name of the volume mount containing the env file.
type: string
required:
- key
- path
- volumeName
type: object
x-kubernetes-map-type: atomic
resourceFieldRef:
description: |-
Selects a resource of the container: only resources limits and requests
@@ -2630,13 +2757,13 @@ spec:
envFrom:
description: |-
List of sources to populate environment variables in the container.
The keys defined within a source must be a C_IDENTIFIER. All invalid keys
will be reported as an event when the container is starting. When a key exists in multiple
The keys defined within a source may consist of any printable ASCII characters except '='.
When a key exists in multiple
sources, the value associated with the last source will take precedence.
Values defined by an Env with a duplicate key will take precedence.
Cannot be updated.
items:
description: EnvFromSource represents the source of a set of ConfigMaps
description: EnvFromSource represents the source of a set of ConfigMaps or Secrets
properties:
configMapRef:
description: The ConfigMap to select from
@@ -2656,7 +2783,9 @@ spec:
type: object
x-kubernetes-map-type: atomic
prefix:
description: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.
description: |-
Optional text to prepend to the name of each environment variable.
May consist of any printable ASCII characters except '='.
type: string
secretRef:
description: The Secret to select from
@@ -2901,6 +3030,12 @@ spec:
- port
type: object
type: object
stopSignal:
description: |-
StopSignal defines which signal will be sent to a container when it is being stopped.
If not specified, the default is defined by the container runtime in use.
StopSignal can only be set for Pods with a non-empty .spec.os.name
type: string
type: object
livenessProbe:
description: Probes are not allowed for ephemeral containers.
@@ -3274,7 +3409,7 @@ spec:
Claims lists the names of resources, defined in spec.resourceClaims,
that are used by this container.
This is an alpha field and requires enabling the
This field depends on the
DynamicResourceAllocation feature gate.
This field is immutable. It can only be set for containers.
@@ -3326,9 +3461,51 @@ spec:
description: |-
Restart policy for the container to manage the restart behavior of each
container within a pod.
This may only be set for init containers. You cannot set this field on
ephemeral containers.
You cannot set this field on ephemeral containers.
type: string
restartPolicyRules:
description: |-
Represents a list of rules to be checked to determine if the
container should be restarted on exit. You cannot set this field on
ephemeral containers.
items:
description: ContainerRestartRule describes how a container exit is handled.
properties:
action:
description: |-
Specifies the action taken on a container exit if the requirements
are satisfied. The only possible value is "Restart" to restart the
container.
type: string
exitCodes:
description: Represents the exit codes to check on container exits.
properties:
operator:
description: |-
Represents the relationship between the container exit code(s) and the
specified values. Possible values are:
- In: the requirement is satisfied if the container exit code is in the
set of specified values.
- NotIn: the requirement is satisfied if the container exit code is
not in the set of specified values.
type: string
values:
description: |-
Specifies the set of values to check for container exit codes.
At most 255 elements are allowed.
items:
format: int32
type: integer
type: array
x-kubernetes-list-type: set
required:
- operator
type: object
required:
- action
type: object
type: array
x-kubernetes-list-type: atomic
securityContext:
description: |-
Optional: SecurityContext defines the security options the ephemeral container should be run with.
@@ -3847,7 +4024,9 @@ spec:
hostNetwork:
description: |-
Host networking requested for this pod. Use the host's network namespace.
If this option is set, the ports that will be used must be specified.
When using HostNetwork you should specify ports so the scheduler is aware.
When `hostNetwork` is true, specified `hostPort` fields in port definitions must match `containerPort`,
and unspecified `hostPort` fields in port definitions are defaulted to match `containerPort`.
Default to false.
type: boolean
hostPID:
@@ -3872,6 +4051,19 @@ spec:
Specifies the hostname of the Pod
If not specified, the pod's hostname will be set to a system-defined value.
type: string
hostnameOverride:
description: |-
HostnameOverride specifies an explicit override for the pod's hostname as perceived by the pod.
This field only specifies the pod's hostname and does not affect its DNS records.
When this field is set to a non-empty string:
- It takes precedence over the values set in `hostname` and `subdomain`.
- The Pod's hostname will be set to this value.
- `setHostnameAsFQDN` must be nil or set to false.
- `hostNetwork` must be set to false.
This field must be a valid DNS subdomain as defined in RFC 1123 and contain at most 64 characters.
Requires the HostnameOverride feature gate to be enabled.
type: string
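A minimal sketch of hostnameOverride as described above (hostNetwork must be false and setHostnameAsFQDN unset or false); the HostnameOverride feature gate is assumed enabled, and all names are hypothetical:

# Hypothetical sketch only.
apiVersion: v1
kind: Pod
metadata:
  name: hostname-demo          # hypothetical name
spec:
  hostNetwork: false           # must be false when hostnameOverride is set
  hostnameOverride: runner-a1  # wins over hostname/subdomain; RFC 1123, at most 64 chars
  containers:
  - name: app
    image: busybox             # hypothetical image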
imagePullSecrets:
description: |-
ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec.
@@ -3907,7 +4099,7 @@ spec:
Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes.
The resourceRequirements of an init container are taken into account during scheduling
by finding the highest request/limit for each resource type, and then using the max of
of that value or the sum of the normal containers. Limits are applied to init containers
that value or the sum of the normal containers. Limits are applied to init containers
in a similar fashion.
Init containers cannot currently be added or removed.
Cannot be updated.
@@ -3951,7 +4143,9 @@ spec:
description: EnvVar represents an environment variable present in a Container.
properties:
name:
description: Name of the environment variable. Must be a C_IDENTIFIER.
description: |-
Name of the environment variable.
May consist of any printable ASCII characters except '='.
type: string
value:
description: |-
@@ -4005,6 +4199,42 @@ spec:
- fieldPath
type: object
x-kubernetes-map-type: atomic
fileKeyRef:
description: |-
FileKeyRef selects a key of the env file.
Requires the EnvFiles feature gate to be enabled.
properties:
key:
description: |-
The key within the env file. An invalid key will prevent the pod from starting.
The keys defined within a source may consist of any printable ASCII characters except '='.
During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters.
type: string
optional:
default: false
description: |-
Specify whether the file or its key must be defined. If the file or key
does not exist, then the env var is not published.
If optional is set to true and the specified key does not exist,
the environment variable will not be set in the Pod's containers.
If optional is set to false and the specified key does not exist,
an error will be returned during Pod creation.
type: boolean
path:
description: |-
The path within the volume from which to select the file.
Must be relative and may not contain the '..' path or start with '..'.
type: string
volumeName:
description: The name of the volume mount containing the env file.
type: string
required:
- key
- path
- volumeName
type: object
x-kubernetes-map-type: atomic
resourceFieldRef:
description: |-
Selects a resource of the container: only resources limits and requests
@@ -4060,13 +4290,13 @@ spec:
envFrom:
description: |-
List of sources to populate environment variables in the container.
The keys defined within a source must be a C_IDENTIFIER. All invalid keys
will be reported as an event when the container is starting. When a key exists in multiple
The keys defined within a source may consist of any printable ASCII characters except '='.
When a key exists in multiple
sources, the value associated with the last source will take precedence.
Values defined by an Env with a duplicate key will take precedence.
Cannot be updated.
items:
description: EnvFromSource represents the source of a set of ConfigMaps
description: EnvFromSource represents the source of a set of ConfigMaps or Secrets
properties:
configMapRef:
description: The ConfigMap to select from
@@ -4086,7 +4316,9 @@ spec:
type: object
x-kubernetes-map-type: atomic
prefix:
description: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.
description: |-
Optional text to prepend to the name of each environment variable.
May consist of any printable ASCII characters except '='.
type: string
secretRef:
description: The Secret to select from
@@ -4335,6 +4567,12 @@ spec:
- port
type: object
type: object
stopSignal:
description: |-
StopSignal defines which signal will be sent to a container when it is being stopped.
If not specified, the default is defined by the container runtime in use.
StopSignal can only be set for Pods with a non-empty .spec.os.name
type: string
type: object
livenessProbe:
description: |-
@@ -4725,7 +4963,7 @@ spec:
Claims lists the names of resources, defined in spec.resourceClaims,
that are used by this container.
This is an alpha field and requires enabling the
This field depends on the
DynamicResourceAllocation feature gate.
This field is immutable. It can only be set for containers.
@@ -4776,10 +5014,10 @@ spec:
restartPolicy:
description: |-
RestartPolicy defines the restart behavior of individual containers in a pod.
This field may only be set for init containers, and the only allowed value is "Always".
For non-init containers or when this field is not specified,
This overrides the pod-level restart policy. When this field is not specified,
the restart behavior is defined by the Pod's restart policy and the container type.
Setting the RestartPolicy as "Always" for the init container will have the following effect:
Additionally, setting the RestartPolicy as "Always" for the init container will
have the following effect:
this init container will be continually restarted on
exit until all regular containers have terminated. Once all regular
containers have completed, all init containers with restartPolicy "Always"
@@ -4791,6 +5029,57 @@ spec:
init container is started, or after any startupProbe has successfully
completed.
type: string
restartPolicyRules:
description: |-
Represents a list of rules to be checked to determine if the
container should be restarted on exit. The rules are evaluated in
order. Once a rule matches a container exit condition, the remaining
rules are ignored. If no rule matches the container exit condition,
the Container-level restart policy determines whether the container
is restarted or not. Constraints on the rules:
- At most 20 rules are allowed.
- Rules can have the same action.
- Identical rules are not forbidden in validations.
When rules are specified, container MUST set RestartPolicy explicitly
even if it matches the Pod's RestartPolicy.
items:
description: ContainerRestartRule describes how a container exit is handled.
properties:
action:
description: |-
Specifies the action taken on a container exit if the requirements
are satisfied. The only possible value is "Restart" to restart the
container.
type: string
exitCodes:
description: Represents the exit codes to check on container exits.
properties:
operator:
description: |-
Represents the relationship between the container exit code(s) and the
specified values. Possible values are:
- In: the requirement is satisfied if the container exit code is in the
set of specified values.
- NotIn: the requirement is satisfied if the container exit code is
not in the set of specified values.
type: string
values:
description: |-
Specifies the set of values to check for container exit codes.
At most 255 elements are allowed.
items:
format: int32
type: integer
type: array
x-kubernetes-list-type: set
required:
- operator
type: object
required:
- action
type: object
type: array
x-kubernetes-list-type: atomic
securityContext:
description: |-
SecurityContext defines the security options the container should be run with.
@@ -5304,6 +5593,7 @@ spec:
- spec.hostPID
- spec.hostIPC
- spec.hostUsers
- spec.resources
- spec.securityContext.appArmorProfile
- spec.securityContext.seLinuxOptions
- spec.securityContext.seccompProfile
@@ -5455,7 +5745,7 @@ spec:
description: |-
Resources is the total amount of CPU and Memory resources required by all
containers in the pod. It supports specifying Requests and Limits for
"cpu" and "memory" resource names only. ResourceClaims are not supported.
"cpu", "memory" and "hugepages-" resource names only. ResourceClaims are not supported.
This field enables fine-grained control over resource allocation for the
entire pod, allowing resource sharing among containers in a pod.
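A sketch of the pod-level resources stanza this description refers to, assuming pod-level resources are enabled in the cluster; the names are hypothetical:

# Hypothetical sketch only.
apiVersion: v1
kind: Pod
metadata:
  name: pod-resources-demo     # hypothetical name
spec:
  resources:                   # shared budget for all containers in the pod
    requests:
      cpu: "1"
      memory: 1Gi
    limits:
      cpu: "2"
      memory: 2Gi              # only cpu, memory and hugepages-* are accepted here
  containers:
  - name: a
    image: busybox             # hypothetical image
  - name: b
    image: busybox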
@@ -5468,7 +5758,7 @@ spec:
Claims lists the names of resources, defined in spec.resourceClaims,
that are used by this container.
This is an alpha field and requires enabling the
This field depends on the
DynamicResourceAllocation feature gate.
This field is immutable. It can only be set for containers.
@@ -5996,7 +6286,6 @@ spec:
- Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.
If this value is nil, the behavior is equivalent to the Honor policy.
This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
type: string
nodeTaintsPolicy:
description: |-
@@ -6007,7 +6296,6 @@ spec:
- Ignore: node taints are ignored. All nodes are included.
If this value is nil, the behavior is equivalent to the Ignore policy.
This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
type: string
topologyKey:
description: |-
@@ -6713,15 +7001,13 @@ spec:
volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim.
If specified, the CSI driver will create or update the volume with the attributes defined
in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName,
it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass
will be applied to the claim but it's not allowed to reset this field to empty string once it is set.
If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass
will be set by the persistentvolume controller if it exists.
it can be changed after the claim is created. An empty string or nil value indicates that no
VolumeAttributesClass will be applied to the claim. If the claim enters an Infeasible error state,
this field can be reset to its previous value (including nil) to cancel the modification.
If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource
exists.
More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
(Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).
type: string
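A sketch of a claim using volumeAttributesClassName as described above; the class and claim names are hypothetical, and the beta VolumeAttributesClass feature gate plus a compatible CSI driver are assumed:

# Hypothetical sketch only.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: vac-demo                   # hypothetical name
spec:
  accessModes: ["ReadWriteOnce"]
  resources:
    requests:
      storage: 10Gi
  volumeAttributesClassName: gold  # hypothetical class; mutable after creation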
volumeMode:
description: |-
@@ -6895,12 +7181,9 @@ spec:
description: |-
glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.
Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported.
More info: https://examples.k8s.io/volumes/glusterfs/README.md
properties:
endpoints:
description: |-
endpoints is the endpoint name that details Glusterfs topology.
More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
description: endpoints is the endpoint name that details Glusterfs topology.
type: string
path:
description: |-
@@ -6954,7 +7237,7 @@ spec:
The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field.
The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images.
The volume will be mounted read-only (ro) and non-executable files (noexec).
Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath).
Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath) before 1.33.
The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type.
properties:
pullPolicy:
@@ -6979,7 +7262,7 @@ spec:
description: |-
iscsi represents an ISCSI Disk resource that is attached to a
kubelet's host machine and then exposed to the pod.
More info: https://examples.k8s.io/volumes/iscsi/README.md
More info: https://kubernetes.io/docs/concepts/storage/volumes/#iscsi
properties:
chapAuthDiscovery:
description: chapAuthDiscovery defines whether to support iSCSI Discovery CHAP authentication
@@ -7369,6 +7652,110 @@ spec:
type: array
x-kubernetes-list-type: atomic
type: object
podCertificate:
description: |-
Projects an auto-rotating credential bundle (private key and certificate
chain) that the pod can use either as a TLS client or server.
Kubelet generates a private key and uses it to send a
PodCertificateRequest to the named signer. Once the signer approves the
request and issues a certificate chain, Kubelet writes the key and
certificate chain to the pod filesystem. The pod does not start until
certificates have been issued for each podCertificate projected volume
source in its spec.
Kubelet will begin trying to rotate the certificate at the time indicated
by the signer using the PodCertificateRequest.Status.BeginRefreshAt
timestamp.
Kubelet can write a single file, indicated by the credentialBundlePath
field, or separate files, indicated by the keyPath and
certificateChainPath fields.
The credential bundle is a single file in PEM format. The first PEM
entry is the private key (in PKCS#8 format), and the remaining PEM
entries are the certificate chain issued by the signer (typically,
signers will return their certificate chain in leaf-to-root order).
Prefer using the credential bundle format, since your application code
can read it atomically. If you use keyPath and certificateChainPath,
your application must make two separate file reads. If these coincide
with a certificate rotation, it is possible that the private key and leaf
certificate you read may not correspond to each other. Your application
will need to check for this condition, and re-read until they are
consistent.
The named signer chooses the format of the certificate it
issues; consult the signer implementation's documentation to learn how to
use the certificates it issues.
properties:
certificateChainPath:
description: |-
Write the certificate chain at this path in the projected volume.
Most applications should use credentialBundlePath. When using keyPath
and certificateChainPath, your application needs to check that the key
and leaf certificate are consistent, because it is possible to read the
files mid-rotation.
type: string
credentialBundlePath:
description: |-
Write the credential bundle at this path in the projected volume.
The credential bundle is a single file that contains multiple PEM blocks.
The first PEM block is a PRIVATE KEY block, containing a PKCS#8 private
key.
The remaining blocks are CERTIFICATE blocks, containing the issued
certificate chain from the signer (leaf and any intermediates).
Using credentialBundlePath lets your Pod's application code make a single
atomic read that retrieves a consistent key and certificate chain. If you
project them to separate files, your application code will need to
additionally check that the leaf certificate was issued to the key.
type: string
keyPath:
description: |-
Write the key at this path in the projected volume.
Most applications should use credentialBundlePath. When using keyPath
and certificateChainPath, your application needs to check that the key
and leaf certificate are consistent, because it is possible to read the
files mid-rotation.
type: string
keyType:
description: |-
The type of keypair Kubelet will generate for the pod.
Valid values are "RSA3072", "RSA4096", "ECDSAP256", "ECDSAP384",
"ECDSAP521", and "ED25519".
type: string
maxExpirationSeconds:
description: |-
maxExpirationSeconds is the maximum lifetime permitted for the
certificate.
Kubelet copies this value verbatim into the PodCertificateRequests it
generates for this projection.
If omitted, kube-apiserver will set it to 86400 (24 hours). kube-apiserver
will reject values shorter than 3600 (1 hour). The maximum allowable
value is 7862400 (91 days).
The signer implementation is then free to issue a certificate with any
lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600
seconds (1 hour). This constraint is enforced by kube-apiserver.
`kubernetes.io` signers will never issue certificates with a lifetime
longer than 24 hours.
format: int32
type: integer
signerName:
description: Kubelet's generated CSRs will be addressed to this signer.
type: string
required:
- keyType
- signerName
type: object
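A sketch of a projected podCertificate source per the schema above; the signer name and paths are hypothetical, and the surrounding pod spec is omitted:

# Hypothetical fragment of a pod spec.
volumes:
- name: tls-creds                          # hypothetical volume name
  projected:
    sources:
    - podCertificate:
        signerName: example.com/my-signer  # hypothetical signer
        keyType: ECDSAP256                 # one of the values listed above
        credentialBundlePath: creds.pem    # single PEM file: key first, then chain
        maxExpirationSeconds: 86400        # optional; must fall within 3600..7862400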
secret:
description: secret information about the secret data to project
properties:
@@ -7498,7 +7885,6 @@ spec:
description: |-
rbd represents a Rados Block Device mount on the host that shares a pod's lifetime.
Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported.
More info: https://examples.k8s.io/volumes/rbd/README.md
properties:
fsType:
description: |-
@@ -7778,6 +8164,53 @@ spec:
required:
- containers
type: object
vaultConfig:
properties:
azureKeyVault:
properties:
certificatePath:
type: string
clientId:
type: string
tenantId:
type: string
url:
type: string
required:
- certificatePath
- clientId
- tenantId
- url
type: object
proxy:
properties:
http:
properties:
credentialSecretRef:
type: string
url:
description: Required
type: string
type: object
https:
properties:
credentialSecretRef:
type: string
url:
description: Required
type: string
type: object
noProxy:
items:
type: string
type: array
type: object
type:
description: |-
VaultType represents the type of vault that can be used in the application.
It is used to identify which vault integration should be used to resolve secrets.
type: string
type: object
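A sketch of how the new vaultConfig stanza could look; the type value, URLs, IDs, and secret names below are assumptions for illustration and are not confirmed by this diff:

# Hypothetical fragment of a spec carrying vaultConfig.
vaultConfig:
  type: azure_key_vault                              # assumed value, not shown in this diff
  azureKeyVault:
    url: https://myvault.vault.azure.net             # hypothetical vault URL
    tenantId: 00000000-0000-0000-0000-000000000000   # placeholder tenant ID
    clientId: 11111111-1111-1111-1111-111111111111   # placeholder client ID
    certificatePath: /akv/cert.pfx                   # hypothetical certificate path
  proxy:
    https:
      url: https://proxy.example.com:8443            # hypothetical proxy; url is required
      credentialSecretRef: proxy-credentials         # hypothetical Secret name
    noProxy:
    - .svc.cluster.local                             # hypothetical bypass entry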
required:
- githubConfigSecret
- githubConfigUrl
@@ -7812,4 +8245,3 @@ spec:
storage: true
subresources:
status: {}
preserveUnknownFields: false

View File

@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.17.2
controller-gen.kubebuilder.io/version: v0.19.0
name: horizontalrunnerautoscalers.actions.summerwind.dev
spec:
group: actions.summerwind.dev
@@ -12,306 +12,313 @@ spec:
listKind: HorizontalRunnerAutoscalerList
plural: horizontalrunnerautoscalers
shortNames:
- hra
- hra
singular: horizontalrunnerautoscaler
scope: Namespaced
versions:
- additionalPrinterColumns:
- jsonPath: .spec.minReplicas
name: Min
type: number
- jsonPath: .spec.maxReplicas
name: Max
type: number
- jsonPath: .status.desiredReplicas
name: Desired
type: number
- jsonPath: .status.scheduledOverridesSummary
name: Schedule
type: string
name: v1alpha1
schema:
openAPIV3Schema:
description: HorizontalRunnerAutoscaler is the Schema for the horizontalrunnerautoscaler API
properties:
apiVersion:
description: |-
APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
description: |-
Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
spec:
description: HorizontalRunnerAutoscalerSpec defines the desired state of HorizontalRunnerAutoscaler
properties:
capacityReservations:
items:
description: |-
CapacityReservation specifies the number of replicas temporarily added
to the scale target until ExpirationTime.
properties:
effectiveTime:
format: date-time
type: string
expirationTime:
format: date-time
type: string
name:
type: string
replicas:
type: integer
type: object
type: array
githubAPICredentialsFrom:
properties:
secretRef:
properties:
name:
type: string
required:
- name
type: object
type: object
maxReplicas:
description: MaxReplicas is the maximum number of replicas the deployment is allowed to scale
type: integer
metrics:
description: Metrics is the collection of various metric targets to calculate desired number of runners
items:
properties:
repositoryNames:
description: |-
RepositoryNames is the list of repository names to be used for calculating the metric.
For example, a repository name is the REPO part of `github.com/USER/REPO`.
items:
type: string
type: array
scaleDownAdjustment:
description: |-
ScaleDownAdjustment is the number of runners removed on scale-down.
You can only specify either ScaleDownFactor or ScaleDownAdjustment.
type: integer
scaleDownFactor:
description: |-
ScaleDownFactor is the multiplicative factor applied to the current number of runners used
to determine how many pods should be removed.
type: string
scaleDownThreshold:
description: |-
ScaleDownThreshold is the percentage of busy runners less than which will
trigger the hpa to scale the runners down.
type: string
scaleUpAdjustment:
description: |-
ScaleUpAdjustment is the number of runners added on scale-up.
You can only specify either ScaleUpFactor or ScaleUpAdjustment.
type: integer
scaleUpFactor:
description: |-
ScaleUpFactor is the multiplicative factor applied to the current number of runners used
to determine how many pods should be added.
type: string
scaleUpThreshold:
description: |-
ScaleUpThreshold is the percentage of busy runners greater than which will
trigger the hpa to scale runners up.
type: string
type:
description: |-
Type is the type of metric to be used for autoscaling.
It can be TotalNumberOfQueuedAndInProgressWorkflowRuns or PercentageRunnersBusy.
type: string
type: object
type: array
minReplicas:
description: MinReplicas is the minimum number of replicas the deployment is allowed to scale
type: integer
scaleDownDelaySecondsAfterScaleOut:
description: |-
ScaleDownDelaySecondsAfterScaleUp is the approximate delay for a scale down followed by a scale up
Used to prevent flapping (down->up->down->... loop)
type: integer
scaleTargetRef:
description: ScaleTargetRef is the reference to scaled resource like RunnerDeployment
properties:
kind:
description: Kind is the type of resource being referenced
enum:
- RunnerDeployment
- RunnerSet
type: string
name:
description: Name is the name of resource being referenced
type: string
type: object
scaleUpTriggers:
description: |-
ScaleUpTriggers is an experimental feature to increase the desired replicas by 1
on each webhook request received by the webhookBasedAutoscaler.
This feature requires you to also enable and deploy the webhookBasedAutoscaler onto your cluster.
Note that the added runners remain until the next sync period at least,
and they may or may not be used by GitHub Actions depending on the timing.
They are intended to be used to gain "resource slack" immediately after you
receive a webhook from GitHub, so that you can loosely expect MinReplicas runners to be always available.
items:
properties:
amount:
type: integer
duration:
type: string
githubEvent:
properties:
checkRun:
description: https://docs.github.com/en/actions/reference/events-that-trigger-workflows#check_run
properties:
names:
description: |-
Names is a list of GitHub Actions glob patterns.
Any check_run event whose name matches one of the patterns in the list can trigger autoscaling.
Note that check_run name seems to equal the job name you've defined in your actions workflow yaml file.
So it is very likely that you can utilize this to trigger depending on the job.
items:
type: string
type: array
repositories:
description: |-
Repositories is a list of GitHub repositories.
Any check_run event whose repository matches one of the repositories in the list can trigger autoscaling.
items:
type: string
type: array
status:
type: string
types:
description: 'One of: created, rerequested, or completed'
items:
type: string
type: array
type: object
pullRequest:
description: https://docs.github.com/en/actions/reference/events-that-trigger-workflows#pull_request
properties:
branches:
items:
type: string
type: array
types:
items:
type: string
type: array
type: object
push:
description: |-
PushSpec is the condition for triggering scale-up on push event
Also see https://docs.github.com/en/actions/reference/events-that-trigger-workflows#push
type: object
workflowJob:
description: https://docs.github.com/en/developers/webhooks-and-events/webhooks/webhook-events-and-payloads#workflow_job
type: object
type: object
type: object
type: array
scheduledOverrides:
description: |-
ScheduledOverrides is the list of ScheduledOverride.
It can be used to override a few fields of HorizontalRunnerAutoscalerSpec on schedule.
The earlier a scheduled override is, the higher it is prioritized.
items:
description: |-
ScheduledOverride can be used to override a few fields of HorizontalRunnerAutoscalerSpec on schedule.
A schedule can optionally be recurring, so that the corresponding override happens every day, week, month, or year.
properties:
endTime:
description: EndTime is the time at which the first override ends.
format: date-time
type: string
minReplicas:
description: |-
MinReplicas is the number of runners while overriding.
If omitted, it doesn't override minReplicas.
minimum: 0
nullable: true
type: integer
recurrenceRule:
properties:
frequency:
description: |-
Frequency is the name of a predefined interval of each recurrence.
The valid values are "Daily", "Weekly", "Monthly", and "Yearly".
If empty, the corresponding override happens only once.
enum:
- Daily
- Weekly
- Monthly
- Yearly
type: string
untilTime:
description: |-
UntilTime is the time of the final recurrence.
If empty, the schedule recurs forever.
format: date-time
type: string
type: object
startTime:
description: StartTime is the time at which the first override starts.
format: date-time
type: string
required:
- endTime
- startTime
type: object
type: array
type: object
status:
properties:
cacheEntries:
items:
properties:
expirationTime:
format: date-time
type: string
key:
type: string
value:
type: integer
type: object
type: array
desiredReplicas:
description: |-
DesiredReplicas is the total number of desired, non-terminated and latest pods to be set for the primary RunnerSet
This doesn't include outdated pods while upgrading the deployment and replacing the runnerset.
type: integer
lastSuccessfulScaleOutTime:
format: date-time
nullable: true
type: string
observedGeneration:
description: |-
ObservedGeneration is the most recent generation observed for the target. It corresponds to e.g.
RunnerDeployment's generation, which is updated on mutation by the API Server.
format: int64
type: integer
scheduledOverridesSummary:
description: |-
ScheduledOverridesSummary is the summary of active and upcoming scheduled overrides to be shown in e.g. a column of a `kubectl get hra` output
for observability.
type: string
type: object
type: object
served: true
storage: true
subresources:
status: {}
preserveUnknownFields: false
- additionalPrinterColumns:
- jsonPath: .spec.minReplicas
name: Min
type: number
- jsonPath: .spec.maxReplicas
name: Max
type: number
- jsonPath: .status.desiredReplicas
name: Desired
type: number
- jsonPath: .status.scheduledOverridesSummary
name: Schedule
type: string
name: v1alpha1
schema:
openAPIV3Schema:
description: HorizontalRunnerAutoscaler is the Schema for the horizontalrunnerautoscaler
API
properties:
apiVersion:
description: |-
APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
description: |-
Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
spec:
description: HorizontalRunnerAutoscalerSpec defines the desired state
of HorizontalRunnerAutoscaler
properties:
capacityReservations:
items:
description: |-
CapacityReservation specifies the number of replicas temporarily added
to the scale target until ExpirationTime.
properties:
effectiveTime:
format: date-time
type: string
expirationTime:
format: date-time
type: string
name:
type: string
replicas:
type: integer
type: object
type: array
githubAPICredentialsFrom:
properties:
secretRef:
properties:
name:
type: string
required:
- name
type: object
type: object
maxReplicas:
description: MaxReplicas is the maximum number of replicas the deployment
is allowed to scale
type: integer
metrics:
description: Metrics is the collection of various metric targets to
calculate desired number of runners
items:
properties:
repositoryNames:
description: |-
RepositoryNames is the list of repository names to be used for calculating the metric.
For example, a repository name is the REPO part of `github.com/USER/REPO`.
items:
type: string
type: array
scaleDownAdjustment:
description: |-
ScaleDownAdjustment is the number of runners removed on scale-down.
You can only specify either ScaleDownFactor or ScaleDownAdjustment.
type: integer
scaleDownFactor:
description: |-
ScaleDownFactor is the multiplicative factor applied to the current number of runners used
to determine how many pods should be removed.
type: string
scaleDownThreshold:
description: |-
ScaleDownThreshold is the percentage of busy runners less than which will
trigger the hpa to scale the runners down.
type: string
scaleUpAdjustment:
description: |-
ScaleUpAdjustment is the number of runners added on scale-up.
You can only specify either ScaleUpFactor or ScaleUpAdjustment.
type: integer
scaleUpFactor:
description: |-
ScaleUpFactor is the multiplicative factor applied to the current number of runners used
to determine how many pods should be added.
type: string
scaleUpThreshold:
description: |-
ScaleUpThreshold is the percentage of busy runners greater than which will
trigger the hpa to scale runners up.
type: string
type:
description: |-
Type is the type of metric to be used for autoscaling.
It can be TotalNumberOfQueuedAndInProgressWorkflowRuns or PercentageRunnersBusy.
type: string
type: object
type: array
minReplicas:
description: MinReplicas is the minimum number of replicas the deployment
is allowed to scale
type: integer
scaleDownDelaySecondsAfterScaleOut:
description: |-
ScaleDownDelaySecondsAfterScaleUp is the approximate delay for a scale down followed by a scale up
Used to prevent flapping (down->up->down->... loop)
type: integer
scaleTargetRef:
description: ScaleTargetRef is the reference to scaled resource like
RunnerDeployment
properties:
kind:
description: Kind is the type of resource being referenced
enum:
- RunnerDeployment
- RunnerSet
type: string
name:
description: Name is the name of resource being referenced
type: string
type: object
scaleUpTriggers:
description: |-
ScaleUpTriggers is an experimental feature to increase the desired replicas by 1
on each webhook request received by the webhookBasedAutoscaler.
This feature requires you to also enable and deploy the webhookBasedAutoscaler onto your cluster.
Note that the added runners remain until the next sync period at least,
and they may or may not be used by GitHub Actions depending on the timing.
They are intended to be used to gain "resource slack" immediately after you
receive a webhook from GitHub, so that you can loosely expect MinReplicas runners to be always available.
items:
properties:
amount:
type: integer
duration:
type: string
githubEvent:
properties:
checkRun:
description: https://docs.github.com/en/actions/reference/events-that-trigger-workflows#check_run
properties:
names:
description: |-
Names is a list of GitHub Actions glob patterns.
Any check_run event whose name matches one of the patterns in the list can trigger autoscaling.
Note that check_run name seems to equal the job name you've defined in your actions workflow yaml file.
So it is very likely that you can utilize this to trigger depending on the job.
items:
type: string
type: array
repositories:
description: |-
Repositories is a list of GitHub repositories.
Any check_run event whose repository matches one of the repositories in the list can trigger autoscaling.
items:
type: string
type: array
status:
type: string
types:
description: 'One of: created, rerequested, or completed'
items:
type: string
type: array
type: object
pullRequest:
description: https://docs.github.com/en/actions/reference/events-that-trigger-workflows#pull_request
properties:
branches:
items:
type: string
type: array
types:
items:
type: string
type: array
type: object
push:
description: |-
PushSpec is the condition for triggering scale-up on push event
Also see https://docs.github.com/en/actions/reference/events-that-trigger-workflows#push
type: object
workflowJob:
description: https://docs.github.com/en/developers/webhooks-and-events/webhooks/webhook-events-and-payloads#workflow_job
type: object
type: object
type: object
type: array
scheduledOverrides:
description: |-
ScheduledOverrides is the list of ScheduledOverride.
It can be used to override a few fields of HorizontalRunnerAutoscalerSpec on schedule.
The earlier a scheduled override is, the higher it is prioritized.
items:
description: |-
ScheduledOverride can be used to override a few fields of HorizontalRunnerAutoscalerSpec on schedule.
A schedule can optionally be recurring, so that the corresponding override happens every day, week, month, or year.
properties:
endTime:
description: EndTime is the time at which the first override
ends.
format: date-time
type: string
minReplicas:
description: |-
MinReplicas is the number of runners while overriding.
If omitted, it doesn't override minReplicas.
minimum: 0
nullable: true
type: integer
recurrenceRule:
properties:
frequency:
description: |-
Frequency is the name of a predefined interval of each recurrence.
The valid values are "Daily", "Weekly", "Monthly", and "Yearly".
If empty, the corresponding override happens only once.
enum:
- Daily
- Weekly
- Monthly
- Yearly
type: string
untilTime:
description: |-
UntilTime is the time of the final recurrence.
If empty, the schedule recurs forever.
format: date-time
type: string
type: object
startTime:
description: StartTime is the time at which the first override
starts.
format: date-time
type: string
required:
- endTime
- startTime
type: object
type: array
type: object
status:
properties:
cacheEntries:
items:
properties:
expirationTime:
format: date-time
type: string
key:
type: string
value:
type: integer
type: object
type: array
desiredReplicas:
description: |-
DesiredReplicas is the total number of desired, non-terminated and latest pods to be set for the primary RunnerSet
This doesn't include outdated pods while upgrading the deployment and replacing the runnerset.
type: integer
lastSuccessfulScaleOutTime:
format: date-time
nullable: true
type: string
observedGeneration:
description: |-
ObservedGeneration is the most recent generation observed for the target. It corresponds to e.g.
RunnerDeployment's generation, which is updated on mutation by the API Server.
format: int64
type: integer
scheduledOverridesSummary:
description: |-
ScheduledOverridesSummary is the summary of active and upcoming scheduled overrides to be shown in e.g. a column of a `kubectl get hra` output
for observability.
type: string
type: object
type: object
served: true
storage: true
subresources:
status: {}
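As a usage sketch of the HorizontalRunnerAutoscaler schema above; the resource names, thresholds, and times are hypothetical:

# Hypothetical sketch only.
apiVersion: actions.summerwind.dev/v1alpha1
kind: HorizontalRunnerAutoscaler
metadata:
  name: example-hra              # hypothetical name
spec:
  scaleTargetRef:
    kind: RunnerDeployment       # one of RunnerDeployment or RunnerSet
    name: example-runners        # hypothetical RunnerDeployment
  minReplicas: 1
  maxReplicas: 10
  metrics:
  - type: PercentageRunnersBusy
    scaleUpThreshold: "0.75"     # scale up when more than 75% of runners are busy
    scaleDownThreshold: "0.25"
    scaleUpFactor: "2"
    scaleDownFactor: "0.5"
  scheduledOverrides:
  - startTime: "2025-10-18T00:00:00Z"
    endTime: "2025-10-20T00:00:00Z"
    recurrenceRule:
      frequency: Weekly          # repeat this window every week
    minReplicas: 0               # no runners during the window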

File diff suppressed because it is too large

View File

@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.17.2
controller-gen.kubebuilder.io/version: v0.19.0
name: runnersets.actions.summerwind.dev
spec:
group: actions.summerwind.dev
@@ -554,7 +554,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
Also, matchLabelKeys cannot be set when labelSelector isn't set.
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -569,7 +568,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -730,7 +728,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
Also, matchLabelKeys cannot be set when labelSelector isn't set.
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -745,7 +742,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -834,8 +830,8 @@ spec:
most preferred is the one with the greatest sum of weights, i.e.
for each node that meets all of the scheduling requirements (resource
request, requiredDuringScheduling anti-affinity expressions, etc.),
compute a sum by iterating through the elements of this field and adding
"weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the
compute a sum by iterating through the elements of this field and subtracting
"weight" from the sum if the node has pods which matches the corresponding podAffinityTerm; the
node(s) with the highest sum are the most preferred.
items:
description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)
@@ -899,7 +895,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
Also, matchLabelKeys cannot be set when labelSelector isn't set.
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -914,7 +909,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -1075,7 +1069,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both matchLabelKeys and labelSelector.
Also, matchLabelKeys cannot be set when labelSelector isn't set.
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -1090,7 +1083,6 @@ spec:
pod labels will be ignored. The default value is empty.
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
items:
type: string
type: array
@@ -1217,7 +1209,9 @@ spec:
description: EnvVar represents an environment variable present in a Container.
properties:
name:
description: Name of the environment variable. Must be a C_IDENTIFIER.
description: |-
Name of the environment variable.
May consist of any printable ASCII characters except '='.
type: string
value:
description: |-
@@ -1271,6 +1265,42 @@ spec:
- fieldPath
type: object
x-kubernetes-map-type: atomic
fileKeyRef:
description: |-
FileKeyRef selects a key of the env file.
Requires the EnvFiles feature gate to be enabled.
properties:
key:
description: |-
The key within the env file. An invalid key will prevent the pod from starting.
The keys defined within a source may consist of any printable ASCII characters except '='.
During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters.
type: string
optional:
default: false
description: |-
Specify whether the file or its key must be defined. If the file or key
does not exist, then the env var is not published.
If optional is set to true and the specified key does not exist,
the environment variable will not be set in the Pod's containers.
If optional is set to false and the specified key does not exist,
an error will be returned during Pod creation.
type: boolean
path:
description: |-
The path within the volume from which to select the file.
Must be relative and may not contain the '..' path or start with '..'.
type: string
volumeName:
description: The name of the volume mount containing the env file.
type: string
required:
- key
- path
- volumeName
type: object
x-kubernetes-map-type: atomic
resourceFieldRef:
description: |-
Selects a resource of the container: only resources limits and requests
@@ -1326,13 +1356,13 @@ spec:
envFrom:
description: |-
List of sources to populate environment variables in the container.
The keys defined within a source must be a C_IDENTIFIER. All invalid keys
will be reported as an event when the container is starting. When a key exists in multiple
The keys defined within a source may consist of any printable ASCII characters except '='.
When a key exists in multiple
sources, the value associated with the last source will take precedence.
Values defined by an Env with a duplicate key will take precedence.
Cannot be updated.
items:
description: EnvFromSource represents the source of a set of ConfigMaps
description: EnvFromSource represents the source of a set of ConfigMaps or Secrets
properties:
configMapRef:
description: The ConfigMap to select from
@@ -1352,7 +1382,9 @@ spec:
type: object
x-kubernetes-map-type: atomic
prefix:
description: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.
description: |-
Optional text to prepend to the name of each environment variable.
May consist of any printable ASCII characters except '='.
type: string
secretRef:
description: The Secret to select from
@@ -1601,6 +1633,12 @@ spec:
- port
type: object
type: object
stopSignal:
description: |-
StopSignal defines which signal will be sent to a container when it is being stopped.
If not specified, the default is defined by the container runtime in use.
StopSignal can only be set for Pods with a non-empty .spec.os.name
type: string
type: object
livenessProbe:
description: |-
@@ -1991,7 +2029,7 @@ spec:
Claims lists the names of resources, defined in spec.resourceClaims,
that are used by this container.
This is an alpha field and requires enabling the
This field depends on the
DynamicResourceAllocation feature gate.
This field is immutable. It can only be set for containers.
@@ -2042,10 +2080,10 @@ spec:
restartPolicy:
description: |-
RestartPolicy defines the restart behavior of individual containers in a pod.
This field may only be set for init containers, and the only allowed value is "Always".
For non-init containers or when this field is not specified,
This overrides the pod-level restart policy. When this field is not specified,
the restart behavior is defined by the Pod's restart policy and the container type.
Setting the RestartPolicy as "Always" for the init container will have the following effect:
Additionally, setting the RestartPolicy as "Always" for the init container will
have the following effect:
this init container will be continually restarted on
exit until all regular containers have terminated. Once all regular
containers have completed, all init containers with restartPolicy "Always"
@@ -2057,6 +2095,57 @@ spec:
init container is started, or after any startupProbe has successfully
completed.
type: string
restartPolicyRules:
description: |-
Represents a list of rules to be checked to determine if the
container should be restarted on exit. The rules are evaluated in
order. Once a rule matches a container exit condition, the remaining
rules are ignored. If no rule matches the container exit condition,
the Container-level restart policy determines whether the container
is restarted or not. Constraints on the rules:
- At most 20 rules are allowed.
- Rules can have the same action.
- Identical rules are not forbidden in validations.
When rules are specified, the container MUST set RestartPolicy explicitly
even if it matches the Pod's RestartPolicy.
items:
description: ContainerRestartRule describes how a container exit is handled.
properties:
action:
description: |-
Specifies the action taken on a container exit if the requirements
are satisfied. The only possible value is "Restart" to restart the
container.
type: string
exitCodes:
description: Represents the exit codes to check on container exits.
properties:
operator:
description: |-
Represents the relationship between the container exit code(s) and the
specified values. Possible values are:
- In: the requirement is satisfied if the container exit code is in the
set of specified values.
- NotIn: the requirement is satisfied if the container exit code is
not in the set of specified values.
type: string
values:
description: |-
Specifies the set of values to check for container exit codes.
At most 255 elements are allowed.
items:
format: int32
type: integer
type: array
x-kubernetes-list-type: set
required:
- operator
type: object
required:
- action
type: object
type: array
x-kubernetes-list-type: atomic
securityContext:
description: |-
SecurityContext defines the security options the container should be run with.
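Illustrative sketch, not part of this diff, combining the two fields described above: a sidecar-style init container with restartPolicy Always, and a worker restarted only on a specific exit code via restartPolicyRules (an alpha, feature-gated field; all names here are hypothetical).

apiVersion: v1
kind: Pod
metadata:
  name: restart-rules-demo
spec:
  restartPolicy: Never
  initContainers:
  - name: log-shipper               # sidecar: restarts until all regular containers terminate
    image: busybox
    command: ["sh", "-c", "tail -f /dev/null"]
    restartPolicy: Always
  containers:
  - name: worker
    image: busybox
    command: ["sh", "-c", "exit 42"]
    restartPolicy: Never            # must be set explicitly when rules are present
    restartPolicyRules:
    - action: Restart               # "Restart" is the only allowed action
      exitCodes:
        operator: In                # In or NotIn
        values: [42]                # restart only when the exit code is 42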
@@ -2654,7 +2743,9 @@ spec:
description: EnvVar represents an environment variable present in a Container.
properties:
name:
description: |-
Name of the environment variable.
May consist of any printable ASCII characters except '='.
type: string
value:
description: |-
@@ -2708,6 +2799,42 @@ spec:
- fieldPath
type: object
x-kubernetes-map-type: atomic
fileKeyRef:
description: |-
FileKeyRef selects a key of the env file.
Requires the EnvFiles feature gate to be enabled.
properties:
key:
description: |-
The key within the env file. An invalid key will prevent the pod from starting.
The keys defined within a source may consist of any printable ASCII characters except '='.
During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters.
type: string
optional:
default: false
description: |-
Specify whether the file or its key must be defined. If the file or key
does not exist, then the env var is not published.
If optional is set to true and the specified key does not exist,
the environment variable will not be set in the Pod's containers.
If optional is set to false and the specified key does not exist,
an error will be returned during Pod creation.
type: boolean
path:
description: |-
The path within the volume from which to select the file.
Must be relative and may not contain the '..' path or start with '..'.
type: string
volumeName:
description: The name of the volume mount containing the env file.
type: string
required:
- key
- path
- volumeName
type: object
x-kubernetes-map-type: atomic
resourceFieldRef:
description: |-
Selects a resource of the container: only resources limits and requests
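Illustrative sketch, not part of this diff: sourcing an environment variable from a key in an env file, assuming the EnvFiles feature gate is enabled. The volume and file names are hypothetical, and something (an init container, for example) is assumed to have written app.env into the volume.

apiVersion: v1
kind: Pod
metadata:
  name: filekeyref-demo
spec:
  volumes:
  - name: config
    emptyDir: {}
  containers:
  - name: app
    image: busybox
    command: ["sh", "-c", "echo $DB_HOST && sleep 3600"]
    env:
    - name: DB_HOST
      valueFrom:
        fileKeyRef:
          volumeName: config    # must name a volume mounted on the pod
          path: app.env         # relative path within the volume
          key: DB_HOST          # key inside the env file
          optional: false       # a missing key fails pod creation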
@@ -2763,13 +2890,13 @@ spec:
envFrom:
description: |-
List of sources to populate environment variables in the container.
The keys defined within a source may consist of any printable ASCII characters except '='.
When a key exists in multiple
sources, the value associated with the last source will take precedence.
Values defined by an Env with a duplicate key will take precedence.
Cannot be updated.
items:
description: EnvFromSource represents the source of a set of ConfigMaps or Secrets
properties:
configMapRef:
description: The ConfigMap to select from
@@ -2789,7 +2916,9 @@ spec:
type: object
x-kubernetes-map-type: atomic
prefix:
description: |-
Optional text to prepend to the name of each environment variable.
May consist of any printable ASCII characters except '='.
type: string
secretRef:
description: The Secret to select from
@@ -3034,6 +3163,12 @@ spec:
- port
type: object
type: object
stopSignal:
description: |-
StopSignal defines which signal will be sent to a container when it is being stopped.
If not specified, the default is defined by the container runtime in use.
StopSignal can only be set for Pods with a non-empty .spec.os.name
type: string
type: object
livenessProbe:
description: Probes are not allowed for ephemeral containers.
@@ -3407,7 +3542,7 @@ spec:
Claims lists the names of resources, defined in spec.resourceClaims,
that are used by this container.
This field depends on the
DynamicResourceAllocation feature gate.
This field is immutable. It can only be set for containers.
@@ -3459,9 +3594,51 @@ spec:
description: |-
Restart policy for the container to manage the restart behavior of each
container within a pod.
You cannot set this field on ephemeral containers.
type: string
restartPolicyRules:
description: |-
Represents a list of rules to be checked to determine if the
container should be restarted on exit. You cannot set this field on
ephemeral containers.
items:
description: ContainerRestartRule describes how a container exit is handled.
properties:
action:
description: |-
Specifies the action taken on a container exit if the requirements
are satisfied. The only possible value is "Restart" to restart the
container.
type: string
exitCodes:
description: Represents the exit codes to check on container exits.
properties:
operator:
description: |-
Represents the relationship between the container exit code(s) and the
specified values. Possible values are:
- In: the requirement is satisfied if the container exit code is in the
set of specified values.
- NotIn: the requirement is satisfied if the container exit code is
not in the set of specified values.
type: string
values:
description: |-
Specifies the set of values to check for container exit codes.
At most 255 elements are allowed.
items:
format: int32
type: integer
type: array
x-kubernetes-list-type: set
required:
- operator
type: object
required:
- action
type: object
type: array
x-kubernetes-list-type: atomic
securityContext:
description: |-
Optional: SecurityContext defines the security options the ephemeral container should be run with.
@@ -3980,7 +4157,9 @@ spec:
hostNetwork:
description: |-
Host networking requested for this pod. Use the host's network namespace.
When using HostNetwork you should specify ports so the scheduler is aware.
When `hostNetwork` is true, specified `hostPort` fields in port definitions must match `containerPort`,
and unspecified `hostPort` fields in port definitions are defaulted to match `containerPort`.
Defaults to false.
type: boolean
hostPID:
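Illustrative sketch, not part of this diff: the hostNetwork/hostPort constraint in practice. With hostNetwork enabled, any specified hostPort must equal the containerPort.

apiVersion: v1
kind: Pod
metadata:
  name: hostnet-demo     # hypothetical
spec:
  hostNetwork: true
  containers:
  - name: web
    image: nginx
    ports:
    - containerPort: 80
      hostPort: 80       # must match containerPort when hostNetwork is true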
@@ -4005,6 +4184,19 @@ spec:
Specifies the hostname of the Pod
If not specified, the pod's hostname will be set to a system-defined value.
type: string
hostnameOverride:
description: |-
HostnameOverride specifies an explicit override for the pod's hostname as perceived by the pod.
This field only specifies the pod's hostname and does not affect its DNS records.
When this field is set to a non-empty string:
- It takes precedence over the values set in `hostname` and `subdomain`.
- The Pod's hostname will be set to this value.
- `setHostnameAsFQDN` must be nil or set to false.
- `hostNetwork` must be set to false.
This field must be a valid DNS subdomain as defined in RFC 1123 and contain at most 64 characters.
Requires the HostnameOverride feature gate to be enabled.
type: string
imagePullSecrets:
description: |-
ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec.
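Illustrative sketch, not part of this diff: a pod using hostnameOverride, assuming the HostnameOverride feature gate is enabled. The hostname value is hypothetical.

apiVersion: v1
kind: Pod
metadata:
  name: hostname-demo
spec:
  hostnameOverride: runner-a1   # valid RFC 1123 subdomain, at most 64 characters
  setHostnameAsFQDN: false      # must be nil or false
  hostNetwork: false            # must be false
  containers:
  - name: app
    image: busybox
    command: ["sh", "-c", "hostname && sleep 3600"]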
@@ -4040,7 +4232,7 @@ spec:
Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes.
The resourceRequirements of an init container are taken into account during scheduling
by finding the highest request/limit for each resource type, and then using the max of
that value or the sum of the normal containers. Limits are applied to init containers
in a similar fashion.
Init containers cannot currently be added or removed.
Cannot be updated.
@@ -4084,7 +4276,9 @@ spec:
description: EnvVar represents an environment variable present in a Container.
properties:
name:
description: |-
Name of the environment variable.
May consist of any printable ASCII characters except '='.
type: string
value:
description: |-
@@ -4138,6 +4332,42 @@ spec:
- fieldPath
type: object
x-kubernetes-map-type: atomic
fileKeyRef:
description: |-
FileKeyRef selects a key of the env file.
Requires the EnvFiles feature gate to be enabled.
properties:
key:
description: |-
The key within the env file. An invalid key will prevent the pod from starting.
The keys defined within a source may consist of any printable ASCII characters except '='.
During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters.
type: string
optional:
default: false
description: |-
Specify whether the file or its key must be defined. If the file or key
does not exist, then the env var is not published.
If optional is set to true and the specified key does not exist,
the environment variable will not be set in the Pod's containers.
If optional is set to false and the specified key does not exist,
an error will be returned during Pod creation.
type: boolean
path:
description: |-
The path within the volume from which to select the file.
Must be relative and may not contain the '..' path or start with '..'.
type: string
volumeName:
description: The name of the volume mount containing the env file.
type: string
required:
- key
- path
- volumeName
type: object
x-kubernetes-map-type: atomic
resourceFieldRef:
description: |-
Selects a resource of the container: only resources limits and requests
@@ -4193,13 +4423,13 @@ spec:
envFrom:
description: |-
List of sources to populate environment variables in the container.
The keys defined within a source may consist of any printable ASCII characters except '='.
When a key exists in multiple
sources, the value associated with the last source will take precedence.
Values defined by an Env with a duplicate key will take precedence.
Cannot be updated.
items:
description: EnvFromSource represents the source of a set of ConfigMaps or Secrets
properties:
configMapRef:
description: The ConfigMap to select from
@@ -4219,7 +4449,9 @@ spec:
type: object
x-kubernetes-map-type: atomic
prefix:
description: |-
Optional text to prepend to the name of each environment variable.
May consist of any printable ASCII characters except '='.
type: string
secretRef:
description: The Secret to select from
@@ -4468,6 +4700,12 @@ spec:
- port
type: object
type: object
stopSignal:
description: |-
StopSignal defines which signal will be sent to a container when it is being stopped.
If not specified, the default is defined by the container runtime in use.
StopSignal can only be set for Pods with a non-empty .spec.os.name
type: string
type: object
livenessProbe:
description: |-
@@ -4858,7 +5096,7 @@ spec:
Claims lists the names of resources, defined in spec.resourceClaims,
that are used by this container.
This field depends on the
DynamicResourceAllocation feature gate.
This field is immutable. It can only be set for containers.
@@ -4909,10 +5147,10 @@ spec:
restartPolicy:
description: |-
RestartPolicy defines the restart behavior of individual containers in a pod.
This overrides the pod-level restart policy. When this field is not specified,
the restart behavior is defined by the Pod's restart policy and the container type.
Setting the RestartPolicy as "Always" for the init container will have the following effect:
Additionally, setting the RestartPolicy as "Always" for the init container will
have the following effect:
this init container will be continually restarted on
exit until all regular containers have terminated. Once all regular
containers have completed, all init containers with restartPolicy "Always"
@@ -4924,6 +5162,57 @@ spec:
init container is started, or after any startupProbe has successfully
completed.
type: string
restartPolicyRules:
description: |-
Represents a list of rules to be checked to determine if the
container should be restarted on exit. The rules are evaluated in
order. Once a rule matches a container exit condition, the remaining
rules are ignored. If no rule matches the container exit condition,
the Container-level restart policy determines whether the container
is restarted or not. Constraints on the rules:
- At most 20 rules are allowed.
- Rules can have the same action.
- Identical rules are not forbidden in validations.
When rules are specified, the container MUST set RestartPolicy explicitly
even if it matches the Pod's RestartPolicy.
items:
description: ContainerRestartRule describes how a container exit is handled.
properties:
action:
description: |-
Specifies the action taken on a container exit if the requirements
are satisfied. The only possible value is "Restart" to restart the
container.
type: string
exitCodes:
description: Represents the exit codes to check on container exits.
properties:
operator:
description: |-
Represents the relationship between the container exit code(s) and the
specified values. Possible values are:
- In: the requirement is satisfied if the container exit code is in the
set of specified values.
- NotIn: the requirement is satisfied if the container exit code is
not in the set of specified values.
type: string
values:
description: |-
Specifies the set of values to check for container exit codes.
At most 255 elements are allowed.
items:
format: int32
type: integer
type: array
x-kubernetes-list-type: set
required:
- operator
type: object
required:
- action
type: object
type: array
x-kubernetes-list-type: atomic
securityContext:
description: |-
SecurityContext defines the security options the container should be run with.
@@ -5437,6 +5726,7 @@ spec:
- spec.hostPID
- spec.hostIPC
- spec.hostUsers
- spec.resources
- spec.securityContext.appArmorProfile
- spec.securityContext.seLinuxOptions
- spec.securityContext.seccompProfile
@@ -5588,7 +5878,7 @@ spec:
description: |-
Resources is the total amount of CPU and Memory resources required by all
containers in the pod. It supports specifying Requests and Limits for
"cpu" and "memory" resource names only. ResourceClaims are not supported.
"cpu", "memory" and "hugepages-" resource names only. ResourceClaims are not supported.
This field enables fine-grained control over resource allocation for the
entire pod, allowing resource sharing among containers in a pod.
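Illustrative sketch, not part of this diff: pod-level requests and limits shared by all containers, assuming a cluster where pod-level resources are enabled (a feature-gated capability). Names are hypothetical.

apiVersion: v1
kind: Pod
metadata:
  name: pod-resources-demo
spec:
  resources:                 # pod-level budget; only cpu, memory and hugepages-
    requests:
      cpu: "1"
      memory: 1Gi
    limits:
      cpu: "2"
      memory: 2Gi
  containers:
  - name: app                # containers may omit per-container requests/limits
    image: nginx
  - name: helper
    image: busybox
    command: ["sleep", "3600"]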
@@ -5601,7 +5891,7 @@ spec:
Claims lists the names of resources, defined in spec.resourceClaims,
that are used by this container.
This field depends on the
DynamicResourceAllocation feature gate.
This field is immutable. It can only be set for containers.
@@ -6126,7 +6416,6 @@ spec:
- Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.
If this value is nil, the behavior is equivalent to the Honor policy.
type: string
nodeTaintsPolicy:
description: |-
@@ -6137,7 +6426,6 @@ spec:
- Ignore: node taints are ignored. All nodes are included.
If this value is nil, the behavior is equivalent to the Ignore policy.
type: string
topologyKey:
description: |-
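Illustrative sketch, not part of this diff: a pod-spec fragment showing both inclusion policies on a topology spread constraint. The app label is hypothetical.

topologySpreadConstraints:
- maxSkew: 1
  topologyKey: kubernetes.io/hostname
  whenUnsatisfiable: DoNotSchedule
  nodeAffinityPolicy: Honor    # nil behaves like Honor
  nodeTaintsPolicy: Ignore     # nil behaves like Ignore
  labelSelector:
    matchLabels:
      app: demo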
@@ -6843,15 +7131,13 @@ spec:
volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim.
If specified, the CSI driver will create or update the volume with the attributes defined
in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName,
it can be changed after the claim is created. An empty string or nil value indicates that no
VolumeAttributesClass will be applied to the claim. If the claim enters an Infeasible error state,
this field can be reset to its previous value (including nil) to cancel the modification.
If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource
exists.
More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
type: string
volumeMode:
description: |-
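Illustrative sketch, not part of this diff: a PVC using volumeAttributesClassName. The class names are hypothetical; unlike storageClassName, this field can be changed after creation, and resetting it to the previous value cancels an Infeasible modification.

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: data
spec:
  accessModes: ["ReadWriteOnce"]
  resources:
    requests:
      storage: 10Gi
  storageClassName: fast            # immutable after creation
  volumeAttributesClassName: gold   # mutable; drives ControllerModifyVolume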
@@ -7025,12 +7311,9 @@ spec:
description: |-
glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.
Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported.
properties:
endpoints:
description: |-
endpoints is the endpoint name that details Glusterfs topology.
More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
description: endpoints is the endpoint name that details Glusterfs topology.
type: string
path:
description: |-
@@ -7084,7 +7367,7 @@ spec:
The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field.
The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images.
The volume will be mounted read-only (ro) and non-executable files (noexec).
Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath) before 1.33.
The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type.
properties:
pullPolicy:
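Illustrative sketch, not part of this diff: mounting an OCI artifact as a read-only volume. The registry reference is hypothetical.

apiVersion: v1
kind: Pod
metadata:
  name: image-volume-demo
spec:
  volumes:
  - name: artifacts
    image:
      reference: registry.example.com/team/data:v1   # hypothetical OCI artifact
      pullPolicy: IfNotPresent
  containers:
  - name: app
    image: busybox
    command: ["sh", "-c", "ls /data && sleep 3600"]
    volumeMounts:
    - name: artifacts
      mountPath: /data   # mounted read-only and noexec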
@@ -7109,7 +7392,7 @@ spec:
description: |-
iscsi represents an ISCSI Disk resource that is attached to a
kubelet's host machine and then exposed to the pod.
More info: https://kubernetes.io/docs/concepts/storage/volumes/#iscsi
properties:
chapAuthDiscovery:
description: chapAuthDiscovery defines whether support iSCSI Discovery CHAP authentication
@@ -7499,6 +7782,110 @@ spec:
type: array
x-kubernetes-list-type: atomic
type: object
podCertificate:
description: |-
Projects an auto-rotating credential bundle (private key and certificate
chain) that the pod can use either as a TLS client or server.
Kubelet generates a private key and uses it to send a
PodCertificateRequest to the named signer. Once the signer approves the
request and issues a certificate chain, Kubelet writes the key and
certificate chain to the pod filesystem. The pod does not start until
certificates have been issued for each podCertificate projected volume
source in its spec.
Kubelet will begin trying to rotate the certificate at the time indicated
by the signer using the PodCertificateRequest.Status.BeginRefreshAt
timestamp.
Kubelet can write a single file, indicated by the credentialBundlePath
field, or separate files, indicated by the keyPath and
certificateChainPath fields.
The credential bundle is a single file in PEM format. The first PEM
entry is the private key (in PKCS#8 format), and the remaining PEM
entries are the certificate chain issued by the signer (typically,
signers will return their certificate chain in leaf-to-root order).
Prefer using the credential bundle format, since your application code
can read it atomically. If you use keyPath and certificateChainPath,
your application must make two separate file reads. If these coincide
with a certificate rotation, it is possible that the private key and leaf
certificate you read may not correspond to each other. Your application
will need to check for this condition, and re-read until they are
consistent.
The named signer chooses the format of the certificate it
issues; consult the signer implementation's documentation to learn how to
use the certificates it issues.
properties:
certificateChainPath:
description: |-
Write the certificate chain at this path in the projected volume.
Most applications should use credentialBundlePath. When using keyPath
and certificateChainPath, your application needs to check that the key
and leaf certificate are consistent, because it is possible to read the
files mid-rotation.
type: string
credentialBundlePath:
description: |-
Write the credential bundle at this path in the projected volume.
The credential bundle is a single file that contains multiple PEM blocks.
The first PEM block is a PRIVATE KEY block, containing a PKCS#8 private
key.
The remaining blocks are CERTIFICATE blocks, containing the issued
certificate chain from the signer (leaf and any intermediates).
Using credentialBundlePath lets your Pod's application code make a single
atomic read that retrieves a consistent key and certificate chain. If you
project them to separate files, your application code will need to
additionally check that the leaf certificate was issued to the key.
type: string
keyPath:
description: |-
Write the key at this path in the projected volume.
Most applications should use credentialBundlePath. When using keyPath
and certificateChainPath, your application needs to check that the key
and leaf certificate are consistent, because it is possible to read the
files mid-rotation.
type: string
keyType:
description: |-
The type of keypair Kubelet will generate for the pod.
Valid values are "RSA3072", "RSA4096", "ECDSAP256", "ECDSAP384",
"ECDSAP521", and "ED25519".
type: string
maxExpirationSeconds:
description: |-
maxExpirationSeconds is the maximum lifetime permitted for the
certificate.
Kubelet copies this value verbatim into the PodCertificateRequests it
generates for this projection.
If omitted, kube-apiserver will set it to 86400 (24 hours). kube-apiserver
will reject values shorter than 3600 (1 hour). The maximum allowable
value is 7862400 (91 days).
The signer implementation is then free to issue a certificate with any
lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600
seconds (1 hour). This constraint is enforced by kube-apiserver.
`kubernetes.io` signers will never issue certificates with a lifetime
longer than 24 hours.
format: int32
type: integer
signerName:
description: Kubelet's generated CSRs will be addressed to this signer.
type: string
required:
- keyType
- signerName
type: object
secret:
description: secret information about the secret data to project
properties:
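Illustrative sketch, not part of this diff: a projected podCertificate source using the single-file credential bundle, which the application can read atomically. The signer name is hypothetical, and the feature is alpha and feature-gated.

apiVersion: v1
kind: Pod
metadata:
  name: pod-cert-demo
spec:
  volumes:
  - name: certs
    projected:
      sources:
      - podCertificate:
          signerName: example.com/my-signer   # hypothetical signer
          keyType: ECDSAP256
          credentialBundlePath: creds.pem     # one PEM file: PKCS#8 key, then the chain
          maxExpirationSeconds: 86400         # default; min 3600, max 7862400
  containers:
  - name: app
    image: busybox
    command: ["sh", "-c", "cat /var/run/certs/creds.pem && sleep 3600"]
    volumeMounts:
    - name: certs
      mountPath: /var/run/certs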
@@ -7628,7 +8015,6 @@ spec:
description: |-
rbd represents a Rados Block Device mount on the host that shares a pod's lifetime.
Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported.
properties:
fsType:
description: |-
@@ -8170,15 +8556,13 @@ spec:
volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim.
If specified, the CSI driver will create or update the volume with the attributes defined
in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName,
it can be changed after the claim is created. An empty string or nil value indicates that no
VolumeAttributesClass will be applied to the claim. If the claim enters an Infeasible error state,
this field can be reset to its previous value (including nil) to cancel the modification.
If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource
exists.
More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
type: string
volumeMode:
description: |-
@@ -8278,13 +8662,11 @@ spec:
description: |-
currentVolumeAttributesClassName is the current name of the VolumeAttributesClass the PVC is using.
When unset, there is no VolumeAttributeClass applied to this PersistentVolumeClaim
type: string
modifyVolumeStatus:
description: |-
ModifyVolumeStatus represents the status object of ControllerModifyVolume operation.
When this is unset, there is no ModifyVolume operation being attempted.
properties:
status:
description: "status is the status of the ControllerModifyVolume operation. It can be in any of following states:\n - Pending\n Pending indicates that the PersistentVolumeClaim cannot be modified due to unmet requirements, such as\n the specified VolumeAttributesClass not existing.\n - InProgress\n InProgress indicates that the volume is being modified.\n - Infeasible\n Infeasible indicates that the request has been rejected as invalid by the CSI driver. To\n\t resolve the error, a valid VolumeAttributesClass needs to be specified.\nNote: New statuses can be added in the future. Consumers should check for unknown statuses and fail appropriately."
@@ -8355,7 +8737,6 @@ spec:
type: object
required:
- selector
- template
type: object
status:
@@ -8389,4 +8770,3 @@ spec:
storage: true
subresources:
status: {}

View File

@@ -32,6 +32,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/reconcile"
v1alpha1 "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1/appconfig"
"github.com/actions/actions-runner-controller/controllers/actions.github.com/metrics"
"github.com/actions/actions-runner-controller/github/actions"
hash "github.com/actions/actions-runner-controller/hash"
@@ -77,7 +78,7 @@ func (r *AutoscalingListenerReconciler) Reconcile(ctx context.Context, req ctrl.
return ctrl.Result{}, client.IgnoreNotFound(err)
}
if !autoscalingListener.DeletionTimestamp.IsZero() {
if !controllerutil.ContainsFinalizer(autoscalingListener, autoscalingListenerFinalizerName) {
return ctrl.Result{}, nil
}
@@ -128,41 +129,24 @@ func (r *AutoscalingListenerReconciler) Reconcile(ctx context.Context, req ctrl.
return ctrl.Result{}, err
}
appConfig, err := r.GetAppConfig(ctx, &autoscalingRunnerSet)
if err != nil {
log.Error(
err,
"Failed to get app config for AutoscalingRunnerSet.",
"namespace",
autoscalingRunnerSet.Namespace,
"name",
autoscalingRunnerSet.GitHubConfigSecret,
)
return ctrl.Result{}, err
}
// Make sure the runner scale set listener service account is created for the listener pod in the controller namespace
serviceAccount := new(corev1.ServiceAccount)
if err := r.Get(ctx, types.NamespacedName{Namespace: autoscalingListener.Namespace, Name: autoscalingListener.Name}, serviceAccount); err != nil {
if !kerrors.IsNotFound(err) {
log.Error(err, "Unable to get listener service accounts", "namespace", autoscalingListener.Namespace, "name", autoscalingListener.Name)
return ctrl.Result{}, err
}
@@ -175,9 +159,9 @@ func (r *AutoscalingListenerReconciler) Reconcile(ctx context.Context, req ctrl.
// Make sure the runner scale set listener role is created in the AutoscalingRunnerSet namespace
listenerRole := new(rbacv1.Role)
if err := r.Get(ctx, types.NamespacedName{Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace, Name: autoscalingListener.Name}, listenerRole); err != nil {
if !kerrors.IsNotFound(err) {
log.Error(err, "Unable to get listener role", "namespace", autoscalingListener.Spec.AutoscalingRunnerSetNamespace, "name", autoscalingListener.Name)
return ctrl.Result{}, err
}
@@ -197,9 +181,9 @@ func (r *AutoscalingListenerReconciler) Reconcile(ctx context.Context, req ctrl.
// Make sure the runner scale set listener role binding is created
listenerRoleBinding := new(rbacv1.RoleBinding)
if err := r.Get(ctx, types.NamespacedName{Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace, Name: autoscalingListener.Name}, listenerRoleBinding); err != nil {
if !kerrors.IsNotFound(err) {
log.Error(err, "Unable to get listener role binding", "namespace", autoscalingListener.Spec.AutoscalingRunnerSetNamespace, "name", autoscalingListener.Name)
return ctrl.Result{}, err
}
@@ -239,7 +223,7 @@ func (r *AutoscalingListenerReconciler) Reconcile(ctx context.Context, req ctrl.
// Create a listener pod in the controller namespace
log.Info("Creating a listener pod")
return r.createListenerPod(ctx, &autoscalingRunnerSet, autoscalingListener, serviceAccount, appConfig, log)
}
cs := listenerContainerStatus(listenerPod)
@@ -260,6 +244,19 @@ func (r *AutoscalingListenerReconciler) Reconcile(ctx context.Context, req ctrl.
log.Error(err, "Unable to delete the listener pod", "namespace", listenerPod.Namespace, "name", listenerPod.Name)
return ctrl.Result{}, err
}
// delete the listener config secret as well, so it gets recreated when the listener pod is recreated, with any new data if it exists
var configSecret corev1.Secret
err := r.Get(ctx, types.NamespacedName{Namespace: autoscalingListener.Namespace, Name: scaleSetListenerConfigName(autoscalingListener)}, &configSecret)
switch {
case err == nil && configSecret.DeletionTimestamp.IsZero():
log.Info("Deleting the listener config secret")
if err := r.Delete(ctx, &configSecret); err != nil {
return ctrl.Result{}, fmt.Errorf("failed to delete listener config secret: %w", err)
}
case !kerrors.IsNotFound(err):
return ctrl.Result{}, fmt.Errorf("failed to get the listener config secret: %w", err)
}
}
return ctrl.Result{}, nil
case cs.State.Running != nil:
@@ -281,7 +278,7 @@ func (r *AutoscalingListenerReconciler) cleanupResources(ctx context.Context, au
err = r.Get(ctx, types.NamespacedName{Name: autoscalingListener.Name, Namespace: autoscalingListener.Namespace}, listenerPod)
switch {
case err == nil:
if listenerPod.DeletionTimestamp.IsZero() {
logger.Info("Deleting the listener pod")
if err := r.Delete(ctx, listenerPod); err != nil {
return false, fmt.Errorf("failed to delete listener pod: %w", err)
@@ -299,7 +296,7 @@ func (r *AutoscalingListenerReconciler) cleanupResources(ctx context.Context, au
err = r.Get(ctx, types.NamespacedName{Namespace: autoscalingListener.Namespace, Name: scaleSetListenerConfigName(autoscalingListener)}, &secret)
switch {
case err == nil:
if secret.DeletionTimestamp.IsZero() {
logger.Info("Deleting the listener config secret")
if err := r.Delete(ctx, &secret); err != nil {
return false, fmt.Errorf("failed to delete listener config secret: %w", err)
@@ -316,7 +313,7 @@ func (r *AutoscalingListenerReconciler) cleanupResources(ctx context.Context, au
err = r.Get(ctx, types.NamespacedName{Name: proxyListenerSecretName(autoscalingListener), Namespace: autoscalingListener.Namespace}, proxySecret)
switch {
case err == nil:
if proxySecret.DeletionTimestamp.IsZero() {
logger.Info("Deleting the listener proxy secret")
if err := r.Delete(ctx, proxySecret); err != nil {
return false, fmt.Errorf("failed to delete listener proxy secret: %w", err)
@@ -330,10 +327,10 @@ func (r *AutoscalingListenerReconciler) cleanupResources(ctx context.Context, au
}
listenerRoleBinding := new(rbacv1.RoleBinding)
err = r.Get(ctx, types.NamespacedName{Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace, Name: autoscalingListener.Name}, listenerRoleBinding)
switch {
case err == nil:
if listenerRoleBinding.DeletionTimestamp.IsZero() {
logger.Info("Deleting the listener role binding")
if err := r.Delete(ctx, listenerRoleBinding); err != nil {
return false, fmt.Errorf("failed to delete listener role binding: %w", err)
@@ -346,10 +343,10 @@ func (r *AutoscalingListenerReconciler) cleanupResources(ctx context.Context, au
logger.Info("Listener role binding is deleted")
listenerRole := new(rbacv1.Role)
err = r.Get(ctx, types.NamespacedName{Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace, Name: autoscalingListener.Name}, listenerRole)
switch {
case err == nil:
if listenerRole.DeletionTimestamp.IsZero() {
logger.Info("Deleting the listener role")
if err := r.Delete(ctx, listenerRole); err != nil {
return false, fmt.Errorf("failed to delete listener role: %w", err)
@@ -363,10 +360,10 @@ func (r *AutoscalingListenerReconciler) cleanupResources(ctx context.Context, au
logger.Info("Cleaning up the listener service account")
listenerSa := new(corev1.ServiceAccount)
err = r.Get(ctx, types.NamespacedName{Name: autoscalingListener.Name, Namespace: autoscalingListener.Namespace}, listenerSa)
switch {
case err == nil:
if listenerSa.DeletionTimestamp.IsZero() {
logger.Info("Deleting the listener service account")
if err := r.Delete(ctx, listenerSa); err != nil {
return false, fmt.Errorf("failed to delete listener service account: %w", err)
@@ -382,7 +379,7 @@ func (r *AutoscalingListenerReconciler) cleanupResources(ctx context.Context, au
}
func (r *AutoscalingListenerReconciler) createServiceAccountForListener(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, logger logr.Logger) (ctrl.Result, error) {
newServiceAccount := r.newScaleSetListenerServiceAccount(autoscalingListener)
if err := ctrl.SetControllerReference(autoscalingListener, newServiceAccount, r.Scheme); err != nil {
return ctrl.Result{}, err
@@ -398,7 +395,7 @@ func (r *AutoscalingListenerReconciler) createServiceAccountForListener(ctx cont
return ctrl.Result{}, nil
}
func (r *AutoscalingListenerReconciler) createListenerPod(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, autoscalingListener *v1alpha1.AutoscalingListener, serviceAccount *corev1.ServiceAccount, appConfig *appconfig.AppConfig, logger logr.Logger) (ctrl.Result, error) {
var envs []corev1.EnvVar
if autoscalingListener.Spec.Proxy != nil {
httpURL := corev1.EnvVar{
@@ -467,7 +464,7 @@ func (r *AutoscalingListenerReconciler) createListenerPod(ctx context.Context, a
logger.Info("Creating listener config secret")
podConfig, err := r.newScaleSetListenerConfig(autoscalingListener, appConfig, metricsConfig, cert)
if err != nil {
logger.Error(err, "Failed to build listener config secret")
return ctrl.Result{}, err
@@ -486,7 +483,7 @@ func (r *AutoscalingListenerReconciler) createListenerPod(ctx context.Context, a
return ctrl.Result{Requeue: true}, nil
}
newPod, err := r.newScaleSetListenerPod(autoscalingListener, &podConfig, serviceAccount, metricsConfig, envs...)
if err != nil {
logger.Error(err, "Failed to build listener pod")
return ctrl.Result{}, err
@@ -545,23 +542,6 @@ func (r *AutoscalingListenerReconciler) certificate(ctx context.Context, autosca
return certificate, nil
}
func (r *AutoscalingListenerReconciler) createProxySecret(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, logger logr.Logger) (ctrl.Result, error) {
data, err := autoscalingListener.Spec.Proxy.ToSecretData(func(s string) (*corev1.Secret, error) {
var secret corev1.Secret
@@ -601,24 +581,8 @@ func (r *AutoscalingListenerReconciler) createProxySecret(ctx context.Context, a
return ctrl.Result{Requeue: true}, nil
}
func (r *AutoscalingListenerReconciler) createRoleForListener(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, logger logr.Logger) (ctrl.Result, error) {
newRole := r.newScaleSetListenerRole(autoscalingListener)
logger.Info("Creating listener role", "namespace", newRole.Namespace, "name", newRole.Name, "rules", newRole.Rules)
if err := r.Create(ctx, newRole); err != nil {
@@ -646,7 +610,7 @@ func (r *AutoscalingListenerReconciler) updateRoleForListener(ctx context.Contex
}
func (r *AutoscalingListenerReconciler) createRoleBindingForListener(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, listenerRole *rbacv1.Role, serviceAccount *corev1.ServiceAccount, logger logr.Logger) (ctrl.Result, error) {
newRoleBinding := r.newScaleSetListenerRoleBinding(autoscalingListener, listenerRole, serviceAccount)
logger.Info("Creating listener role binding",
"namespace", newRoleBinding.Namespace,

View File

@@ -14,7 +14,8 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
logf "sigs.k8s.io/controller-runtime/pkg/log"
listenerconfig "github.com/actions/actions-runner-controller/cmd/ghalistener/config"
ghalistenerconfig "github.com/actions/actions-runner-controller/cmd/ghalistener/config"
"github.com/actions/actions-runner-controller/github/actions/fake"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
kerrors "k8s.io/apimachinery/pkg/api/errors"
@@ -43,10 +44,17 @@ var _ = Describe("Test AutoScalingListener controller", func() {
autoscalingNS, mgr = createNamespace(GinkgoT(), k8sClient)
configSecret = createDefaultSecret(GinkgoT(), k8sClient, autoscalingNS.Name)
secretResolver := NewSecretResolver(mgr.GetClient(), fake.NewMultiClient())
rb := ResourceBuilder{
SecretResolver: secretResolver,
}
controller := &AutoscalingListenerReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
Log: logf.Log,
ResourceBuilder: rb,
}
err := controller.SetupWithManager(mgr)
Expect(err).NotTo(HaveOccurred(), "failed to setup controller")
@@ -134,37 +142,25 @@ var _ = Describe("Test AutoScalingListener controller", func() {
autoscalingListenerTestTimeout,
autoscalingListenerTestInterval).Should(BeEquivalentTo(autoscalingListenerFinalizerName), "AutoScalingListener should have a finalizer")
// Check if service account is created
serviceAccount := new(corev1.ServiceAccount)
Eventually(
func() (string, error) {
err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingListener.Name, Namespace: autoscalingListener.Namespace}, serviceAccount)
if err != nil {
return "", err
}
return serviceAccount.Name, nil
},
autoscalingListenerTestTimeout,
autoscalingListenerTestInterval,
).Should(BeEquivalentTo(autoscalingListener.Name), "Service account should be created")
// Check if role is created
role := new(rbacv1.Role)
Eventually(
func() ([]rbacv1.PolicyRule, error) {
err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingListener.Name, Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace}, role)
if err != nil {
return nil, err
}
@@ -178,7 +174,7 @@ var _ = Describe("Test AutoScalingListener controller", func() {
roleBinding := new(rbacv1.RoleBinding)
Eventually(
func() (string, error) {
err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingListener.Name, Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace}, roleBinding)
if err != nil {
return "", err
}
@@ -186,7 +182,7 @@ var _ = Describe("Test AutoScalingListener controller", func() {
return roleBinding.RoleRef.Name, nil
},
autoscalingListenerTestTimeout,
autoscalingListenerTestInterval).Should(BeEquivalentTo(autoscalingListener.Name), "Rolebinding should be created")
// Check if pod is created
pod := new(corev1.Pod)
@@ -248,7 +244,7 @@ var _ = Describe("Test AutoScalingListener controller", func() {
Eventually(
func() bool {
roleBinding := new(rbacv1.RoleBinding)
err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingListener.Name, Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace}, roleBinding)
return kerrors.IsNotFound(err)
},
autoscalingListenerTestTimeout,
@@ -259,7 +255,7 @@ var _ = Describe("Test AutoScalingListener controller", func() {
Eventually(
func() bool {
role := new(rbacv1.Role)
err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingListener.Name, Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace}, role)
return kerrors.IsNotFound(err)
},
autoscalingListenerTestTimeout,
@@ -340,7 +336,7 @@ var _ = Describe("Test AutoScalingListener controller", func() {
role := new(rbacv1.Role)
Eventually(
func() ([]rbacv1.PolicyRule, error) {
err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingListener.Name, Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace}, role)
if err != nil {
return nil, err
}
@@ -351,7 +347,7 @@ var _ = Describe("Test AutoScalingListener controller", func() {
autoscalingListenerTestInterval).Should(BeEquivalentTo(rulesForListenerRole([]string{updated.Spec.EphemeralRunnerSetName})), "Role should be updated")
})
It("It should re-create pod whenever listener container is terminated", func() {
It("It should re-create pod and config secret whenever listener container is terminated", func() {
// Waiting for the pod is created
pod := new(corev1.Pod)
Eventually(
@@ -367,7 +363,18 @@ var _ = Describe("Test AutoScalingListener controller", func() {
autoscalingListenerTestInterval,
).Should(BeEquivalentTo(autoscalingListener.Name), "Pod should be created")
secret := new(corev1.Secret)
Eventually(
func() error {
return k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerConfigName(autoscalingListener), Namespace: autoscalingListener.Namespace}, secret)
},
autoscalingListenerTestTimeout,
autoscalingListenerTestInterval,
).Should(Succeed(), "Config secret should be created")
oldPodUID := string(pod.UID)
oldSecretUID := string(secret.UID)
updated := pod.DeepCopy()
updated.Status.ContainerStatuses = []corev1.ContainerStatus{
{
@@ -396,75 +403,21 @@ var _ = Describe("Test AutoScalingListener controller", func() {
autoscalingListenerTestTimeout,
autoscalingListenerTestInterval,
).ShouldNot(BeEquivalentTo(oldPodUID), "Pod should be re-created")
// Check if config secret is re-created
Eventually(
func() (string, error) {
secret := new(corev1.Secret)
err := k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerConfigName(autoscalingListener), Namespace: autoscalingListener.Namespace}, secret)
if err != nil {
return "", err
}
return string(secret.UID), nil
},
autoscalingListenerTestTimeout,
autoscalingListenerTestInterval,
).ShouldNot(BeEquivalentTo(oldSecretUID), "Config secret should be re-created")
})
})
})
@@ -507,10 +460,17 @@ var _ = Describe("Test AutoScalingListener customization", func() {
autoscalingNS, mgr = createNamespace(GinkgoT(), k8sClient)
configSecret = createDefaultSecret(GinkgoT(), k8sClient, autoscalingNS.Name)
secretResolver := NewSecretResolver(mgr.GetClient(), fake.NewMultiClient())
rb := ResourceBuilder{
SecretResolver: secretResolver,
}
controller := &AutoscalingListenerReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
Log: logf.Log,
ResourceBuilder: rb,
}
err := controller.SetupWithManager(mgr)
Expect(err).NotTo(HaveOccurred(), "failed to setup controller")
@@ -780,11 +740,17 @@ var _ = Describe("Test AutoScalingListener controller with proxy", func() {
ctx = context.Background()
autoscalingNS, mgr = createNamespace(GinkgoT(), k8sClient)
configSecret = createDefaultSecret(GinkgoT(), k8sClient, autoscalingNS.Name)
secretResolver := NewSecretResolver(mgr.GetClient(), fake.NewMultiClient())
rb := ResourceBuilder{
SecretResolver: secretResolver,
}
controller := &AutoscalingListenerReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
Log: logf.Log,
ResourceBuilder: rb,
}
err := controller.SetupWithManager(mgr)
Expect(err).NotTo(HaveOccurred(), "failed to setup controller")
@@ -977,10 +943,17 @@ var _ = Describe("Test AutoScalingListener controller with template modification
autoscalingNS, mgr = createNamespace(GinkgoT(), k8sClient)
configSecret = createDefaultSecret(GinkgoT(), k8sClient, autoscalingNS.Name)
secretResolver := NewSecretResolver(mgr.GetClient(), fake.NewMultiClient())
rb := ResourceBuilder{
SecretResolver: secretResolver,
}
controller := &AutoscalingListenerReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
Log: logf.Log,
ResourceBuilder: rb,
}
err := controller.SetupWithManager(mgr)
Expect(err).NotTo(HaveOccurred(), "failed to setup controller")
@@ -1073,6 +1046,12 @@ var _ = Describe("Test GitHub Server TLS configuration", func() {
autoscalingNS, mgr = createNamespace(GinkgoT(), k8sClient)
configSecret = createDefaultSecret(GinkgoT(), k8sClient, autoscalingNS.Name)
secretResolver := NewSecretResolver(mgr.GetClient(), fake.NewMultiClient())
rb := ResourceBuilder{
SecretResolver: secretResolver,
}
cert, err := os.ReadFile(filepath.Join(
"../../",
"github",
@@ -1094,9 +1073,10 @@ var _ = Describe("Test GitHub Server TLS configuration", func() {
Expect(err).NotTo(HaveOccurred(), "failed to create configmap with root CAs")
controller := &AutoscalingListenerReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
Log: logf.Log,
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
Log: logf.Log,
ResourceBuilder: rb,
}
err = controller.SetupWithManager(mgr)
Expect(err).NotTo(HaveOccurred(), "failed to setup controller")
@@ -1111,7 +1091,7 @@ var _ = Describe("Test GitHub Server TLS configuration", func() {
Spec: v1alpha1.AutoscalingRunnerSetSpec{
GitHubConfigUrl: "https://github.com/owner/repo",
GitHubConfigSecret: configSecret.Name,
GitHubServerTLS: &v1alpha1.GitHubServerTLSConfig{
GitHubServerTLS: &v1alpha1.TLSConfig{
CertificateFrom: &v1alpha1.TLSCertificateSource{
ConfigMapKeyRef: &corev1.ConfigMapKeySelector{
LocalObjectReference: corev1.LocalObjectReference{
@@ -1147,7 +1127,7 @@ var _ = Describe("Test GitHub Server TLS configuration", func() {
Spec: v1alpha1.AutoscalingListenerSpec{
GitHubConfigUrl: "https://github.com/owner/repo",
GitHubConfigSecret: configSecret.Name,
GitHubServerTLS: &v1alpha1.GitHubServerTLSConfig{
GitHubServerTLS: &v1alpha1.TLSConfig{
CertificateFrom: &v1alpha1.TLSCertificateSource{
ConfigMapKeyRef: &corev1.ConfigMapKeySelector{
LocalObjectReference: corev1.LocalObjectReference{
@@ -1191,7 +1171,7 @@ var _ = Describe("Test GitHub Server TLS configuration", func() {
g.Expect(config.Data["config.json"]).ToNot(BeEmpty(), "listener configuration file should not be empty")
var listenerConfig listenerconfig.Config
var listenerConfig ghalistenerconfig.Config
err = json.Unmarshal(config.Data["config.json"], &listenerConfig)
g.Expect(err).NotTo(HaveOccurred(), "failed to parse listener configuration file")
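As an aside, the assertion above reduces to parsing the rendered listener configuration out of the object's Data map. A minimal standalone sketch of that step, assuming the JSON lives under the "config.json" key; the Config fields here are illustrative stand-ins, not the actual ghalistenerconfig.Config definition:

package main

import (
	"encoding/json"
	"fmt"
)

// Config is a stand-in for ghalistenerconfig.Config; the real field set is
// defined outside this diff.
type Config struct {
	ConfigureUrl string `json:"configureUrl"`
	ServerRootCA string `json:"serverRootCA"`
}

// parseListenerConfig mirrors the test above: the listener configuration is
// stored as JSON under the "config.json" key.
func parseListenerConfig(data map[string][]byte) (*Config, error) {
	raw, ok := data["config.json"]
	if !ok || len(raw) == 0 {
		return nil, fmt.Errorf("config.json is missing or empty")
	}
	var cfg Config
	if err := json.Unmarshal(raw, &cfg); err != nil {
		return nil, fmt.Errorf("failed to parse listener configuration: %w", err)
	}
	return &cfg, nil
}

func main() {
	cfg, err := parseListenerConfig(map[string][]byte{
		"config.json": []byte(`{"configureUrl":"https://github.com/owner/repo"}`),
	})
	fmt.Println(cfg, err)
}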

@@ -99,7 +99,7 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl
return ctrl.Result{}, client.IgnoreNotFound(err)
}
if !autoscalingRunnerSet.ObjectMeta.DeletionTimestamp.IsZero() {
if !autoscalingRunnerSet.DeletionTimestamp.IsZero() {
if !controllerutil.ContainsFinalizer(autoscalingRunnerSet, autoscalingRunnerSetFinalizerName) {
return ctrl.Result{}, nil
}
@@ -151,7 +151,7 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl
return ctrl.Result{}, nil
}
if autoscalingRunnerSet.Labels[LabelKeyKubernetesVersion] != build.Version {
if !v1alpha1.IsVersionAllowed(autoscalingRunnerSet.Labels[LabelKeyKubernetesVersion], build.Version) {
if err := r.Delete(ctx, autoscalingRunnerSet); err != nil {
log.Error(err, "Failed to delete autoscaling runner set on version mismatch",
"buildVersion", build.Version,
@@ -207,14 +207,6 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl
return r.updateRunnerScaleSetName(ctx, autoscalingRunnerSet, log)
}
secret := new(corev1.Secret)
if err := r.Get(ctx, types.NamespacedName{Namespace: autoscalingRunnerSet.Namespace, Name: autoscalingRunnerSet.Spec.GitHubConfigSecret}, secret); err != nil {
log.Error(err, "Failed to find GitHub config secret.",
"namespace", autoscalingRunnerSet.Namespace,
"name", autoscalingRunnerSet.Spec.GitHubConfigSecret)
return ctrl.Result{}, err
}
existingRunnerSets, err := r.listEphemeralRunnerSets(ctx, autoscalingRunnerSet)
if err != nil {
log.Error(err, "Failed to list existing ephemeral runner sets")
@@ -332,7 +324,7 @@ func (r *AutoscalingRunnerSetReconciler) cleanupListener(ctx context.Context, au
err = r.Get(ctx, client.ObjectKey{Namespace: r.ControllerNamespace, Name: scaleSetListenerName(autoscalingRunnerSet)}, &listener)
switch {
case err == nil:
if listener.ObjectMeta.DeletionTimestamp.IsZero() {
if listener.DeletionTimestamp.IsZero() {
logger.Info("Deleting the listener")
if err := r.Delete(ctx, &listener); err != nil {
return false, fmt.Errorf("failed to delete listener: %w", err)
@@ -369,7 +361,7 @@ func (r *AutoscalingRunnerSetReconciler) deleteEphemeralRunnerSets(ctx context.C
for i := range oldRunnerSets {
rs := &oldRunnerSets[i]
// already deleted but contains finalizer so it still exists
if !rs.ObjectMeta.DeletionTimestamp.IsZero() {
if !rs.DeletionTimestamp.IsZero() {
logger.Info("Skip ephemeral runner set since it is already marked for deletion", "name", rs.Name)
continue
}
@@ -402,12 +394,12 @@ func (r *AutoscalingRunnerSetReconciler) removeFinalizersFromDependentResources(
func (r *AutoscalingRunnerSetReconciler) createRunnerScaleSet(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) (ctrl.Result, error) {
logger.Info("Creating a new runner scale set")
actionsClient, err := r.actionsClientFor(ctx, autoscalingRunnerSet)
actionsClient, err := r.GetActionsService(ctx, autoscalingRunnerSet)
if len(autoscalingRunnerSet.Spec.RunnerScaleSetName) == 0 {
autoscalingRunnerSet.Spec.RunnerScaleSetName = autoscalingRunnerSet.Name
}
if err != nil {
logger.Error(err, "Failed to initialize Actions service client for creating a new runner scale set")
logger.Error(err, "Failed to initialize Actions service client for creating a new runner scale set", "error", err.Error())
return ctrl.Result{}, err
}
@@ -498,7 +490,7 @@ func (r *AutoscalingRunnerSetReconciler) updateRunnerScaleSetRunnerGroup(ctx con
return ctrl.Result{}, err
}
actionsClient, err := r.actionsClientFor(ctx, autoscalingRunnerSet)
actionsClient, err := r.GetActionsService(ctx, autoscalingRunnerSet)
if err != nil {
logger.Error(err, "Failed to initialize Actions service client for updating a existing runner scale set")
return ctrl.Result{}, err
@@ -546,7 +538,7 @@ func (r *AutoscalingRunnerSetReconciler) updateRunnerScaleSetName(ctx context.Co
return ctrl.Result{}, nil
}
actionsClient, err := r.actionsClientFor(ctx, autoscalingRunnerSet)
actionsClient, err := r.GetActionsService(ctx, autoscalingRunnerSet)
if err != nil {
logger.Error(err, "Failed to initialize Actions service client for updating a existing runner scale set")
return ctrl.Result{}, err
@@ -597,7 +589,7 @@ func (r *AutoscalingRunnerSetReconciler) deleteRunnerScaleSet(ctx context.Contex
return nil
}
actionsClient, err := r.actionsClientFor(ctx, autoscalingRunnerSet)
actionsClient, err := r.GetActionsService(ctx, autoscalingRunnerSet)
if err != nil {
logger.Error(err, "Failed to initialize Actions service client for updating a existing runner scale set")
return err
@@ -622,7 +614,7 @@ func (r *AutoscalingRunnerSetReconciler) deleteRunnerScaleSet(ctx context.Contex
}
func (r *AutoscalingRunnerSetReconciler) createEphemeralRunnerSet(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, log logr.Logger) (ctrl.Result, error) {
desiredRunnerSet, err := r.ResourceBuilder.newEphemeralRunnerSet(autoscalingRunnerSet)
desiredRunnerSet, err := r.newEphemeralRunnerSet(autoscalingRunnerSet)
if err != nil {
log.Error(err, "Could not create EphemeralRunnerSet")
return ctrl.Result{}, err
@@ -651,7 +643,7 @@ func (r *AutoscalingRunnerSetReconciler) createAutoScalingListenerForRunnerSet(c
})
}
autoscalingListener, err := r.ResourceBuilder.newAutoScalingListener(autoscalingRunnerSet, ephemeralRunnerSet, r.ControllerNamespace, r.DefaultRunnerScaleSetListenerImage, imagePullSecrets)
autoscalingListener, err := r.newAutoScalingListener(autoscalingRunnerSet, ephemeralRunnerSet, r.ControllerNamespace, r.DefaultRunnerScaleSetListenerImage, imagePullSecrets)
if err != nil {
log.Error(err, "Could not create AutoscalingListener spec")
return ctrl.Result{}, err
@@ -676,74 +668,6 @@ func (r *AutoscalingRunnerSetReconciler) listEphemeralRunnerSets(ctx context.Con
return &EphemeralRunnerSets{list: list}, nil
}
func (r *AutoscalingRunnerSetReconciler) actionsClientFor(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet) (actions.ActionsService, error) {
var configSecret corev1.Secret
if err := r.Get(ctx, types.NamespacedName{Namespace: autoscalingRunnerSet.Namespace, Name: autoscalingRunnerSet.Spec.GitHubConfigSecret}, &configSecret); err != nil {
return nil, fmt.Errorf("failed to find GitHub config secret: %w", err)
}
opts, err := r.actionsClientOptionsFor(ctx, autoscalingRunnerSet)
if err != nil {
return nil, fmt.Errorf("failed to get actions client options: %w", err)
}
return r.ActionsClient.GetClientFromSecret(
ctx,
autoscalingRunnerSet.Spec.GitHubConfigUrl,
autoscalingRunnerSet.Namespace,
configSecret.Data,
opts...,
)
}
func (r *AutoscalingRunnerSetReconciler) actionsClientOptionsFor(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet) ([]actions.ClientOption, error) {
var options []actions.ClientOption
if autoscalingRunnerSet.Spec.Proxy != nil {
proxyFunc, err := autoscalingRunnerSet.Spec.Proxy.ProxyFunc(func(s string) (*corev1.Secret, error) {
var secret corev1.Secret
err := r.Get(ctx, types.NamespacedName{Namespace: autoscalingRunnerSet.Namespace, Name: s}, &secret)
if err != nil {
return nil, fmt.Errorf("failed to get proxy secret %s: %w", s, err)
}
return &secret, nil
})
if err != nil {
return nil, fmt.Errorf("failed to get proxy func: %w", err)
}
options = append(options, actions.WithProxy(proxyFunc))
}
tlsConfig := autoscalingRunnerSet.Spec.GitHubServerTLS
if tlsConfig != nil {
pool, err := tlsConfig.ToCertPool(func(name, key string) ([]byte, error) {
var configmap corev1.ConfigMap
err := r.Get(
ctx,
types.NamespacedName{
Namespace: autoscalingRunnerSet.Namespace,
Name: name,
},
&configmap,
)
if err != nil {
return nil, fmt.Errorf("failed to get configmap %s: %w", name, err)
}
return []byte(configmap.Data[key]), nil
})
if err != nil {
return nil, fmt.Errorf("failed to get tls config: %w", err)
}
options = append(options, actions.WithRootCAs(pool))
}
return options, nil
}
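The client-construction logic deleted above does not disappear; judging from the ResourceBuilder and SecretResolver fields that the tests below populate, it moves behind a shared resolver. The following is a hedged sketch of what such a GetActionsService helper could look like, recomposed from the removed actionsClientFor/actionsClientOptionsFor bodies; the method name, receiver, and placement are assumptions, only the calls mirrored from the removed code are grounded, and the controller package's existing imports and types are assumed:

// SecretResolver bundles the Kubernetes client and the Actions multi-client,
// matching the fields the tests below set (k8sClient, multiClient).
type SecretResolver struct {
	k8sClient   client.Client
	multiClient actions.MultiClient
}

// GetActionsService (sketch): fetch the GitHub config secret, assemble the
// proxy option, and hand both to the multi-client, the same steps the removed
// actionsClientFor performed inline.
func (s *SecretResolver) GetActionsService(ctx context.Context, rs *v1alpha1.AutoscalingRunnerSet) (actions.ActionsService, error) {
	var configSecret corev1.Secret
	if err := s.k8sClient.Get(ctx, types.NamespacedName{Namespace: rs.Namespace, Name: rs.Spec.GitHubConfigSecret}, &configSecret); err != nil {
		return nil, fmt.Errorf("failed to find GitHub config secret: %w", err)
	}

	var options []actions.ClientOption
	if rs.Spec.Proxy != nil {
		proxyFunc, err := rs.Spec.Proxy.ProxyFunc(func(name string) (*corev1.Secret, error) {
			var secret corev1.Secret
			if err := s.k8sClient.Get(ctx, types.NamespacedName{Namespace: rs.Namespace, Name: name}, &secret); err != nil {
				return nil, fmt.Errorf("failed to get proxy secret %s: %w", name, err)
			}
			return &secret, nil
		})
		if err != nil {
			return nil, fmt.Errorf("failed to get proxy func: %w", err)
		}
		options = append(options, actions.WithProxy(proxyFunc))
	}
	// The TLS option (GitHubServerTLS.ToCertPool + actions.WithRootCAs) would
	// follow the same shape as the removed actionsClientOptionsFor body.

	return s.multiClient.GetClientFromSecret(ctx, rs.Spec.GitHubConfigUrl, rs.Namespace, configSecret.Data, options...)
}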
// SetupWithManager sets up the controller with the Manager.
func (r *AutoscalingRunnerSetReconciler) SetupWithManager(mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr).

@@ -70,7 +70,12 @@ var _ = Describe("Test AutoScalingRunnerSet controller", Ordered, func() {
Log: logf.Log,
ControllerNamespace: autoscalingNS.Name,
DefaultRunnerScaleSetListenerImage: "ghcr.io/actions/arc",
ActionsClient: fake.NewMultiClient(),
ResourceBuilder: ResourceBuilder{
SecretResolver: &SecretResolver{
k8sClient: k8sClient,
multiClient: fake.NewMultiClient(),
},
},
}
err := controller.SetupWithManager(mgr)
Expect(err).NotTo(HaveOccurred(), "failed to setup controller")
@@ -280,10 +285,10 @@ var _ = Describe("Test AutoScalingRunnerSet controller", Ordered, func() {
// This should trigger re-creation of EphemeralRunnerSet and Listener
patched := autoscalingRunnerSet.DeepCopy()
patched.Spec.Template.Spec.PriorityClassName = "test-priority-class"
if patched.ObjectMeta.Annotations == nil {
patched.ObjectMeta.Annotations = make(map[string]string)
if patched.Annotations == nil {
patched.Annotations = make(map[string]string)
}
patched.ObjectMeta.Annotations[annotationKeyValuesHash] = "test-hash"
patched.Annotations[annotationKeyValuesHash] = "test-hash"
err = k8sClient.Patch(ctx, patched, client.MergeFrom(autoscalingRunnerSet))
Expect(err).NotTo(HaveOccurred(), "failed to patch AutoScalingRunnerSet")
autoscalingRunnerSet = patched.DeepCopy()
@@ -383,7 +388,7 @@ var _ = Describe("Test AutoScalingRunnerSet controller", Ordered, func() {
Expect(err).NotTo(HaveOccurred(), "failed to get Listener")
patched = autoscalingRunnerSet.DeepCopy()
patched.ObjectMeta.Annotations[annotationKeyValuesHash] = "hash-changes"
patched.Annotations[annotationKeyValuesHash] = "hash-changes"
err = k8sClient.Patch(ctx, patched, client.MergeFrom(autoscalingRunnerSet))
Expect(err).NotTo(HaveOccurred(), "failed to patch AutoScalingRunnerSet")
@@ -546,10 +551,10 @@ var _ = Describe("Test AutoScalingRunnerSet controller", Ordered, func() {
// Patch the AutoScalingRunnerSet image which should trigger
// the recreation of the Listener and EphemeralRunnerSet
patched := autoscalingRunnerSet.DeepCopy()
if patched.ObjectMeta.Annotations == nil {
patched.ObjectMeta.Annotations = make(map[string]string)
if patched.Annotations == nil {
patched.Annotations = make(map[string]string)
}
patched.ObjectMeta.Annotations[annotationKeyValuesHash] = "testgroup2"
patched.Annotations[annotationKeyValuesHash] = "testgroup2"
patched.Spec.Template.Spec = corev1.PodSpec{
Containers: []corev1.Container{
{
@@ -677,33 +682,40 @@ var _ = Describe("Test AutoScalingController updates", Ordered, func() {
autoscalingNS, mgr = createNamespace(GinkgoT(), k8sClient)
configSecret = createDefaultSecret(GinkgoT(), k8sClient, autoscalingNS.Name)
multiClient := fake.NewMultiClient(
fake.WithDefaultClient(
fake.NewFakeClient(
fake.WithUpdateRunnerScaleSet(
&actions.RunnerScaleSet{
Id: 1,
Name: "testset_update",
RunnerGroupId: 1,
RunnerGroupName: "testgroup",
Labels: []actions.Label{{Type: "test", Name: "test"}},
RunnerSetting: actions.RunnerSetting{},
CreatedOn: time.Now(),
RunnerJitConfigUrl: "test.test.test",
Statistics: nil,
},
nil,
),
),
nil,
),
)
controller := &AutoscalingRunnerSetReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
Log: logf.Log,
ControllerNamespace: autoscalingNS.Name,
DefaultRunnerScaleSetListenerImage: "ghcr.io/actions/arc",
ActionsClient: fake.NewMultiClient(
fake.WithDefaultClient(
fake.NewFakeClient(
fake.WithUpdateRunnerScaleSet(
&actions.RunnerScaleSet{
Id: 1,
Name: "testset_update",
RunnerGroupId: 1,
RunnerGroupName: "testgroup",
Labels: []actions.Label{{Type: "test", Name: "test"}},
RunnerSetting: actions.RunnerSetting{},
CreatedOn: time.Now(),
RunnerJitConfigUrl: "test.test.test",
Statistics: nil,
},
nil,
),
),
nil,
),
),
ResourceBuilder: ResourceBuilder{
SecretResolver: &SecretResolver{
k8sClient: k8sClient,
multiClient: multiClient,
},
},
}
err := controller.SetupWithManager(mgr)
Expect(err).NotTo(HaveOccurred(), "failed to setup controller")
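The fake wiring above follows the usual functional-options pattern: each fake.With... call returns a mutator that seeds a canned response. A generic, self-contained sketch of the shape (the names below are illustrative, not the repo's fake package):

package fakesketch

// result pairs a canned return value with a canned error.
type result[T any] struct {
	value T
	err   error
}

// fakeClient holds the canned responses a test injects via options.
type fakeClient struct {
	updateRunnerScaleSet result[string] // string stands in for *actions.RunnerScaleSet
}

// Option mutates the fake before it is handed to the code under test.
type Option func(*fakeClient)

// WithUpdateRunnerScaleSet stubs a single call's result, like
// fake.WithUpdateRunnerScaleSet above.
func WithUpdateRunnerScaleSet(value string, err error) Option {
	return func(f *fakeClient) {
		f.updateRunnerScaleSet = result[string]{value: value, err: err}
	}
}

// NewFakeClient applies the options in order, as fake.NewFakeClient does.
func NewFakeClient(opts ...Option) *fakeClient {
	f := &fakeClient{}
	for _, opt := range opts {
		opt(f)
	}
	return f
}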
@@ -818,7 +830,12 @@ var _ = Describe("Test AutoscalingController creation failures", Ordered, func()
Log: logf.Log,
ControllerNamespace: autoscalingNS.Name,
DefaultRunnerScaleSetListenerImage: "ghcr.io/actions/arc",
ActionsClient: fake.NewMultiClient(),
ResourceBuilder: ResourceBuilder{
SecretResolver: &SecretResolver{
k8sClient: k8sClient,
multiClient: fake.NewMultiClient(),
},
},
}
err := controller.SetupWithManager(mgr)
Expect(err).NotTo(HaveOccurred(), "failed to setup controller")
@@ -875,7 +892,7 @@ var _ = Describe("Test AutoscalingController creation failures", Ordered, func()
autoscalingRunnerSetTestInterval,
).Should(BeEquivalentTo(autoscalingRunnerSetFinalizerName), "AutoScalingRunnerSet should have a finalizer")
ars.ObjectMeta.Annotations = make(map[string]string)
ars.Annotations = make(map[string]string)
err = k8sClient.Update(ctx, ars)
Expect(err).NotTo(HaveOccurred(), "Update autoscaling runner set without annotation should be successful")
@@ -937,14 +954,19 @@ var _ = Describe("Test client optional configuration", Ordered, func() {
ctx = context.Background()
autoscalingNS, mgr = createNamespace(GinkgoT(), k8sClient)
configSecret = createDefaultSecret(GinkgoT(), k8sClient, autoscalingNS.Name)
multiClient := actions.NewMultiClient(logr.Discard())
controller = &AutoscalingRunnerSetReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
Log: logf.Log,
ControllerNamespace: autoscalingNS.Name,
DefaultRunnerScaleSetListenerImage: "ghcr.io/actions/arc",
ActionsClient: actions.NewMultiClient(logr.Discard()),
ResourceBuilder: ResourceBuilder{
SecretResolver: &SecretResolver{
k8sClient: k8sClient,
multiClient: multiClient,
},
},
}
err := controller.SetupWithManager(mgr)
@@ -1127,7 +1149,12 @@ var _ = Describe("Test client optional configuration", Ordered, func() {
Log: logf.Log,
ControllerNamespace: autoscalingNS.Name,
DefaultRunnerScaleSetListenerImage: "ghcr.io/actions/arc",
ActionsClient: fake.NewMultiClient(),
ResourceBuilder: ResourceBuilder{
SecretResolver: &SecretResolver{
k8sClient: k8sClient,
multiClient: fake.NewMultiClient(),
},
},
}
err = controller.SetupWithManager(mgr)
Expect(err).NotTo(HaveOccurred(), "failed to setup controller")
@@ -1136,7 +1163,10 @@ var _ = Describe("Test client optional configuration", Ordered, func() {
})
It("should be able to make requests to a server using root CAs", func() {
controller.ActionsClient = actions.NewMultiClient(logr.Discard())
controller.SecretResolver = &SecretResolver{
k8sClient: k8sClient,
multiClient: actions.NewMultiClient(logr.Discard()),
}
certsFolder := filepath.Join(
"../../",
@@ -1171,7 +1201,7 @@ var _ = Describe("Test client optional configuration", Ordered, func() {
Spec: v1alpha1.AutoscalingRunnerSetSpec{
GitHubConfigUrl: server.ConfigURLForOrg("my-org"),
GitHubConfigSecret: configSecret.Name,
GitHubServerTLS: &v1alpha1.GitHubServerTLSConfig{
GitHubServerTLS: &v1alpha1.TLSConfig{
CertificateFrom: &v1alpha1.TLSCertificateSource{
ConfigMapKeyRef: &corev1.ConfigMapKeySelector{
LocalObjectReference: corev1.LocalObjectReference{
@@ -1224,7 +1254,7 @@ var _ = Describe("Test client optional configuration", Ordered, func() {
Spec: v1alpha1.AutoscalingRunnerSetSpec{
GitHubConfigUrl: "https://github.com/owner/repo",
GitHubConfigSecret: configSecret.Name,
GitHubServerTLS: &v1alpha1.GitHubServerTLSConfig{
GitHubServerTLS: &v1alpha1.TLSConfig{
CertificateFrom: &v1alpha1.TLSCertificateSource{
ConfigMapKeyRef: &corev1.ConfigMapKeySelector{
LocalObjectReference: corev1.LocalObjectReference{
@@ -1288,7 +1318,7 @@ var _ = Describe("Test client optional configuration", Ordered, func() {
Spec: v1alpha1.AutoscalingRunnerSetSpec{
GitHubConfigUrl: "https://github.com/owner/repo",
GitHubConfigSecret: configSecret.Name,
GitHubServerTLS: &v1alpha1.GitHubServerTLSConfig{
GitHubServerTLS: &v1alpha1.TLSConfig{
CertificateFrom: &v1alpha1.TLSCertificateSource{
ConfigMapKeyRef: &corev1.ConfigMapKeySelector{
LocalObjectReference: corev1.LocalObjectReference{
@@ -1361,7 +1391,12 @@ var _ = Describe("Test external permissions cleanup", Ordered, func() {
Log: logf.Log,
ControllerNamespace: autoscalingNS.Name,
DefaultRunnerScaleSetListenerImage: "ghcr.io/actions/arc",
ActionsClient: fake.NewMultiClient(),
ResourceBuilder: ResourceBuilder{
SecretResolver: &SecretResolver{
k8sClient: k8sClient,
multiClient: fake.NewMultiClient(),
},
},
}
err := controller.SetupWithManager(mgr)
Expect(err).NotTo(HaveOccurred(), "failed to setup controller")
@@ -1519,7 +1554,12 @@ var _ = Describe("Test external permissions cleanup", Ordered, func() {
Log: logf.Log,
ControllerNamespace: autoscalingNS.Name,
DefaultRunnerScaleSetListenerImage: "ghcr.io/actions/arc",
ActionsClient: fake.NewMultiClient(),
ResourceBuilder: ResourceBuilder{
SecretResolver: &SecretResolver{
k8sClient: k8sClient,
multiClient: fake.NewMultiClient(),
},
},
}
err := controller.SetupWithManager(mgr)
Expect(err).NotTo(HaveOccurred(), "failed to setup controller")
@@ -1727,7 +1767,12 @@ var _ = Describe("Test resource version and build version mismatch", func() {
Log: logf.Log,
ControllerNamespace: autoscalingNS.Name,
DefaultRunnerScaleSetListenerImage: "ghcr.io/actions/arc",
ActionsClient: fake.NewMultiClient(),
ResourceBuilder: ResourceBuilder{
SecretResolver: &SecretResolver{
k8sClient: k8sClient,
multiClient: fake.NewMultiClient(),
},
},
}
err := controller.SetupWithManager(mgr)
Expect(err).NotTo(HaveOccurred(), "failed to setup controller")

@@ -21,6 +21,7 @@ import (
"errors"
"fmt"
"net/http"
"strconv"
"time"
"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
@@ -28,6 +29,7 @@ import (
"github.com/go-logr/logr"
corev1 "k8s.io/api/core/v1"
kerrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
ctrl "sigs.k8s.io/controller-runtime"
@@ -44,12 +46,24 @@ const (
// EphemeralRunnerReconciler reconciles a EphemeralRunner object
type EphemeralRunnerReconciler struct {
client.Client
Log logr.Logger
Scheme *runtime.Scheme
ActionsClient actions.MultiClient
Log logr.Logger
Scheme *runtime.Scheme
ResourceBuilder
}
// Precomputed backoff durations for failed ephemeral runners.
// len(failedRunnerBackoff) must equal maxFailures + 1.
var failedRunnerBackoff = []time.Duration{
0,
5 * time.Second,
10 * time.Second,
20 * time.Second,
40 * time.Second,
80 * time.Second,
}
const maxFailures = 5
// +kubebuilder:rbac:groups=actions.github.com,resources=ephemeralrunners,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=actions.github.com,resources=ephemeralrunners/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=actions.github.com,resources=ephemeralrunners/finalizers,verbs=get;list;watch;create;update;patch;delete
@@ -70,7 +84,7 @@ func (r *EphemeralRunnerReconciler) Reconcile(ctx context.Context, req ctrl.Requ
return ctrl.Result{}, client.IgnoreNotFound(err)
}
if !ephemeralRunner.ObjectMeta.DeletionTimestamp.IsZero() {
if !ephemeralRunner.DeletionTimestamp.IsZero() {
if !controllerutil.ContainsFinalizer(ephemeralRunner, ephemeralRunnerFinalizerName) {
return ctrl.Result{}, nil
}
@@ -166,69 +180,119 @@ func (r *EphemeralRunnerReconciler) Reconcile(ctx context.Context, req ctrl.Requ
return ctrl.Result{}, nil
}
if ephemeralRunner.Status.RunnerId == 0 {
log.Info("Creating new ephemeral runner registration and updating status with runner config")
if r, err := r.updateStatusWithRunnerConfig(ctx, ephemeralRunner, log); r != nil {
return *r, err
}
}
secret := new(corev1.Secret)
if err := r.Get(ctx, req.NamespacedName, secret); err != nil {
if !kerrors.IsNotFound(err) {
log.Error(err, "Failed to fetch secret")
return ctrl.Result{}, err
}
// create secret if not created
log.Info("Creating new ephemeral runner secret for jitconfig.")
if r, err := r.createSecret(ctx, ephemeralRunner, log); r != nil {
return *r, err
}
// Retry to get the secret that was just created.
// Otherwise, even though we want to continue to create the pod,
// it fails due to the missing secret resulting in an invalid pod spec.
if err := r.Get(ctx, req.NamespacedName, secret); err != nil {
log.Error(err, "Failed to fetch secret")
jitConfig, err := r.createRunnerJitConfig(ctx, ephemeralRunner, log)
switch {
case err == nil:
// create secret if not created
log.Info("Creating new ephemeral runner secret for jitconfig.")
jitSecret, err := r.createSecret(ctx, ephemeralRunner, jitConfig, log)
if err != nil {
return ctrl.Result{}, fmt.Errorf("failed to create secret: %w", err)
}
log.Info("Created new ephemeral runner secret for jitconfig.")
secret = jitSecret
case errors.Is(err, retryableError):
log.Info("Encountered retryable error, requeueing", "error", err.Error())
return ctrl.Result{Requeue: true}, nil
case errors.Is(err, fatalError):
log.Info("JIT config cannot be created for this ephemeral runner, issuing delete", "error", err.Error())
if err := r.Delete(ctx, ephemeralRunner); err != nil {
return ctrl.Result{}, fmt.Errorf("failed to delete the ephemeral runner: %w", err)
}
log.Info("Request to delete ephemeral runner has been issued")
return ctrl.Result{}, nil
default:
log.Error(err, "Failed to create ephemeral runners secret", "error", err.Error())
return ctrl.Result{}, err
}
}
if ephemeralRunner.Status.RunnerId == 0 {
log.Info("Updating ephemeral runner status with runnerId and runnerName")
runnerID, err := strconv.Atoi(string(secret.Data["runnerId"]))
if err != nil {
log.Error(err, "Runner config secret is corrupted: missing runnerId")
log.Info("Deleting corrupted runner config secret")
if err := r.Delete(ctx, secret); err != nil {
return ctrl.Result{}, fmt.Errorf("failed to delete the corrupted runner config secret")
}
log.Info("Corrupted runner config secret has been deleted")
return ctrl.Result{Requeue: true}, nil
}
runnerName := string(secret.Data["runnerName"])
if err := patchSubResource(ctx, r.Status(), ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) {
obj.Status.RunnerId = runnerID
obj.Status.RunnerName = runnerName
}); err != nil {
return ctrl.Result{}, fmt.Errorf("failed to update runner status for RunnerId/RunnerName/RunnerJITConfig: %w", err)
}
ephemeralRunner.Status.RunnerId = runnerID
ephemeralRunner.Status.RunnerName = runnerName
log.Info("Updated ephemeral runner status with runnerId and runnerName")
}
if len(ephemeralRunner.Status.Failures) > maxFailures {
log.Info(fmt.Sprintf("EphemeralRunner has failed more than %d times. Deleting ephemeral runner so it can be re-created", maxFailures))
if err := r.Delete(ctx, ephemeralRunner); err != nil {
log.Error(fmt.Errorf("failed to delete ephemeral runner after %d failures: %w", maxFailures, err), "Failed to delete ephemeral runner")
return ctrl.Result{}, err
}
return ctrl.Result{}, nil
}
now := metav1.Now()
lastFailure := ephemeralRunner.Status.LastFailure()
backoffDuration := failedRunnerBackoff[len(ephemeralRunner.Status.Failures)]
nextReconciliation := lastFailure.Add(backoffDuration)
if !lastFailure.IsZero() && now.Before(&metav1.Time{Time: nextReconciliation}) {
requeueAfter := nextReconciliation.Sub(now.Time)
log.Info("Backing off the next reconciliation due to failure",
"lastFailure", lastFailure,
"nextReconciliation", nextReconciliation,
"requeueAfter", requeueAfter,
)
return ctrl.Result{
Requeue: true,
RequeueAfter: requeueAfter,
}, nil
}
pod := new(corev1.Pod)
if err := r.Get(ctx, req.NamespacedName, pod); err != nil {
switch {
case !kerrors.IsNotFound(err):
if !kerrors.IsNotFound(err) {
log.Error(err, "Failed to fetch the pod")
return ctrl.Result{}, err
}
log.Info("Ephemeral runner pod does not exist. Creating new ephemeral runner")
case len(ephemeralRunner.Status.Failures) > 5:
log.Info("EphemeralRunner has failed more than 5 times. Marking it as failed")
errMessage := fmt.Sprintf("Pod has failed to start more than 5 times: %s", pod.Status.Message)
if err := r.markAsFailed(ctx, ephemeralRunner, errMessage, ReasonTooManyPodFailures, log); err != nil {
result, err := r.createPod(ctx, ephemeralRunner, secret, log)
switch {
case err == nil:
return result, nil
case kerrors.IsAlreadyExists(err):
log.Info("Runner pod already exists. Waiting for the pod event to be received")
return ctrl.Result{Requeue: true, RequeueAfter: 5 * time.Second}, nil
case kerrors.IsInvalid(err) || kerrors.IsForbidden(err):
log.Error(err, "Failed to create a pod due to unrecoverable failure")
errMessage := fmt.Sprintf("Failed to create the pod: %v", err)
if err := r.markAsFailed(ctx, ephemeralRunner, errMessage, ReasonInvalidPodFailure, log); err != nil {
log.Error(err, "Failed to set ephemeral runner to phase Failed")
return ctrl.Result{}, err
}
return ctrl.Result{}, nil
default:
// Pod was not found. Create if the pod has never been created
log.Info("Creating new EphemeralRunner pod.")
result, err := r.createPod(ctx, ephemeralRunner, secret, log)
switch {
case err == nil:
return result, nil
case kerrors.IsInvalid(err) || kerrors.IsForbidden(err):
log.Error(err, "Failed to create a pod due to unrecoverable failure")
errMessage := fmt.Sprintf("Failed to create the pod: %v", err)
if err := r.markAsFailed(ctx, ephemeralRunner, errMessage, ReasonInvalidPodFailure, log); err != nil {
log.Error(err, "Failed to set ephemeral runner to phase Failed")
return ctrl.Result{}, err
}
return ctrl.Result{}, nil
default:
log.Error(err, "Failed to create the pod")
return ctrl.Result{}, err
}
log.Error(err, "Failed to create the pod")
return ctrl.Result{}, err
}
}
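The failure backoff introduced earlier in this hunk is easy to reason about in isolation: the table index is the number of recorded failures, so each consecutive failure doubles the wait, and the reconciler requeues for whatever part of the window has not yet elapsed. A runnable sketch of the same arithmetic, detached from the controller machinery:

package main

import (
	"fmt"
	"time"
)

// Mirrors the controller's table: index == number of recorded failures, so
// len(failedRunnerBackoff) must be maxFailures + 1.
var failedRunnerBackoff = []time.Duration{
	0,
	5 * time.Second,
	10 * time.Second,
	20 * time.Second,
	40 * time.Second,
	80 * time.Second,
}

// requeueAfter returns how long reconciliation should wait; zero means
// reconcile immediately. The bounds guard stands in for the controller's
// "delete after maxFailures" branch, which runs before this gate.
func requeueAfter(lastFailure time.Time, failures int, now time.Time) time.Duration {
	if lastFailure.IsZero() || failures <= 0 || failures >= len(failedRunnerBackoff) {
		return 0
	}
	next := lastFailure.Add(failedRunnerBackoff[failures])
	if now.Before(next) {
		return next.Sub(now)
	}
	return 0
}

func main() {
	last := time.Now().Add(-3 * time.Second)
	fmt.Println(requeueAfter(last, 2, time.Now())) // ~7s: 10s backoff, 3s already elapsed
}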
@@ -262,34 +326,41 @@ func (r *EphemeralRunnerReconciler) Reconcile(ctx context.Context, req ctrl.Requ
case cs.State.Terminated.ExitCode != 0: // failed
log.Info("Ephemeral runner container failed", "exitCode", cs.State.Terminated.ExitCode)
if ephemeralRunner.HasJob() {
log.Error(
errors.New("ephemeral runner has a job assigned, but the pod has failed"),
"Ephemeral runner either has faulty entrypoint or something external killing the runner",
)
log.Info("Deleting the ephemeral runner that has a job assigned but the pod has failed")
if err := r.Delete(ctx, ephemeralRunner); err != nil {
log.Error(err, "Failed to delete the ephemeral runner that has a job assigned but the pod has failed")
return ctrl.Result{}, err
}
log.Info("Deleted the ephemeral runner that has a job assigned but the pod has failed")
log.Info("Trying to remove the runner from the service")
actionsClient, err := r.GetActionsService(ctx, ephemeralRunner)
if err != nil {
log.Error(err, "Failed to get actions client for removing the runner from the service")
return ctrl.Result{}, nil
}
if err := actionsClient.RemoveRunner(ctx, int64(ephemeralRunner.Status.RunnerId)); err != nil {
log.Error(err, "Failed to remove the runner from the service")
return ctrl.Result{}, nil
}
log.Info("Removed the runner from the service")
return ctrl.Result{}, nil
}
if err := r.deletePodAsFailed(ctx, ephemeralRunner, pod, log); err != nil {
log.Error(err, "Failed to delete runner pod on failure")
return ctrl.Result{}, err
}
return ctrl.Result{}, nil
default:
// pod succeeded. We double-check with the service if the runner exists.
// The reason is that image can potentially finish with status 0, but not pick up the job.
existsInService, err := r.runnerRegisteredWithService(ctx, ephemeralRunner.DeepCopy(), log)
if err != nil {
log.Error(err, "Failed to check if runner is registered with the service")
return ctrl.Result{}, err
}
if !existsInService {
// the runner does not exist in the service, so it must be done
log.Info("Ephemeral runner has finished since it does not exist in the service anymore")
if err := r.markAsFinished(ctx, ephemeralRunner, log); err != nil {
log.Error(err, "Failed to mark ephemeral runner as finished")
return ctrl.Result{}, err
}
return ctrl.Result{}, nil
}
// The runner still exists. This can happen if the pod exited with 0 but fails to start
log.Info("Ephemeral runner pod has finished, but the runner still exists in the service. Deleting the pod to restart it.")
if err := r.deletePodAsFailed(ctx, ephemeralRunner, pod, log); err != nil {
log.Error(err, "failed to delete a pod that still exists in the service")
default: // succeeded
log.Info("Ephemeral runner has finished successfully, deleting ephemeral runner", "exitCode", cs.State.Terminated.ExitCode)
if err := r.Delete(ctx, ephemeralRunner); err != nil {
log.Error(err, "Failed to delete ephemeral runner after successful completion")
return ctrl.Result{}, err
}
return ctrl.Result{}, nil
@@ -319,7 +390,7 @@ func (r *EphemeralRunnerReconciler) cleanupResources(ctx context.Context, epheme
err := r.Get(ctx, types.NamespacedName{Namespace: ephemeralRunner.Namespace, Name: ephemeralRunner.Name}, pod)
switch {
case err == nil:
if pod.ObjectMeta.DeletionTimestamp.IsZero() {
if pod.DeletionTimestamp.IsZero() {
log.Info("Deleting the runner pod")
if err := r.Delete(ctx, pod); err != nil && !kerrors.IsNotFound(err) {
return fmt.Errorf("failed to delete pod: %w", err)
@@ -339,7 +410,7 @@ func (r *EphemeralRunnerReconciler) cleanupResources(ctx context.Context, epheme
err = r.Get(ctx, types.NamespacedName{Namespace: ephemeralRunner.Namespace, Name: ephemeralRunner.Name}, secret)
switch {
case err == nil:
if secret.ObjectMeta.DeletionTimestamp.IsZero() {
if secret.DeletionTimestamp.IsZero() {
log.Info("Deleting the jitconfig secret")
if err := r.Delete(ctx, secret); err != nil && !kerrors.IsNotFound(err) {
return fmt.Errorf("failed to delete secret: %w", err)
@@ -393,7 +464,7 @@ func (r *EphemeralRunnerReconciler) cleanupRunnerLinkedPods(ctx context.Context,
var errs []error
for i := range runnerLinkedPodList.Items {
linkedPod := &runnerLinkedPodList.Items[i]
if !linkedPod.ObjectMeta.DeletionTimestamp.IsZero() {
if !linkedPod.DeletionTimestamp.IsZero() {
continue
}
@@ -409,7 +480,7 @@ func (r *EphemeralRunnerReconciler) cleanupRunnerLinkedPods(ctx context.Context,
func (r *EphemeralRunnerReconciler) cleanupRunnerLinkedSecrets(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) error {
runnerLinkedLabels := client.MatchingLabels(
map[string]string{
"runner-pod": ephemeralRunner.ObjectMeta.Name,
"runner-pod": ephemeralRunner.Name,
},
)
var runnerLinkedSecretList corev1.SecretList
@@ -427,7 +498,7 @@ func (r *EphemeralRunnerReconciler) cleanupRunnerLinkedSecrets(ctx context.Conte
var errs []error
for i := range runnerLinkedSecretList.Items {
s := &runnerLinkedSecretList.Items[i]
if !s.ObjectMeta.DeletionTimestamp.IsZero() {
if !s.DeletionTimestamp.IsZero() {
continue
}
@@ -459,22 +530,10 @@ func (r *EphemeralRunnerReconciler) markAsFailed(ctx context.Context, ephemeralR
return nil
}
func (r *EphemeralRunnerReconciler) markAsFinished(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) error {
log.Info("Updating ephemeral runner status to Finished")
if err := patchSubResource(ctx, r.Status(), ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) {
obj.Status.Phase = corev1.PodSucceeded
}); err != nil {
return fmt.Errorf("failed to update ephemeral runner with status finished: %w", err)
}
log.Info("EphemeralRunner status is marked as Finished")
return nil
}
// deletePodAsFailed is responsible for deleting the pod and updating the .Status.Failures for tracking failure count.
// It should not be responsible for setting the status to Failed.
func (r *EphemeralRunnerReconciler) deletePodAsFailed(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, pod *corev1.Pod, log logr.Logger) error {
if pod.ObjectMeta.DeletionTimestamp.IsZero() {
if pod.DeletionTimestamp.IsZero() {
log.Info("Deleting the ephemeral runner pod", "podId", pod.UID)
if err := r.Delete(ctx, pod); err != nil && !kerrors.IsNotFound(err) {
return fmt.Errorf("failed to delete pod with status failed: %w", err)
@@ -484,9 +543,9 @@ func (r *EphemeralRunnerReconciler) deletePodAsFailed(ctx context.Context, ephem
log.Info("Updating ephemeral runner status to track the failure count")
if err := patchSubResource(ctx, r.Status(), ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) {
if obj.Status.Failures == nil {
obj.Status.Failures = make(map[string]bool)
obj.Status.Failures = make(map[string]metav1.Time)
}
obj.Status.Failures[string(pod.UID)] = true
obj.Status.Failures[string(pod.UID)] = metav1.Now()
obj.Status.Ready = false
obj.Status.Reason = pod.Status.Reason
obj.Status.Message = pod.Status.Message
@@ -498,14 +557,12 @@ func (r *EphemeralRunnerReconciler) deletePodAsFailed(ctx context.Context, ephem
return nil
}
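Status.LastFailure(), consumed by the backoff gate earlier in this file, is not part of this diff; with Failures now a map[string]metav1.Time, it plausibly returns the most recent recorded timestamp. A sketch under that assumption (the receiver is assumed to be v1alpha1's EphemeralRunnerStatus):

// LastFailure (sketch): the latest failure time recorded in the Failures map.
// The real method is defined in the v1alpha1 package outside this diff.
func (s *EphemeralRunnerStatus) LastFailure() metav1.Time {
	var last metav1.Time
	for _, failedAt := range s.Failures {
		if last.Before(&failedAt) {
			last = failedAt
		}
	}
	return last
}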
// updateStatusWithRunnerConfig fetches runtime configuration needed by the runner
// This method should always set .status.runnerId and .status.runnerJITConfig
func (r *EphemeralRunnerReconciler) updateStatusWithRunnerConfig(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) (*ctrl.Result, error) {
func (r *EphemeralRunnerReconciler) createRunnerJitConfig(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) (*actions.RunnerScaleSetJitRunnerConfig, error) {
// Runner is not registered with the service. We need to register it first
log.Info("Creating ephemeral runner JIT config")
actionsClient, err := r.actionsClientFor(ctx, ephemeralRunner)
actionsClient, err := r.GetActionsService(ctx, ephemeralRunner)
if err != nil {
return &ctrl.Result{}, fmt.Errorf("failed to get actions client for generating JIT config: %w", err)
return nil, fmt.Errorf("failed to get actions client for generating JIT config: %w", err)
}
jitSettings := &actions.RunnerScaleSetJitRunnerSetting{
@@ -520,74 +577,52 @@ func (r *EphemeralRunnerReconciler) updateStatusWithRunnerConfig(ctx context.Con
}
jitConfig, err := actionsClient.GenerateJitRunnerConfig(ctx, jitSettings, ephemeralRunner.Spec.RunnerScaleSetId)
if err == nil { // if NO error
log.Info("Created ephemeral runner JIT config", "runnerId", jitConfig.Runner.Id)
return jitConfig, nil
}
actionsError := &actions.ActionsError{}
if !errors.As(err, &actionsError) {
return nil, fmt.Errorf("failed to generate JIT config with generic error: %w", err)
}
if actionsError.StatusCode != http.StatusConflict ||
!actionsError.IsException("AgentExistsException") {
return nil, fmt.Errorf("failed to generate JIT config with Actions service error: %w", err)
}
// If the runner with the name we want already exists it means:
// - We might have a name collision.
// - Our previous reconciliation loop failed to update the
// status with the runnerId and runnerJITConfig after the `GenerateJitRunnerConfig`
// created the runner registration on the service.
// We will try to get the runner and see if it belongs to this AutoScalingRunnerSet;
// if so, we can simply delete the runner registration and create a new one.
log.Info("Getting runner jit config failed with conflict error, trying to get the runner by name", "runnerName", ephemeralRunner.Name)
existingRunner, err := actionsClient.GetRunnerByName(ctx, ephemeralRunner.Name)
if err != nil {
actionsError := &actions.ActionsError{}
if !errors.As(err, &actionsError) {
return &ctrl.Result{}, fmt.Errorf("failed to generate JIT config with generic error: %w", err)
}
return nil, fmt.Errorf("failed to get runner by name: %w", err)
}
if actionsError.StatusCode != http.StatusConflict ||
!actionsError.IsException("AgentExistsException") {
return &ctrl.Result{}, fmt.Errorf("failed to generate JIT config with Actions service error: %w", err)
}
if existingRunner == nil {
log.Info("Runner with the same name does not exist anymore, re-queuing the reconciliation")
return nil, fmt.Errorf("%w: runner existed, retry configuration", retryableError)
}
// If the runner with the name we want already exists it means:
// - We might have a name collision.
// - Our previous reconciliation loop failed to update the
// status with the runnerId and runnerJITConfig after the `GenerateJitRunnerConfig`
// created the runner registration on the service.
// We will try to get the runner and see if it belongs to this AutoScalingRunnerSet;
// if so, we can simply delete the runner registration and create a new one.
log.Info("Getting runner jit config failed with conflict error, trying to get the runner by name", "runnerName", ephemeralRunner.Name)
existingRunner, err := actionsClient.GetRunnerByName(ctx, ephemeralRunner.Name)
log.Info("Found the runner with the same name", "runnerId", existingRunner.Id, "runnerScaleSetId", existingRunner.RunnerScaleSetId)
if existingRunner.RunnerScaleSetId == ephemeralRunner.Spec.RunnerScaleSetId {
log.Info("Removing the runner with the same name")
err := actionsClient.RemoveRunner(ctx, int64(existingRunner.Id))
if err != nil {
return &ctrl.Result{}, fmt.Errorf("failed to get runner by name: %w", err)
return nil, fmt.Errorf("failed to remove runner from the service: %w", err)
}
if existingRunner == nil {
log.Info("Runner with the same name does not exist, re-queuing the reconciliation")
return &ctrl.Result{Requeue: true}, nil
}
log.Info("Found the runner with the same name", "runnerId", existingRunner.Id, "runnerScaleSetId", existingRunner.RunnerScaleSetId)
if existingRunner.RunnerScaleSetId == ephemeralRunner.Spec.RunnerScaleSetId {
log.Info("Removing the runner with the same name")
err := actionsClient.RemoveRunner(ctx, int64(existingRunner.Id))
if err != nil {
return &ctrl.Result{}, fmt.Errorf("failed to remove runner from the service: %w", err)
}
log.Info("Removed the runner with the same name, re-queuing the reconciliation")
return &ctrl.Result{Requeue: true}, nil
}
// TODO: Do we want to mark the ephemeral runner as failed, and let EphemeralRunnerSet to clean it up, so we can recover from this situation?
// The situation is that the EphemeralRunner's name is already used by something else to register a runner, and we can't take the control back.
return &ctrl.Result{}, fmt.Errorf("runner with the same name but doesn't belong to this RunnerScaleSet: %w", err)
}
log.Info("Created ephemeral runner JIT config", "runnerId", jitConfig.Runner.Id)
log.Info("Updating ephemeral runner status with runnerId and runnerJITConfig")
err = patchSubResource(ctx, r.Status(), ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) {
obj.Status.RunnerId = jitConfig.Runner.Id
obj.Status.RunnerName = jitConfig.Runner.Name
obj.Status.RunnerJITConfig = jitConfig.EncodedJITConfig
})
if err != nil {
return &ctrl.Result{}, fmt.Errorf("failed to update runner status for RunnerId/RunnerName/RunnerJITConfig: %w", err)
log.Info("Removed the runner with the same name, re-queuing the reconciliation")
return nil, fmt.Errorf("%w: runner existed belonging to the scale set, retry configuration", retryableError)
}
// We want to continue without a requeue for faster pod creation.
//
// To do so, we update the status in-place, so that continuing the loop and
// requeuing (which skips updateStatusWithRunnerConfig in the next pass) will
// have the same effect.
ephemeralRunner.Status.RunnerId = jitConfig.Runner.Id
ephemeralRunner.Status.RunnerName = jitConfig.Runner.Name
ephemeralRunner.Status.RunnerJITConfig = jitConfig.EncodedJITConfig
log.Info("Updated ephemeral runner status with runnerId and runnerJITConfig")
return nil, nil
return nil, fmt.Errorf("%w: runner with the same name but doesn't belong to this RunnerScaleSet: %w", fatalError, err)
}
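createRunnerJitConfig now classifies failures through the retryableError and fatalError sentinels; their declarations are not shown in this diff, but the errors.Is branches in Reconcile imply package-level sentinel errors wrapped with %w. A self-contained sketch of that pattern:

package main

import (
	"errors"
	"fmt"
)

// Assumed package-level sentinels; the real declarations live outside this diff.
var (
	retryableError = errors.New("retryable")
	fatalError     = errors.New("fatal")
)

// configure wraps a sentinel with %w so callers can classify via errors.Is,
// as createRunnerJitConfig does above.
func configure() error {
	return fmt.Errorf("%w: runner existed, retry configuration", retryableError)
}

func main() {
	err := configure()
	switch {
	case errors.Is(err, retryableError):
		fmt.Println("requeue:", err)
	case errors.Is(err, fatalError):
		fmt.Println("delete the ephemeral runner:", err)
	default:
		fmt.Println("unrecoverable:", err)
	}
}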
func (r *EphemeralRunnerReconciler) createPod(ctx context.Context, runner *v1alpha1.EphemeralRunner, secret *corev1.Secret, log logr.Logger) (ctrl.Result, error) {
@@ -640,7 +675,7 @@ func (r *EphemeralRunnerReconciler) createPod(ctx context.Context, runner *v1alp
}
log.Info("Creating new pod for ephemeral runner")
newPod := r.ResourceBuilder.newEphemeralRunnerPod(ctx, runner, secret, envs...)
newPod := r.newEphemeralRunnerPod(ctx, runner, secret, envs...)
if err := ctrl.SetControllerReference(runner, newPod, r.Scheme); err != nil {
log.Error(err, "Failed to set controller reference to a new pod")
@@ -663,21 +698,21 @@ func (r *EphemeralRunnerReconciler) createPod(ctx context.Context, runner *v1alp
return ctrl.Result{}, nil
}
func (r *EphemeralRunnerReconciler) createSecret(ctx context.Context, runner *v1alpha1.EphemeralRunner, log logr.Logger) (*ctrl.Result, error) {
func (r *EphemeralRunnerReconciler) createSecret(ctx context.Context, runner *v1alpha1.EphemeralRunner, jitConfig *actions.RunnerScaleSetJitRunnerConfig, log logr.Logger) (*corev1.Secret, error) {
log.Info("Creating new secret for ephemeral runner")
jitSecret := r.ResourceBuilder.newEphemeralRunnerJitSecret(runner)
jitSecret := r.newEphemeralRunnerJitSecret(runner, jitConfig)
if err := ctrl.SetControllerReference(runner, jitSecret, r.Scheme); err != nil {
return &ctrl.Result{}, fmt.Errorf("failed to set controller reference: %w", err)
return nil, fmt.Errorf("failed to set controller reference: %w", err)
}
log.Info("Created new secret spec for ephemeral runner")
if err := r.Create(ctx, jitSecret); err != nil {
return &ctrl.Result{}, fmt.Errorf("failed to create jit secret: %w", err)
return nil, fmt.Errorf("failed to create jit secret: %w", err)
}
log.Info("Created ephemeral runner secret", "secretName", jitSecret.Name)
return nil, nil
return jitSecret, nil
}
// updateRunStatusFromPod is responsible for updating non-exiting statuses.
@@ -727,104 +762,8 @@ func (r *EphemeralRunnerReconciler) updateRunStatusFromPod(ctx context.Context,
return nil
}
func (r *EphemeralRunnerReconciler) actionsClientFor(ctx context.Context, runner *v1alpha1.EphemeralRunner) (actions.ActionsService, error) {
secret := new(corev1.Secret)
if err := r.Get(ctx, types.NamespacedName{Namespace: runner.Namespace, Name: runner.Spec.GitHubConfigSecret}, secret); err != nil {
return nil, fmt.Errorf("failed to get secret: %w", err)
}
opts, err := r.actionsClientOptionsFor(ctx, runner)
if err != nil {
return nil, fmt.Errorf("failed to get actions client options: %w", err)
}
return r.ActionsClient.GetClientFromSecret(
ctx,
runner.Spec.GitHubConfigUrl,
runner.Namespace,
secret.Data,
opts...,
)
}
func (r *EphemeralRunnerReconciler) actionsClientOptionsFor(ctx context.Context, runner *v1alpha1.EphemeralRunner) ([]actions.ClientOption, error) {
var opts []actions.ClientOption
if runner.Spec.Proxy != nil {
proxyFunc, err := runner.Spec.Proxy.ProxyFunc(func(s string) (*corev1.Secret, error) {
var secret corev1.Secret
err := r.Get(ctx, types.NamespacedName{Namespace: runner.Namespace, Name: s}, &secret)
if err != nil {
return nil, fmt.Errorf("failed to get proxy secret %s: %w", s, err)
}
return &secret, nil
})
if err != nil {
return nil, fmt.Errorf("failed to get proxy func: %w", err)
}
opts = append(opts, actions.WithProxy(proxyFunc))
}
tlsConfig := runner.Spec.GitHubServerTLS
if tlsConfig != nil {
pool, err := tlsConfig.ToCertPool(func(name, key string) ([]byte, error) {
var configmap corev1.ConfigMap
err := r.Get(
ctx,
types.NamespacedName{
Namespace: runner.Namespace,
Name: name,
},
&configmap,
)
if err != nil {
return nil, fmt.Errorf("failed to get configmap %s: %w", name, err)
}
return []byte(configmap.Data[key]), nil
})
if err != nil {
return nil, fmt.Errorf("failed to get tls config: %w", err)
}
opts = append(opts, actions.WithRootCAs(pool))
}
return opts, nil
}
// runnerRegisteredWithService checks if the runner is still registered with the service
// Returns found=false and err=nil if ephemeral runner does not exist in GitHub service and should be deleted
func (r EphemeralRunnerReconciler) runnerRegisteredWithService(ctx context.Context, runner *v1alpha1.EphemeralRunner, log logr.Logger) (found bool, err error) {
actionsClient, err := r.actionsClientFor(ctx, runner)
if err != nil {
return false, fmt.Errorf("failed to get Actions client for ScaleSet: %w", err)
}
log.Info("Checking if runner exists in GitHub service", "runnerId", runner.Status.RunnerId)
_, err = actionsClient.GetRunner(ctx, int64(runner.Status.RunnerId))
if err != nil {
actionsError := &actions.ActionsError{}
if !errors.As(err, &actionsError) {
return false, err
}
if actionsError.StatusCode != http.StatusNotFound ||
!actionsError.IsException("AgentNotFoundException") {
return false, fmt.Errorf("failed to check if runner exists in GitHub service: %w", err)
}
log.Info("Runner does not exist in GitHub service", "runnerId", runner.Status.RunnerId)
return false, nil
}
log.Info("Runner exists in GitHub service", "runnerId", runner.Status.RunnerId)
return true, nil
}
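The helper removed above also shows the recurring error-inspection idiom: unwrap into actions.ActionsError with errors.As, then match on the HTTP status code plus the exception name. A self-contained sketch of just that check; ActionsError here is a flattened stand-in (the real type nests the exception details, as the fake client setup below shows):

package main

import (
	"errors"
	"fmt"
	"net/http"
)

// ActionsError is a stand-in exposing only what this check needs.
type ActionsError struct {
	StatusCode    int
	ExceptionName string
}

func (e *ActionsError) Error() string {
	return fmt.Sprintf("actions error %d: %s", e.StatusCode, e.ExceptionName)
}

func (e *ActionsError) IsException(name string) bool {
	return e.ExceptionName == name
}

// runnerGone mirrors the removed runnerRegisteredWithService check: a 404
// carrying AgentNotFoundException means the runner is no longer registered.
func runnerGone(err error) (bool, error) {
	var actionsError *ActionsError
	if !errors.As(err, &actionsError) {
		return false, err
	}
	if actionsError.StatusCode == http.StatusNotFound &&
		actionsError.IsException("AgentNotFoundException") {
		return true, nil
	}
	return false, fmt.Errorf("failed to check if runner exists: %w", err)
}

func main() {
	err := &ActionsError{StatusCode: http.StatusNotFound, ExceptionName: "AgentNotFoundException"}
	fmt.Println(runnerGone(err)) // true <nil>
}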
func (r *EphemeralRunnerReconciler) deleteRunnerFromService(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) error {
client, err := r.actionsClientFor(ctx, ephemeralRunner)
client, err := r.GetActionsService(ctx, ephemeralRunner)
if err != nil {
return fmt.Errorf("failed to get actions client for runner: %w", err)
}

@@ -30,7 +30,7 @@ import (
const (
ephemeralRunnerTimeout = time.Second * 20
ephemeralRunnerInterval = time.Millisecond * 250
ephemeralRunnerInterval = time.Millisecond * 10
runnerImage = "ghcr.io/actions/actions-runner:latest"
)
@@ -107,10 +107,15 @@ var _ = Describe("EphemeralRunner", func() {
configSecret = createDefaultSecret(GinkgoT(), k8sClient, autoscalingNS.Name)
controller = &EphemeralRunnerReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
Log: logf.Log,
ActionsClient: fake.NewMultiClient(),
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
Log: logf.Log,
ResourceBuilder: ResourceBuilder{
SecretResolver: &SecretResolver{
k8sClient: mgr.GetClient(),
multiClient: fake.NewMultiClient(),
},
},
}
err := controller.SetupWithManager(mgr)
@@ -171,7 +176,7 @@ var _ = Describe("EphemeralRunner", func() {
).Should(BeEquivalentTo(ephemeralRunner.Name))
})
It("It should re-create pod on failure", func() {
It("It should re-create pod on failure and no job assigned", func() {
pod := new(corev1.Pod)
Eventually(func() (bool, error) {
if err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, pod); err != nil {
@@ -195,6 +200,67 @@ var _ = Describe("EphemeralRunner", func() {
).Should(BeEquivalentTo(true))
})
It("It should delete ephemeral runner on failure and job assigned", func() {
er := new(v1alpha1.EphemeralRunner)
// Check if finalizer is added
Eventually(
func() error {
err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, er)
return err
},
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
).Should(Succeed(), "failed to get ephemeral runner")
// update job id to simulate job assigned
er.Status.JobID = "1"
err := k8sClient.Status().Update(ctx, er)
Expect(err).To(BeNil(), "failed to update ephemeral runner status")
er = new(v1alpha1.EphemeralRunner)
Eventually(
func() (string, error) {
err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, er)
if err != nil {
return "", err
}
return er.Status.JobID, nil
},
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
).Should(BeEquivalentTo("1"))
pod := new(corev1.Pod)
Eventually(func() (bool, error) {
if err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, pod); err != nil {
return false, err
}
return true, nil
}).Should(BeEquivalentTo(true))
// delete pod to simulate failure
pod.Status.ContainerStatuses = append(pod.Status.ContainerStatuses, corev1.ContainerStatus{
Name: v1alpha1.EphemeralRunnerContainerName,
State: corev1.ContainerState{
Terminated: &corev1.ContainerStateTerminated{
ExitCode: 1,
},
},
})
err = k8sClient.Status().Update(ctx, pod)
Expect(err).To(BeNil(), "Failed to update pod status")
er = new(v1alpha1.EphemeralRunner)
Eventually(
func() bool {
err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, er)
return kerrors.IsNotFound(err)
},
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
).Should(BeTrue(), "Ephemeral runner should eventually be deleted")
})
It("It should failed if a pod template is invalid", func() {
invalideEphemeralRunner := newExampleRunner("invalid-ephemeral-runner", autoscalingNS.Name, configSecret.Name)
invalideEphemeralRunner.Spec.Spec.PriorityClassName = "notexist"
@@ -203,13 +269,22 @@ var _ = Describe("EphemeralRunner", func() {
Expect(err).To(BeNil())
updated := new(v1alpha1.EphemeralRunner)
Eventually(func() (corev1.PodPhase, error) {
err := k8sClient.Get(ctx, client.ObjectKey{Name: invalideEphemeralRunner.Name, Namespace: invalideEphemeralRunner.Namespace}, updated)
if err != nil {
return "", nil
}
return updated.Status.Phase, nil
}, ephemeralRunnerTimeout, ephemeralRunnerInterval).Should(BeEquivalentTo(corev1.PodFailed))
Eventually(
func() (corev1.PodPhase, error) {
err := k8sClient.Get(
ctx,
client.ObjectKey{Name: invalideEphemeralRunner.Name, Namespace: invalideEphemeralRunner.Namespace},
updated,
)
if err != nil {
return "", nil
}
return updated.Status.Phase, nil
},
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
).Should(BeEquivalentTo(corev1.PodFailed))
Expect(updated.Status.Reason).Should(Equal("InvalidPod"))
Expect(updated.Status.Message).Should(Equal("Failed to create the pod: pods \"invalid-ephemeral-runner\" is forbidden: no PriorityClass with name notexist was found"))
})
@@ -528,44 +603,26 @@ var _ = Describe("EphemeralRunner", func() {
).Should(BeEquivalentTo(""))
})
It("It should not re-create pod indefinitely", func() {
It("It should eventually delete ephemeral runner after consecutive failures", func() {
updated := new(v1alpha1.EphemeralRunner)
pod := new(corev1.Pod)
Eventually(
func() (bool, error) {
err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, updated)
if err != nil {
return false, err
}
err = k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, pod)
if err != nil {
if kerrors.IsNotFound(err) && len(updated.Status.Failures) > 5 {
return true, nil
}
return false, err
}
pod.Status.ContainerStatuses = append(pod.Status.ContainerStatuses, corev1.ContainerStatus{
Name: v1alpha1.EphemeralRunnerContainerName,
State: corev1.ContainerState{
Terminated: &corev1.ContainerStateTerminated{
ExitCode: 1,
},
},
})
err = k8sClient.Status().Update(ctx, pod)
Expect(err).To(BeNil(), "Failed to update pod status")
return false, fmt.Errorf("pod haven't failed for 5 times.")
func() error {
return k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, updated)
},
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
).Should(BeEquivalentTo(true), "we should stop creating pod after 5 failures")
).Should(Succeed(), "failed to get ephemeral runner")
failEphemeralRunnerPod := func() *corev1.Pod {
pod := new(corev1.Pod)
Eventually(
func() error {
return k8sClient.Get(ctx, client.ObjectKey{Name: updated.Name, Namespace: updated.Namespace}, pod)
},
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
).Should(Succeed(), "failed to get ephemeral runner pod")
// In case we still have pod created due to controller-runtime cache delay, mark the container as exited
err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, pod)
if err == nil {
pod.Status.ContainerStatuses = append(pod.Status.ContainerStatuses, corev1.ContainerStatus{
Name: v1alpha1.EphemeralRunnerContainerName,
State: corev1.ContainerState{
@@ -576,25 +633,70 @@ var _ = Describe("EphemeralRunner", func() {
})
err := k8sClient.Status().Update(ctx, pod)
Expect(err).To(BeNil(), "Failed to update pod status")
return pod
}
// EphemeralRunner should failed with reason TooManyPodFailures
Eventually(func() (string, error) {
err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, updated)
if err != nil {
return "", err
}
return updated.Status.Reason, nil
}, ephemeralRunnerTimeout, ephemeralRunnerInterval).Should(BeEquivalentTo("TooManyPodFailures"), "Reason should be TooManyPodFailures")
for i := range 5 {
pod := failEphemeralRunnerPod()
// EphemeralRunner should not have any pod
Eventually(func() (bool, error) {
err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, pod)
if err == nil {
return false, nil
}
return kerrors.IsNotFound(err), nil
}, ephemeralRunnerTimeout, ephemeralRunnerInterval).Should(BeEquivalentTo(true))
Eventually(
func() (int, error) {
err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, updated)
if err != nil {
return 0, err
}
return len(updated.Status.Failures), nil
},
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
).Should(BeEquivalentTo(i + 1))
Eventually(
func() error {
nextPod := new(corev1.Pod)
err := k8sClient.Get(ctx, client.ObjectKey{Name: pod.Name, Namespace: pod.Namespace}, nextPod)
if err != nil {
return err
}
if nextPod.UID != pod.UID {
return nil
}
return fmt.Errorf("pod not recreated")
},
).WithTimeout(20*time.Second).WithPolling(10*time.Millisecond).Should(Succeed(), "pod should be recreated")
Eventually(
func() (bool, error) {
pod := new(corev1.Pod)
err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, pod)
if err != nil {
return false, err
}
for _, cs := range pod.Status.ContainerStatuses {
if cs.Name == v1alpha1.EphemeralRunnerContainerName {
return cs.State.Terminated == nil, nil
}
}
return true, nil
},
).WithTimeout(20*time.Second).WithPolling(10*time.Millisecond).Should(BeEquivalentTo(true), "recreated pod should not be terminated")
}
failEphemeralRunnerPod()
Eventually(
func() (bool, error) {
err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, updated)
if kerrors.IsNotFound(err) {
return true, nil
}
return false, err
},
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
).Should(BeTrue(), "Ephemeral runner should eventually be deleted")
})
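Note that the tests above mix Gomega's two polling styles, positional timeout/interval arguments and the fluent WithTimeout/WithPolling chain; both configure the same thing. A small self-contained illustration, assuming a Gomega version that provides the fluent API (which the chained calls above already rely on):

package sketch_test

import (
	"testing"
	"time"

	. "github.com/onsi/gomega"
)

func TestEventuallyStyles(t *testing.T) {
	g := NewWithT(t)
	fetch := func() error { return nil }

	// Positional timeout and polling interval, as most assertions above use...
	g.Eventually(fetch, 20*time.Second, 10*time.Millisecond).Should(Succeed())

	// ...and the equivalent fluent form, used by the pod-recreation loop.
	g.Eventually(fetch).
		WithTimeout(20 * time.Second).
		WithPolling(10 * time.Millisecond).
		Should(Succeed())
}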
It("It should re-create pod on eviction", func() {
@@ -643,53 +745,6 @@ var _ = Describe("EphemeralRunner", func() {
).Should(BeEquivalentTo(true))
})
It("It should re-create pod on exit status 0, but runner exists within the service", func() {
pod := new(corev1.Pod)
Eventually(
func() (bool, error) {
if err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, pod); err != nil {
return false, err
}
return true, nil
},
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
).Should(BeEquivalentTo(true))
pod.Status.ContainerStatuses = append(pod.Status.ContainerStatuses, corev1.ContainerStatus{
Name: v1alpha1.EphemeralRunnerContainerName,
State: corev1.ContainerState{
Terminated: &corev1.ContainerStateTerminated{
ExitCode: 0,
},
},
})
err := k8sClient.Status().Update(ctx, pod)
Expect(err).To(BeNil(), "failed to update pod status")
updated := new(v1alpha1.EphemeralRunner)
Eventually(func() (bool, error) {
err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, updated)
if err != nil {
return false, err
}
return len(updated.Status.Failures) == 1, nil
}, ephemeralRunnerTimeout, ephemeralRunnerInterval).Should(BeEquivalentTo(true))
// should re-create after failure
Eventually(
func() (bool, error) {
pod := new(corev1.Pod)
if err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, pod); err != nil {
return false, err
}
return true, nil
},
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
).Should(BeEquivalentTo(true))
})
It("It should not set the phase to succeeded without pod termination status", func() {
pod := new(corev1.Pod)
Eventually(
@@ -762,22 +817,27 @@ var _ = Describe("EphemeralRunner", func() {
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
Log: logf.Log,
ActionsClient: fake.NewMultiClient(
fake.WithDefaultClient(
fake.NewFakeClient(
fake.WithGetRunner(
ResourceBuilder: ResourceBuilder{
SecretResolver: &SecretResolver{
k8sClient: mgr.GetClient(),
multiClient: fake.NewMultiClient(
fake.WithDefaultClient(
fake.NewFakeClient(
fake.WithGetRunner(
nil,
&actions.ActionsError{
StatusCode: http.StatusNotFound,
Err: &actions.ActionsExceptionError{
ExceptionName: "AgentNotFoundException",
},
},
),
),
nil,
&actions.ActionsError{
StatusCode: http.StatusNotFound,
Err: &actions.ActionsExceptionError{
ExceptionName: "AgentNotFoundException",
},
},
),
),
nil,
),
),
},
},
}
err := controller.SetupWithManager(mgr)
Expect(err).To(BeNil(), "failed to setup controller")
@@ -785,7 +845,7 @@ var _ = Describe("EphemeralRunner", func() {
startManagers(GinkgoT(), mgr)
})
It("It should set the Phase to Succeeded", func() {
It("It should delete EphemeralRunner when pod exits successfully", func() {
ephemeralRunner := newExampleRunner("test-runner", autoscalingNS.Name, configSecret.Name)
err := k8sClient.Create(ctx, ephemeralRunner)
@@ -811,13 +871,18 @@ var _ = Describe("EphemeralRunner", func() {
Expect(err).To(BeNil(), "failed to update pod status")
updated := new(v1alpha1.EphemeralRunner)
Eventually(func() (corev1.PodPhase, error) {
err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, updated)
if err != nil {
return "", nil
}
return updated.Status.Phase, nil
}, ephemeralRunnerTimeout, ephemeralRunnerInterval).Should(BeEquivalentTo(corev1.PodSucceeded))
Eventually(
func() bool {
err := k8sClient.Get(
ctx,
client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace},
updated,
)
return kerrors.IsNotFound(err)
},
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
).Should(BeTrue())
})
})
@@ -834,10 +899,15 @@ var _ = Describe("EphemeralRunner", func() {
configSecret = createDefaultSecret(GinkgoT(), k8sClient, autoScalingNS.Name)
controller = &EphemeralRunnerReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
Log: logf.Log,
ActionsClient: fake.NewMultiClient(),
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
Log: logf.Log,
ResourceBuilder: ResourceBuilder{
SecretResolver: &SecretResolver{
k8sClient: mgr.GetClient(),
multiClient: fake.NewMultiClient(),
},
},
}
err := controller.SetupWithManager(mgr)
Expect(err).To(BeNil(), "failed to setup controller")
@@ -847,7 +917,12 @@ var _ = Describe("EphemeralRunner", func() {
It("uses an actions client with proxy transport", func() {
// Use an actual client
controller.ActionsClient = actions.NewMultiClient(logr.Discard())
controller.ResourceBuilder = ResourceBuilder{
SecretResolver: &SecretResolver{
k8sClient: mgr.GetClient(),
multiClient: actions.NewMultiClient(logr.Discard()),
},
}
proxySuccessfulllyCalled := false
proxy := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
@@ -998,10 +1073,15 @@ var _ = Describe("EphemeralRunner", func() {
Expect(err).NotTo(HaveOccurred(), "failed to create configmap with root CAs")
controller = &EphemeralRunnerReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
Log: logf.Log,
ActionsClient: fake.NewMultiClient(),
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
Log: logf.Log,
ResourceBuilder: ResourceBuilder{
SecretResolver: &SecretResolver{
k8sClient: mgr.GetClient(),
multiClient: fake.NewMultiClient(),
},
},
}
err = controller.SetupWithManager(mgr)
@@ -1032,11 +1112,16 @@ var _ = Describe("EphemeralRunner", func() {
server.StartTLS()
// Use an actual client
controller.ActionsClient = actions.NewMultiClient(logr.Discard())
controller.ResourceBuilder = ResourceBuilder{
SecretResolver: &SecretResolver{
k8sClient: mgr.GetClient(),
multiClient: actions.NewMultiClient(logr.Discard()),
},
}
ephemeralRunner := newExampleRunner("test-runner", autoScalingNS.Name, configSecret.Name)
ephemeralRunner.Spec.GitHubConfigUrl = server.ConfigURLForOrg("my-org")
ephemeralRunner.Spec.GitHubServerTLS = &v1alpha1.GitHubServerTLSConfig{
ephemeralRunner.Spec.GitHubServerTLS = &v1alpha1.TLSConfig{
CertificateFrom: &v1alpha1.TLSCertificateSource{
ConfigMapKeyRef: &corev1.ConfigMapKeySelector{
LocalObjectReference: corev1.LocalObjectReference{


@@ -83,7 +83,7 @@ func (r *EphemeralRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl.R
}
// Requested deletion does not need to be reconciled.
if !ephemeralRunnerSet.ObjectMeta.DeletionTimestamp.IsZero() {
if !ephemeralRunnerSet.DeletionTimestamp.IsZero() {
if !controllerutil.ContainsFinalizer(ephemeralRunnerSet, ephemeralRunnerSetFinalizerName) {
return ctrl.Result{}, nil
}
@@ -331,7 +331,7 @@ func (r *EphemeralRunnerSetReconciler) cleanUpEphemeralRunners(ctx context.Conte
return false, nil
}
actionsClient, err := r.actionsClientFor(ctx, ephemeralRunnerSet)
actionsClient, err := r.GetActionsService(ctx, ephemeralRunnerSet)
if err != nil {
return false, err
}
@@ -360,7 +360,7 @@ func (r *EphemeralRunnerSetReconciler) createEphemeralRunners(ctx context.Contex
// Track multiple errors at once and return the bundle.
errs := make([]error, 0)
for i := 0; i < count; i++ {
ephemeralRunner := r.ResourceBuilder.newEphemeralRunner(runnerSet)
ephemeralRunner := r.newEphemeralRunner(runnerSet)
if runnerSet.Spec.EphemeralRunnerSpec.Proxy != nil {
ephemeralRunner.Spec.ProxySecretRef = proxyEphemeralRunnerSetSecretName(runnerSet)
}
@@ -439,7 +439,7 @@ func (r *EphemeralRunnerSetReconciler) deleteIdleEphemeralRunners(ctx context.Co
log.Info("No pending or running ephemeral runners running at this time for scale down")
return nil
}
actionsClient, err := r.actionsClientFor(ctx, ephemeralRunnerSet)
actionsClient, err := r.GetActionsService(ctx, ephemeralRunnerSet)
if err != nil {
return fmt.Errorf("failed to create actions client for ephemeral runner replica set: %w", err)
}
@@ -453,8 +453,13 @@ func (r *EphemeralRunnerSetReconciler) deleteIdleEphemeralRunners(ctx context.Co
continue
}
if !isDone && ephemeralRunner.Status.JobRequestId > 0 {
log.Info("Skipping ephemeral runner since it is running a job", "name", ephemeralRunner.Name, "jobRequestId", ephemeralRunner.Status.JobRequestId)
if !isDone && ephemeralRunner.HasJob() {
log.Info(
"Skipping ephemeral runner since it is running a job",
"name", ephemeralRunner.Name,
"workflowRunId", ephemeralRunner.Status.WorkflowRunId,
"jobId", ephemeralRunner.Status.JobID,
)
continue
}
@@ -502,73 +507,6 @@ func (r *EphemeralRunnerSetReconciler) deleteEphemeralRunnerWithActionsClient(ct
return true, nil
}
func (r *EphemeralRunnerSetReconciler) actionsClientFor(ctx context.Context, rs *v1alpha1.EphemeralRunnerSet) (actions.ActionsService, error) {
secret := new(corev1.Secret)
if err := r.Get(ctx, types.NamespacedName{Namespace: rs.Namespace, Name: rs.Spec.EphemeralRunnerSpec.GitHubConfigSecret}, secret); err != nil {
return nil, fmt.Errorf("failed to get secret: %w", err)
}
opts, err := r.actionsClientOptionsFor(ctx, rs)
if err != nil {
return nil, fmt.Errorf("failed to get actions client options: %w", err)
}
return r.ActionsClient.GetClientFromSecret(
ctx,
rs.Spec.EphemeralRunnerSpec.GitHubConfigUrl,
rs.Namespace,
secret.Data,
opts...,
)
}
func (r *EphemeralRunnerSetReconciler) actionsClientOptionsFor(ctx context.Context, rs *v1alpha1.EphemeralRunnerSet) ([]actions.ClientOption, error) {
var opts []actions.ClientOption
if rs.Spec.EphemeralRunnerSpec.Proxy != nil {
proxyFunc, err := rs.Spec.EphemeralRunnerSpec.Proxy.ProxyFunc(func(s string) (*corev1.Secret, error) {
var secret corev1.Secret
err := r.Get(ctx, types.NamespacedName{Namespace: rs.Namespace, Name: s}, &secret)
if err != nil {
return nil, fmt.Errorf("failed to get secret %s: %w", s, err)
}
return &secret, nil
})
if err != nil {
return nil, fmt.Errorf("failed to get proxy func: %w", err)
}
opts = append(opts, actions.WithProxy(proxyFunc))
}
tlsConfig := rs.Spec.EphemeralRunnerSpec.GitHubServerTLS
if tlsConfig != nil {
pool, err := tlsConfig.ToCertPool(func(name, key string) ([]byte, error) {
var configmap corev1.ConfigMap
err := r.Get(
ctx,
types.NamespacedName{
Namespace: rs.Namespace,
Name: name,
},
&configmap,
)
if err != nil {
return nil, fmt.Errorf("failed to get configmap %s: %w", name, err)
}
return []byte(configmap.Data[key]), nil
})
if err != nil {
return nil, fmt.Errorf("failed to get tls config: %w", err)
}
opts = append(opts, actions.WithRootCAs(pool))
}
return opts, nil
}
// SetupWithManager sets up the controller with the Manager.
func (r *EphemeralRunnerSetReconciler) SetupWithManager(mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr).
@@ -641,7 +579,7 @@ func newEphemeralRunnerState(ephemeralRunnerList *v1alpha1.EphemeralRunnerList)
if err == nil && patchID > ephemeralRunnerState.latestPatchID {
ephemeralRunnerState.latestPatchID = patchID
}
if !r.ObjectMeta.DeletionTimestamp.IsZero() {
if !r.DeletionTimestamp.IsZero() {
ephemeralRunnerState.deleting = append(ephemeralRunnerState.deleting, r)
continue
}


@@ -10,6 +10,7 @@ import (
"os"
"path/filepath"
"strings"
"testing"
"time"
corev1 "k8s.io/api/core/v1"
@@ -21,6 +22,7 @@ import (
"github.com/go-logr/logr"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/stretchr/testify/require"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
@@ -35,6 +37,10 @@ const (
ephemeralRunnerSetTestGitHubToken = "gh_token"
)
func TestPrecomputedConstants(t *testing.T) {
require.Equal(t, len(failedRunnerBackoff), maxFailures+1)
}
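
The test above pins an invariant the reconciler depends on: the backoff table needs one entry per failure count from 0 through maxFailures, so indexing by the current failure count can never go out of range. A minimal, self-contained sketch of that lookup follows; the durations and the maxFailures value here are illustrative, not the controller's actual table:

package main

import (
	"fmt"
	"time"
)

// Hypothetical mirror of the controller's table. The test above asserts
// len(failedRunnerBackoff) == maxFailures+1, which is exactly what keeps
// failedRunnerBackoff[failures] in range for every allowed failure count.
const maxFailures = 5

var failedRunnerBackoff = []time.Duration{
	0,
	5 * time.Second,
	10 * time.Second,
	20 * time.Second,
	40 * time.Second,
	80 * time.Second,
}

func main() {
	for failures := 0; failures <= maxFailures+1; failures++ {
		if failures > maxFailures {
			// matches the TooManyPodFailures path exercised earlier in the tests
			fmt.Printf("%d failures: delete the ephemeral runner\n", failures)
			continue
		}
		fmt.Printf("%d failures: recreate pod after %v\n", failures, failedRunnerBackoff[failures])
	}
}
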
var _ = Describe("Test EphemeralRunnerSet controller", func() {
var ctx context.Context
var mgr ctrl.Manager
@@ -48,10 +54,15 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
configSecret = createDefaultSecret(GinkgoT(), k8sClient, autoscalingNS.Name)
controller := &EphemeralRunnerSetReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
Log: logf.Log,
ActionsClient: fake.NewMultiClient(),
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
Log: logf.Log,
ResourceBuilder: ResourceBuilder{
SecretResolver: &SecretResolver{
k8sClient: mgr.GetClient(),
multiClient: fake.NewMultiClient(),
},
},
}
err := controller.SetupWithManager(mgr)
Expect(err).NotTo(HaveOccurred(), "failed to setup controller")
@@ -1098,10 +1109,15 @@ var _ = Describe("Test EphemeralRunnerSet controller with proxy settings", func(
configSecret = createDefaultSecret(GinkgoT(), k8sClient, autoscalingNS.Name)
controller := &EphemeralRunnerSetReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
Log: logf.Log,
ActionsClient: actions.NewMultiClient(logr.Discard()),
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
Log: logf.Log,
ResourceBuilder: ResourceBuilder{
SecretResolver: &SecretResolver{
k8sClient: mgr.GetClient(),
multiClient: actions.NewMultiClient(logr.Discard()),
},
},
}
err := controller.SetupWithManager(mgr)
Expect(err).NotTo(HaveOccurred(), "failed to setup controller")
@@ -1397,10 +1413,15 @@ var _ = Describe("Test EphemeralRunnerSet controller with custom root CA", func(
Expect(err).NotTo(HaveOccurred(), "failed to create configmap with root CAs")
controller := &EphemeralRunnerSetReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
Log: logf.Log,
ActionsClient: actions.NewMultiClient(logr.Discard()),
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
Log: logf.Log,
ResourceBuilder: ResourceBuilder{
SecretResolver: &SecretResolver{
k8sClient: mgr.GetClient(),
multiClient: actions.NewMultiClient(logr.Discard()),
},
},
}
err = controller.SetupWithManager(mgr)
Expect(err).NotTo(HaveOccurred(), "failed to setup controller")
@@ -1439,7 +1460,7 @@ var _ = Describe("Test EphemeralRunnerSet controller with custom root CA", func(
EphemeralRunnerSpec: v1alpha1.EphemeralRunnerSpec{
GitHubConfigUrl: server.ConfigURLForOrg("my-org"),
GitHubConfigSecret: configSecret.Name,
GitHubServerTLS: &v1alpha1.GitHubServerTLSConfig{
GitHubServerTLS: &v1alpha1.TLSConfig{
CertificateFrom: &v1alpha1.TLSCertificateSource{
ConfigMapKeyRef: &corev1.ConfigMapKeySelector{
LocalObjectReference: corev1.LocalObjectReference{


@@ -0,0 +1,12 @@
package actionsgithubcom
type controllerError string
func (e controllerError) Error() string {
return string(e)
}
const (
retryableError = controllerError("retryable error")
fatalError = controllerError("fatal error")
)
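
Worth noting why these sentinels are constants of a named string type rather than errors.New variables: a named string type is comparable, so its constant values can be matched through fmt.Errorf wrapping with errors.Is. A runnable sketch of that property:

package main

import (
	"errors"
	"fmt"
)

type controllerError string

func (e controllerError) Error() string { return string(e) }

const retryableError = controllerError("retryable error")

// reconcileOnce wraps the sentinel with call-site context, as a reconciler
// typically would before bubbling the error up.
func reconcileOnce() error {
	return fmt.Errorf("creating pod: %w", retryableError)
}

func main() {
	// errors.Is unwraps the chain and compares with ==, which works because
	// controllerError values are comparable constants.
	if err := reconcileOnce(); errors.Is(err, retryableError) {
		fmt.Println("requeue:", err)
	}
}
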


@@ -5,17 +5,20 @@ import (
"context"
"encoding/json"
"fmt"
"maps"
"math"
"net"
"strconv"
"strings"
"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1/appconfig"
"github.com/actions/actions-runner-controller/build"
listenerconfig "github.com/actions/actions-runner-controller/cmd/ghalistener/config"
ghalistenerconfig "github.com/actions/actions-runner-controller/cmd/ghalistener/config"
"github.com/actions/actions-runner-controller/github/actions"
"github.com/actions/actions-runner-controller/hash"
"github.com/actions/actions-runner-controller/logging"
"github.com/actions/actions-runner-controller/vault/azurekeyvault"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -71,6 +74,7 @@ func SetListenerEntrypoint(entrypoint string) {
type ResourceBuilder struct {
ExcludeLabelPropagationPrefixes []string
*SecretResolver
}
// boolPtr returns a pointer to a bool value
@@ -120,6 +124,7 @@ func (b *ResourceBuilder) newAutoScalingListener(autoscalingRunnerSet *v1alpha1.
Spec: v1alpha1.AutoscalingListenerSpec{
GitHubConfigUrl: autoscalingRunnerSet.Spec.GitHubConfigUrl,
GitHubConfigSecret: autoscalingRunnerSet.Spec.GitHubConfigSecret,
VaultConfig: autoscalingRunnerSet.VaultConfig(),
RunnerScaleSetId: runnerScaleSetId,
AutoscalingRunnerSetNamespace: autoscalingRunnerSet.Namespace,
AutoscalingRunnerSetName: autoscalingRunnerSet.Name,
@@ -159,7 +164,7 @@ func (lm *listenerMetricsServerConfig) containerPort() (corev1.ContainerPort, er
}, nil
}
func (b *ResourceBuilder) newScaleSetListenerConfig(autoscalingListener *v1alpha1.AutoscalingListener, secret *corev1.Secret, metricsConfig *listenerMetricsServerConfig, cert string) (*corev1.Secret, error) {
func (b *ResourceBuilder) newScaleSetListenerConfig(autoscalingListener *v1alpha1.AutoscalingListener, appConfig *appconfig.AppConfig, metricsConfig *listenerMetricsServerConfig, cert string) (*corev1.Secret, error) {
var (
metricsAddr = ""
metricsEndpoint = ""
@@ -169,30 +174,8 @@ func (b *ResourceBuilder) newScaleSetListenerConfig(autoscalingListener *v1alpha
metricsEndpoint = metricsConfig.endpoint
}
var appID int64
if id, ok := secret.Data["github_app_id"]; ok {
var err error
appID, err = strconv.ParseInt(string(id), 10, 64)
if err != nil {
return nil, fmt.Errorf("failed to convert github_app_id to int: %v", err)
}
}
var appInstallationID int64
if id, ok := secret.Data["github_app_installation_id"]; ok {
var err error
appInstallationID, err = strconv.ParseInt(string(id), 10, 64)
if err != nil {
return nil, fmt.Errorf("failed to convert github_app_installation_id to int: %v", err)
}
}
config := listenerconfig.Config{
config := ghalistenerconfig.Config{
ConfigureUrl: autoscalingListener.Spec.GitHubConfigUrl,
AppID: appID,
AppInstallationID: appInstallationID,
AppPrivateKey: string(secret.Data["github_app_private_key"]),
Token: string(secret.Data["github_token"]),
EphemeralRunnerSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
EphemeralRunnerSetName: autoscalingListener.Spec.EphemeralRunnerSetName,
MaxRunners: autoscalingListener.Spec.MaxRunners,
@@ -207,6 +190,24 @@ func (b *ResourceBuilder) newScaleSetListenerConfig(autoscalingListener *v1alpha
Metrics: autoscalingListener.Spec.Metrics,
}
vault := autoscalingListener.Spec.VaultConfig
if vault == nil {
config.AppConfig = appConfig
} else {
config.VaultType = vault.Type
config.VaultLookupKey = autoscalingListener.Spec.GitHubConfigSecret
config.AzureKeyVaultConfig = &azurekeyvault.Config{
TenantID: vault.AzureKeyVault.TenantID,
ClientID: vault.AzureKeyVault.ClientID,
URL: vault.AzureKeyVault.URL,
CertificatePath: vault.AzureKeyVault.CertificatePath,
}
}
if err := config.Validate(); err != nil {
return nil, fmt.Errorf("invalid listener config: %w", err)
}
var buf bytes.Buffer
if err := json.NewEncoder(&buf).Encode(config); err != nil {
return nil, fmt.Errorf("failed to encode config: %w", err)
@@ -223,7 +224,7 @@ func (b *ResourceBuilder) newScaleSetListenerConfig(autoscalingListener *v1alpha
}, nil
}
func (b *ResourceBuilder) newScaleSetListenerPod(autoscalingListener *v1alpha1.AutoscalingListener, podConfig *corev1.Secret, serviceAccount *corev1.ServiceAccount, secret *corev1.Secret, metricsConfig *listenerMetricsServerConfig, envs ...corev1.EnvVar) (*corev1.Pod, error) {
func (b *ResourceBuilder) newScaleSetListenerPod(autoscalingListener *v1alpha1.AutoscalingListener, podConfig *corev1.Secret, serviceAccount *corev1.ServiceAccount, metricsConfig *listenerMetricsServerConfig, envs ...corev1.EnvVar) (*corev1.Pod, error) {
listenerEnv := []corev1.EnvVar{
{
Name: "LISTENER_CONFIG_PATH",
@@ -278,9 +279,7 @@ func (b *ResourceBuilder) newScaleSetListenerPod(autoscalingListener *v1alpha1.A
}
labels := make(map[string]string, len(autoscalingListener.Labels))
for key, val := range autoscalingListener.Labels {
labels[key] = val
}
maps.Copy(labels, autoscalingListener.Labels)
newRunnerScaleSetListenerPod := &corev1.Pod{
TypeMeta: metav1.TypeMeta{
@@ -429,7 +428,7 @@ func mergeListenerContainer(base, from *corev1.Container) {
func (b *ResourceBuilder) newScaleSetListenerServiceAccount(autoscalingListener *v1alpha1.AutoscalingListener) *corev1.ServiceAccount {
return &corev1.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{
Name: scaleSetListenerServiceAccountName(autoscalingListener),
Name: autoscalingListener.Name,
Namespace: autoscalingListener.Namespace,
Labels: b.mergeLabels(autoscalingListener.Labels, map[string]string{
LabelKeyGitHubScaleSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
@@ -444,7 +443,7 @@ func (b *ResourceBuilder) newScaleSetListenerRole(autoscalingListener *v1alpha1.
rulesHash := hash.ComputeTemplateHash(&rules)
newRole := &rbacv1.Role{
ObjectMeta: metav1.ObjectMeta{
Name: scaleSetListenerRoleName(autoscalingListener),
Name: autoscalingListener.Name,
Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
Labels: b.mergeLabels(autoscalingListener.Labels, map[string]string{
LabelKeyGitHubScaleSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
@@ -478,7 +477,7 @@ func (b *ResourceBuilder) newScaleSetListenerRoleBinding(autoscalingListener *v1
newRoleBinding := &rbacv1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: scaleSetListenerRoleName(autoscalingListener),
Name: autoscalingListener.Name,
Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
Labels: b.mergeLabels(autoscalingListener.Labels, map[string]string{
LabelKeyGitHubScaleSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
@@ -496,25 +495,6 @@ func (b *ResourceBuilder) newScaleSetListenerRoleBinding(autoscalingListener *v1
return newRoleBinding
}
func (b *ResourceBuilder) newScaleSetListenerSecretMirror(autoscalingListener *v1alpha1.AutoscalingListener, secret *corev1.Secret) *corev1.Secret {
dataHash := hash.ComputeTemplateHash(&secret.Data)
newListenerSecret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: scaleSetListenerSecretMirrorName(autoscalingListener),
Namespace: autoscalingListener.Namespace,
Labels: b.mergeLabels(autoscalingListener.Labels, map[string]string{
LabelKeyGitHubScaleSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
LabelKeyGitHubScaleSetName: autoscalingListener.Spec.AutoscalingRunnerSetName,
"secret-data-hash": dataHash,
}),
},
Data: secret.DeepCopy().Data,
}
return newListenerSecret
}
func (b *ResourceBuilder) newEphemeralRunnerSet(autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet) (*v1alpha1.EphemeralRunnerSet, error) {
runnerScaleSetId, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdAnnotationKey])
if err != nil {
@@ -543,8 +523,8 @@ func (b *ResourceBuilder) newEphemeralRunnerSet(autoscalingRunnerSet *v1alpha1.A
newEphemeralRunnerSet := &v1alpha1.EphemeralRunnerSet{
TypeMeta: metav1.TypeMeta{},
ObjectMeta: metav1.ObjectMeta{
GenerateName: autoscalingRunnerSet.ObjectMeta.Name + "-",
Namespace: autoscalingRunnerSet.ObjectMeta.Namespace,
GenerateName: autoscalingRunnerSet.Name + "-",
Namespace: autoscalingRunnerSet.Namespace,
Labels: labels,
Annotations: newAnnotations,
OwnerReferences: []metav1.OwnerReference{
@@ -567,6 +547,7 @@ func (b *ResourceBuilder) newEphemeralRunnerSet(autoscalingRunnerSet *v1alpha1.A
Proxy: autoscalingRunnerSet.Spec.Proxy,
GitHubServerTLS: autoscalingRunnerSet.Spec.GitHubServerTLS,
PodTemplateSpec: autoscalingRunnerSet.Spec.Template,
VaultConfig: autoscalingRunnerSet.VaultConfig(),
},
},
}
@@ -588,6 +569,7 @@ func (b *ResourceBuilder) newEphemeralRunner(ephemeralRunnerSet *v1alpha1.Epheme
for key, val := range ephemeralRunnerSet.Annotations {
annotations[key] = val
}
annotations[AnnotationKeyPatchID] = strconv.Itoa(ephemeralRunnerSet.Spec.PatchID)
return &v1alpha1.EphemeralRunner{
TypeMeta: metav1.TypeMeta{},
@@ -617,18 +599,18 @@ func (b *ResourceBuilder) newEphemeralRunnerPod(ctx context.Context, runner *v1a
labels := map[string]string{}
annotations := map[string]string{}
for k, v := range runner.ObjectMeta.Labels {
for k, v := range runner.Labels {
labels[k] = v
}
for k, v := range runner.Spec.PodTemplateSpec.Labels {
for k, v := range runner.Spec.Labels {
labels[k] = v
}
labels["actions-ephemeral-runner"] = string(corev1.ConditionTrue)
for k, v := range runner.ObjectMeta.Annotations {
for k, v := range runner.Annotations {
annotations[k] = v
}
for k, v := range runner.Spec.PodTemplateSpec.Annotations {
for k, v := range runner.Spec.Annotations {
annotations[k] = v
}
@@ -636,12 +618,12 @@ func (b *ResourceBuilder) newEphemeralRunnerPod(ctx context.Context, runner *v1a
FilterLabels(labels, LabelKeyRunnerTemplateHash),
annotations,
runner.Spec,
runner.Status.RunnerJITConfig,
secret.Data,
)
objectMeta := metav1.ObjectMeta{
Name: runner.ObjectMeta.Name,
Namespace: runner.ObjectMeta.Namespace,
Name: runner.Name,
Namespace: runner.Namespace,
Labels: labels,
Annotations: annotations,
OwnerReferences: []metav1.OwnerReference{
@@ -657,10 +639,10 @@ func (b *ResourceBuilder) newEphemeralRunnerPod(ctx context.Context, runner *v1a
}
newPod.ObjectMeta = objectMeta
newPod.Spec = runner.Spec.PodTemplateSpec.Spec
newPod.Spec.Containers = make([]corev1.Container, 0, len(runner.Spec.PodTemplateSpec.Spec.Containers))
newPod.Spec = runner.Spec.Spec
newPod.Spec.Containers = make([]corev1.Container, 0, len(runner.Spec.Spec.Containers))
for _, c := range runner.Spec.PodTemplateSpec.Spec.Containers {
for _, c := range runner.Spec.Spec.Containers {
if c.Name == v1alpha1.EphemeralRunnerContainerName {
c.Env = append(
c.Env,
@@ -689,14 +671,17 @@ func (b *ResourceBuilder) newEphemeralRunnerPod(ctx context.Context, runner *v1a
return &newPod
}
func (b *ResourceBuilder) newEphemeralRunnerJitSecret(ephemeralRunner *v1alpha1.EphemeralRunner) *corev1.Secret {
func (b *ResourceBuilder) newEphemeralRunnerJitSecret(ephemeralRunner *v1alpha1.EphemeralRunner, jitConfig *actions.RunnerScaleSetJitRunnerConfig) *corev1.Secret {
return &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: ephemeralRunner.Name,
Namespace: ephemeralRunner.Namespace,
},
Data: map[string][]byte{
jitTokenKey: []byte(ephemeralRunner.Status.RunnerJITConfig),
jitTokenKey: []byte(jitConfig.EncodedJITConfig),
"runnerName": []byte(jitConfig.Runner.Name),
"runnerId": []byte(strconv.Itoa(jitConfig.Runner.Id)),
"scaleSetId": []byte(strconv.Itoa(jitConfig.Runner.RunnerScaleSetId)),
},
}
}
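
Since the builder now writes the runner's identity next to the encoded JIT config, a consumer can presumably read it back without an extra Actions API round trip. A small sketch of doing so; the key names come from the hunk above, while the function and its wiring are illustrative:

package main

import (
	"fmt"
	"strconv"

	corev1 "k8s.io/api/core/v1"
)

// readJitSecret extracts the identity fields written by
// newEphemeralRunnerJitSecret in the diff above.
func readJitSecret(s *corev1.Secret) (name string, runnerID, scaleSetID int, err error) {
	name = string(s.Data["runnerName"])
	if runnerID, err = strconv.Atoi(string(s.Data["runnerId"])); err != nil {
		return "", 0, 0, fmt.Errorf("parsing runnerId: %w", err)
	}
	if scaleSetID, err = strconv.Atoi(string(s.Data["scaleSetId"])); err != nil {
		return "", 0, 0, fmt.Errorf("parsing scaleSetId: %w", err)
	}
	return name, runnerID, scaleSetID, nil
}

func main() {
	s := &corev1.Secret{Data: map[string][]byte{
		"runnerName": []byte("runner-abc"),
		"runnerId":   []byte("42"),
		"scaleSetId": []byte("7"),
	}}
	fmt.Println(readJitSecret(s))
}
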
@@ -713,30 +698,6 @@ func scaleSetListenerName(autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet) s
return fmt.Sprintf("%v-%v-listener", autoscalingRunnerSet.Name, namespaceHash)
}
func scaleSetListenerServiceAccountName(autoscalingListener *v1alpha1.AutoscalingListener) string {
namespaceHash := hash.FNVHashString(autoscalingListener.Spec.AutoscalingRunnerSetNamespace)
if len(namespaceHash) > 8 {
namespaceHash = namespaceHash[:8]
}
return fmt.Sprintf("%v-%v-listener", autoscalingListener.Spec.AutoscalingRunnerSetName, namespaceHash)
}
func scaleSetListenerRoleName(autoscalingListener *v1alpha1.AutoscalingListener) string {
namespaceHash := hash.FNVHashString(autoscalingListener.Spec.AutoscalingRunnerSetNamespace)
if len(namespaceHash) > 8 {
namespaceHash = namespaceHash[:8]
}
return fmt.Sprintf("%v-%v-listener", autoscalingListener.Spec.AutoscalingRunnerSetName, namespaceHash)
}
func scaleSetListenerSecretMirrorName(autoscalingListener *v1alpha1.AutoscalingListener) string {
namespaceHash := hash.FNVHashString(autoscalingListener.Spec.AutoscalingRunnerSetNamespace)
if len(namespaceHash) > 8 {
namespaceHash = namespaceHash[:8]
}
return fmt.Sprintf("%v-%v-listener", autoscalingListener.Spec.AutoscalingRunnerSetName, namespaceHash)
}
func proxyListenerSecretName(autoscalingListener *v1alpha1.AutoscalingListener) string {
namespaceHash := hash.FNVHashString(autoscalingListener.Spec.AutoscalingRunnerSetNamespace)
if len(namespaceHash) > 8 {


@@ -82,12 +82,7 @@ func TestLabelPropagation(t *testing.T) {
Name: "test",
},
}
listenerSecret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "test",
},
}
listenerPod, err := b.newScaleSetListenerPod(listener, &corev1.Secret{}, listenerServiceAccount, listenerSecret, nil)
listenerPod, err := b.newScaleSetListenerPod(listener, &corev1.Secret{}, listenerServiceAccount, nil)
require.NoError(t, err)
assert.Equal(t, listenerPod.Labels, listener.Labels)


@@ -0,0 +1,280 @@
package actionsgithubcom
import (
"context"
"encoding/json"
"fmt"
"net/http"
"net/url"
"strings"
"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1/appconfig"
"github.com/actions/actions-runner-controller/github/actions"
"github.com/actions/actions-runner-controller/vault"
"github.com/actions/actions-runner-controller/vault/azurekeyvault"
"golang.org/x/net/http/httpproxy"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
)
type SecretResolver struct {
k8sClient client.Client
multiClient actions.MultiClient
}
type SecretResolverOption func(*SecretResolver)
func NewSecretResolver(k8sClient client.Client, multiClient actions.MultiClient, opts ...SecretResolverOption) *SecretResolver {
if k8sClient == nil {
panic("k8sClient must not be nil")
}
secretResolver := &SecretResolver{
k8sClient: k8sClient,
multiClient: multiClient,
}
for _, opt := range opts {
opt(secretResolver)
}
return secretResolver
}
type ActionsGitHubObject interface {
client.Object
GitHubConfigUrl() string
GitHubConfigSecret() string
GitHubProxy() *v1alpha1.ProxyConfig
GitHubServerTLS() *v1alpha1.TLSConfig
VaultConfig() *v1alpha1.VaultConfig
VaultProxy() *v1alpha1.ProxyConfig
}
func (sr *SecretResolver) GetAppConfig(ctx context.Context, obj ActionsGitHubObject) (*appconfig.AppConfig, error) {
resolver, err := sr.resolverForObject(ctx, obj)
if err != nil {
return nil, fmt.Errorf("failed to get resolver for object: %v", err)
}
appConfig, err := resolver.appConfig(ctx, obj.GitHubConfigSecret())
if err != nil {
return nil, fmt.Errorf("failed to resolve app config: %v", err)
}
return appConfig, nil
}
func (sr *SecretResolver) GetActionsService(ctx context.Context, obj ActionsGitHubObject) (actions.ActionsService, error) {
resolver, err := sr.resolverForObject(ctx, obj)
if err != nil {
return nil, fmt.Errorf("failed to get resolver for object: %v", err)
}
appConfig, err := resolver.appConfig(ctx, obj.GitHubConfigSecret())
if err != nil {
return nil, fmt.Errorf("failed to resolve app config: %v", err)
}
var clientOptions []actions.ClientOption
if proxy := obj.GitHubProxy(); proxy != nil {
config := &httpproxy.Config{
NoProxy: strings.Join(proxy.NoProxy, ","),
}
if proxy.HTTP != nil {
u, err := url.Parse(proxy.HTTP.Url)
if err != nil {
return nil, fmt.Errorf("failed to parse proxy http url %q: %w", proxy.HTTP.Url, err)
}
if ref := proxy.HTTP.CredentialSecretRef; ref != "" {
u.User, err = resolver.proxyCredentials(ctx, ref)
if err != nil {
return nil, fmt.Errorf("failed to resolve proxy credentials: %v", err)
}
}
config.HTTPProxy = u.String()
}
if proxy.HTTPS != nil {
u, err := url.Parse(proxy.HTTPS.Url)
if err != nil {
return nil, fmt.Errorf("failed to parse proxy https url %q: %w", proxy.HTTPS.Url, err)
}
if ref := proxy.HTTPS.CredentialSecretRef; ref != "" {
u.User, err = resolver.proxyCredentials(ctx, ref)
if err != nil {
return nil, fmt.Errorf("failed to resolve proxy credentials: %v", err)
}
}
config.HTTPSProxy = u.String()
}
proxyFunc := func(req *http.Request) (*url.URL, error) {
return config.ProxyFunc()(req.URL)
}
clientOptions = append(clientOptions, actions.WithProxy(proxyFunc))
}
tlsConfig := obj.GitHubServerTLS()
if tlsConfig != nil {
pool, err := tlsConfig.ToCertPool(func(name, key string) ([]byte, error) {
var configmap corev1.ConfigMap
err := sr.k8sClient.Get(
ctx,
types.NamespacedName{
Namespace: obj.GetNamespace(),
Name: name,
},
&configmap,
)
if err != nil {
return nil, fmt.Errorf("failed to get configmap %s: %w", name, err)
}
return []byte(configmap.Data[key]), nil
})
if err != nil {
return nil, fmt.Errorf("failed to get tls config: %w", err)
}
clientOptions = append(clientOptions, actions.WithRootCAs(pool))
}
return sr.multiClient.GetClientFor(
ctx,
obj.GitHubConfigUrl(),
appConfig,
obj.GetNamespace(),
clientOptions...,
)
}
func (sr *SecretResolver) resolverForObject(ctx context.Context, obj ActionsGitHubObject) (resolver, error) {
vaultConfig := obj.VaultConfig()
if vaultConfig == nil || vaultConfig.Type == "" {
return &k8sResolver{
namespace: obj.GetNamespace(),
client: sr.k8sClient,
}, nil
}
var proxy *httpproxy.Config
if vaultProxy := obj.VaultProxy(); vaultProxy != nil {
p, err := vaultProxy.ToHTTPProxyConfig(func(s string) (*corev1.Secret, error) {
var secret corev1.Secret
err := sr.k8sClient.Get(ctx, types.NamespacedName{Name: s, Namespace: obj.GetNamespace()}, &secret)
if err != nil {
return nil, fmt.Errorf("failed to get secret %s: %w", s, err)
}
return &secret, nil
})
if err != nil {
return nil, fmt.Errorf("failed to create proxy config: %v", err)
}
proxy = p
}
switch vaultConfig.Type {
case vault.VaultTypeAzureKeyVault:
akv, err := azurekeyvault.New(azurekeyvault.Config{
TenantID: vaultConfig.AzureKeyVault.TenantID,
ClientID: vaultConfig.AzureKeyVault.ClientID,
URL: vaultConfig.AzureKeyVault.URL,
CertificatePath: vaultConfig.AzureKeyVault.CertificatePath,
Proxy: proxy,
})
if err != nil {
return nil, fmt.Errorf("failed to create Azure Key Vault client: %v", err)
}
return &vaultResolver{
vault: akv,
}, nil
default:
return nil, fmt.Errorf("unknown vault type %q", vaultConfig.Type)
}
}
type resolver interface {
appConfig(ctx context.Context, key string) (*appconfig.AppConfig, error)
proxyCredentials(ctx context.Context, key string) (*url.Userinfo, error)
}
type k8sResolver struct {
namespace string
client client.Client
}
func (r *k8sResolver) appConfig(ctx context.Context, key string) (*appconfig.AppConfig, error) {
nsName := types.NamespacedName{
Namespace: r.namespace,
Name: key,
}
secret := new(corev1.Secret)
if err := r.client.Get(
ctx,
nsName,
secret,
); err != nil {
return nil, fmt.Errorf("failed to get kubernetes secret: %q", nsName.String())
}
return appconfig.FromSecret(secret)
}
func (r *k8sResolver) proxyCredentials(ctx context.Context, key string) (*url.Userinfo, error) {
nsName := types.NamespacedName{Namespace: r.namespace, Name: key}
secret := new(corev1.Secret)
if err := r.client.Get(
ctx,
nsName,
secret,
); err != nil {
return nil, fmt.Errorf("failed to get kubernetes secret: %q", nsName.String())
}
return url.UserPassword(
string(secret.Data["username"]),
string(secret.Data["password"]),
), nil
}
type vaultResolver struct {
vault vault.Vault
}
func (r *vaultResolver) appConfig(ctx context.Context, key string) (*appconfig.AppConfig, error) {
val, err := r.vault.GetSecret(ctx, key)
if err != nil {
return nil, fmt.Errorf("failed to resolve secret: %v", err)
}
return appconfig.FromJSONString(val)
}
func (r *vaultResolver) proxyCredentials(ctx context.Context, key string) (*url.Userinfo, error) {
val, err := r.vault.GetSecret(ctx, key)
if err != nil {
return nil, fmt.Errorf("failed to resolve secret: %v", err)
}
type info struct {
Username string `json:"username"`
Password string `json:"password"`
}
var i info
if err := json.Unmarshal([]byte(val), &i); err != nil {
return nil, fmt.Errorf("failed to unmarshal info: %v", err)
}
return url.UserPassword(i.Username, i.Password), nil
}
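
The file above amounts to a small strategy pattern: resolverForObject hands back the Kubernetes-backed resolver unless a vault type is configured, and both backends satisfy the same resolver interface. A stripped-down, dependency-free sketch of that selection; the string "azure_key_vault" merely stands in for the real vault.VaultTypeAzureKeyVault constant:

package main

import "fmt"

type resolver interface {
	appConfig(key string) (string, error)
}

// k8sResolver stands in for the secret-backed default above.
type k8sResolver struct{ namespace string }

func (r k8sResolver) appConfig(key string) (string, error) {
	return fmt.Sprintf("secret %s/%s", r.namespace, key), nil
}

// vaultResolver stands in for the Azure Key Vault branch.
type vaultResolver struct{ url string }

func (r vaultResolver) appConfig(key string) (string, error) {
	return fmt.Sprintf("vault %s key %s", r.url, key), nil
}

func resolverFor(vaultType, namespace string) (resolver, error) {
	if vaultType == "" {
		return k8sResolver{namespace: namespace}, nil
	}
	switch vaultType {
	case "azure_key_vault": // assumed value of vault.VaultTypeAzureKeyVault
		return vaultResolver{url: "https://example.vault.azure.net"}, nil
	default:
		return nil, fmt.Errorf("unknown vault type %q", vaultType)
	}
}

func main() {
	r, _ := resolverFor("", "arc-runners")
	fmt.Println(r.appConfig("github-config-secret"))
}
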


@@ -20,6 +20,7 @@ import (
"os"
"path/filepath"
"testing"
"time"
"github.com/onsi/ginkgo/config"
@@ -79,6 +80,15 @@ var _ = BeforeSuite(func() {
k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})
Expect(err).ToNot(HaveOccurred())
Expect(k8sClient).ToNot(BeNil())
failedRunnerBackoff = []time.Duration{
20 * time.Millisecond,
20 * time.Millisecond,
20 * time.Millisecond,
20 * time.Millisecond,
20 * time.Millisecond,
20 * time.Millisecond,
}
})
var _ = AfterSuite(func() {


@@ -130,7 +130,7 @@ func (r *HorizontalRunnerAutoscalerReconciler) suggestReplicasByQueuedAndInProgr
jobs, resp, err := ghc.Actions.ListWorkflowJobs(context.TODO(), user, repoName, runID, &opt)
if err != nil {
r.Log.Error(err, "Error listing workflow jobs")
return //err
return // err
}
allJobs = append(allJobs, jobs.Jobs...)
if resp.NextPage == 0 {
@@ -345,7 +345,7 @@ func (r *HorizontalRunnerAutoscalerReconciler) suggestReplicasByPercentageRunner
}
var runnerPodList corev1.PodList
if err := r.Client.List(ctx, &runnerPodList, client.InNamespace(hra.Namespace), client.MatchingLabels(map[string]string{
if err := r.List(ctx, &runnerPodList, client.InNamespace(hra.Namespace), client.MatchingLabels(map[string]string{
kindLabel: hra.Spec.ScaleTargetRef.Name,
})); err != nil {
return nil, err
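
This last hunk is mechanical: the reconciler embeds client.Client, so the embedded type's methods are promoted and r.List is the same call as r.Client.List (the DeletionTimestamp changes earlier in the diff rely on the same promotion through ObjectMeta). A minimal illustration of that Go embedding rule:

package main

import "fmt"

type Client struct{}

func (Client) List(items ...string) { fmt.Println("listing", items) }

// Reconciler embeds Client, so Client's methods are promoted onto it.
type Reconciler struct {
	Client
}

func main() {
	r := Reconciler{}
	r.List("pods")        // promoted form, as used after the change
	r.Client.List("pods") // explicit form, as used before the change
}
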

Some files were not shown because too many files have changed in this diff.