Mirror of https://github.com/actions/actions-runner-controller.git, synced 2025-12-10 11:41:27 +00:00.

Compare commits: gha-runner... update-run... (33 commits)
Commits in this compare (abbreviated SHA1s):

311a1688fc, e3ed1ba226, 652bd99439, f731873df9, 088e2a3a90, 2035e13724,
04b966dfec, 0a0be027fd, ddc2918a48, 0e006bb0ff, ce7722aed4, ad2dd7d787,
30abbe0cab, c27541140a, 52d65c333b, a07dce28bb, fb43abf1f3, 9c42f9f2e1,
ad826725ce, 4326693888, 469a0faec4, 349cc0835e, aa14f50e45, ee8ca99e49,
6a13540076, ded39bede6, 9890c0592d, 3b5693eecb, e6e621a50a, 0b2534ebc9,
e858d67926, bc6c23609a, 666d0c52c4
.github/actions/setup-arc-e2e/action.yaml (vendored): 4 changed lines
@@ -36,8 +36,8 @@ runs:
         driver-opts: image=moby/buildkit:v0.10.6

     - name: Build controller image
-      # https://github.com/docker/build-push-action/releases/tag/v6.15.0
-      uses: docker/build-push-action@471d1dc4e07e5cdedd4c2171150001c434f0b7a4
+      # https://github.com/docker/build-push-action/releases/tag/v6.18.0
+      uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83
       with:
         file: Dockerfile
         platforms: linux/amd64
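Throughout these workflows, third-party actions are pinned to a full commit SHA, and the corresponding release tag is kept in a comment above the `uses:` line, so a version bump always touches both lines together. A minimal sketch of that convention (values taken from the hunk above; the step name and inputs are illustrative):

```yaml
- name: Build controller image
  # https://github.com/docker/build-push-action/releases/tag/v6.18.0
  uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83
  with:
    file: Dockerfile
    platforms: linux/amd64
```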
.github/workflows/arc-publish-chart.yaml (vendored): 16 changed lines
@@ -40,13 +40,12 @@ jobs:
       publish-chart: ${{ steps.publish-chart-step.outputs.publish }}
     steps:
       - name: Checkout
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
         with:
           fetch-depth: 0

       - name: Set up Helm
-        # Using https://github.com/Azure/setup-helm/releases/tag/v4.2.0
-        uses: azure/setup-helm@fe7b79cd5ee1e45176fcad797de68ecaf3ca4814
+        uses: azure/setup-helm@1a275c3b69536ee54be43f2070a358922e12c8d4
         with:
           version: ${{ env.HELM_VERSION }}

@@ -59,12 +58,11 @@ jobs:
         run: helm template --values charts/.ci/values-kube-score.yaml charts/* | ./kube-score score - --ignore-test pod-networkpolicy --ignore-test deployment-has-poddisruptionbudget --ignore-test deployment-has-host-podantiaffinity --ignore-test container-security-context --ignore-test pod-probes --ignore-test container-image-tag --enable-optional-test container-security-context-privileged --enable-optional-test container-security-context-readonlyrootfilesystem

       # python is a requirement for the chart-testing action below (supports yamllint among other tests)
-      - uses: actions/setup-python@v5
+      - uses: actions/setup-python@v6
         with:
           python-version: "3.11"

       - name: Set up chart-testing
-        # https://github.com/helm/chart-testing-action/releases/tag/v2.7.0
         uses: helm/chart-testing-action@0d28d3144d3a25ea2cc349d6e59901c4ff469b3b

       - name: Run chart-testing (list-changed)

@@ -81,7 +79,6 @@ jobs:

       - name: Create kind cluster
         if: steps.list-changed.outputs.changed == 'true'
-        # https://github.com/helm/kind-action/releases/tag/v1.12.0
         uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3

       # We need cert-manager already installed in the cluster because we assume the CRDs exist

@@ -137,7 +134,7 @@ jobs:

     steps:
       - name: Checkout
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
         with:
           fetch-depth: 0

@@ -148,8 +145,7 @@ jobs:

       - name: Get Token
         id: get_workflow_token
-        # https://github.com/peter-murray/workflow-application-token-action/releases/tag/v3.0.0
-        uses: peter-murray/workflow-application-token-action@dc0413987a085fa17d19df9e47d4677cf81ffef3
+        uses: peter-murray/workflow-application-token-action@d17e3a9a36850ea89f35db16c1067dd2b68ee343
         with:
           application_id: ${{ secrets.ACTIONS_ACCESS_APP_ID }}
           application_private_key: ${{ secrets.ACTIONS_ACCESS_PK }}

@@ -188,7 +184,7 @@ jobs:
       # this workaround is intended to move the index.yaml to the target repo
       # where the github pages are hosted
       - name: Checkout target repository
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
         with:
           repository: ${{ env.CHART_TARGET_ORG }}/${{ env.CHART_TARGET_REPO }}
           path: ${{ env.CHART_TARGET_REPO }}
.github/workflows/arc-publish.yaml (vendored): 7 changed lines
@@ -39,9 +39,9 @@ jobs:
     if: ${{ !startsWith(github.event.inputs.release_tag_name, 'gha-runner-scale-set-') }}
     steps:
       - name: Checkout
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5

-      - uses: actions/setup-go@v5
+      - uses: actions/setup-go@v6
        with:
          go-version-file: "go.mod"

@@ -73,8 +73,7 @@ jobs:

       - name: Get Token
         id: get_workflow_token
-        # https://github.com/peter-murray/workflow-application-token-action/releases/tag/v3.0.0
-        uses: peter-murray/workflow-application-token-action@dc0413987a085fa17d19df9e47d4677cf81ffef3
+        uses: peter-murray/workflow-application-token-action@d17e3a9a36850ea89f35db16c1067dd2b68ee343
         with:
           application_id: ${{ secrets.ACTIONS_ACCESS_APP_ID }}
           application_private_key: ${{ secrets.ACTIONS_ACCESS_PK }}
.github/workflows/arc-release-runners.yaml (vendored): 5 changed lines
@@ -28,7 +28,7 @@ jobs:
     name: Trigger Build and Push of Runner Images
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
       - name: Get runner version
         id: versions
         run: |

@@ -39,8 +39,7 @@ jobs:

       - name: Get Token
         id: get_workflow_token
-        # https://github.com/peter-murray/workflow-application-token-action/releases/tag/v3.0.0
-        uses: peter-murray/workflow-application-token-action@dc0413987a085fa17d19df9e47d4677cf81ffef3
+        uses: peter-murray/workflow-application-token-action@d17e3a9a36850ea89f35db16c1067dd2b68ee343
         with:
           application_id: ${{ secrets.ACTIONS_ACCESS_APP_ID }}
           application_private_key: ${{ secrets.ACTIONS_ACCESS_PK }}
@@ -21,7 +21,7 @@ jobs:
       container_hooks_current_version: ${{ steps.container_hooks_versions.outputs.container_hooks_current_version }}
       container_hooks_latest_version: ${{ steps.container_hooks_versions.outputs.container_hooks_latest_version }}
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5

       - name: Get runner current and latest versions
         id: runner_versions

@@ -64,7 +64,7 @@ jobs:
          echo "CONTAINER_HOOKS_CURRENT_VERSION=${{ needs.check_versions.outputs.container_hooks_current_version }}"
          echo "CONTAINER_HOOKS_LATEST_VERSION=${{ needs.check_versions.outputs.container_hooks_latest_version }}"

-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5

       - name: PR Name
         id: pr_name

@@ -119,7 +119,7 @@ jobs:
       PR_NAME: ${{ needs.check_pr.outputs.pr_name }}

     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5

       - name: New branch
         run: git checkout -b update-runner-"$(date +%Y-%m-%d)"
.github/workflows/arc-validate-chart.yaml (vendored): 9 changed lines
@@ -40,23 +40,21 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
         with:
           fetch-depth: 0

       - name: Set up Helm
-        # Using https://github.com/Azure/setup-helm/releases/tag/v4.2.0
-        uses: azure/setup-helm@fe7b79cd5ee1e45176fcad797de68ecaf3ca4814
+        uses: azure/setup-helm@1a275c3b69536ee54be43f2070a358922e12c8d4
         with:
           version: ${{ env.HELM_VERSION }}

       # python is a requirement for the chart-testing action below (supports yamllint among other tests)
-      - uses: actions/setup-python@v5
+      - uses: actions/setup-python@v6
         with:
           python-version: "3.11"

       - name: Set up chart-testing
-        # https://github.com/helm/chart-testing-action/releases/tag/v2.7.0
         uses: helm/chart-testing-action@0d28d3144d3a25ea2cc349d6e59901c4ff469b3b

       - name: Run chart-testing (list-changed)

@@ -72,7 +70,6 @@ jobs:
           ct lint --config charts/.ci/ct-config.yaml

       - name: Create kind cluster
-        # https://github.com/helm/kind-action/releases/tag/v1.12.0
         uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3
         if: steps.list-changed.outputs.changed == 'true'
.github/workflows/arc-validate-runners.yaml (vendored): 4 changed lines
@@ -24,7 +24,7 @@ jobs:
     name: runner / shellcheck
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
      - name: "Run shellcheck"
        run: make shellcheck

@@ -33,7 +33,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5

       - name: Run tests
         run: |
.github/workflows/gha-e2e-tests.yaml (vendored): 20 changed lines
@@ -16,7 +16,7 @@ env:
   TARGET_ORG: actions-runner-controller
   TARGET_REPO: arc_e2e_test_dummy
   IMAGE_NAME: "arc-test-image"
-  IMAGE_VERSION: "0.12.0"
+  IMAGE_VERSION: "0.12.1"

 concurrency:
   # This will make sure we only apply the concurrency limits on pull requests

@@ -33,7 +33,7 @@ jobs:
     env:
       WORKFLOW_FILE: "arc-test-workflow.yaml"
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
        with:
          ref: ${{github.head_ref}}

@@ -124,7 +124,7 @@ jobs:
     env:
       WORKFLOW_FILE: "arc-test-workflow.yaml"
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
        with:
          ref: ${{github.head_ref}}

@@ -217,7 +217,7 @@ jobs:
     env:
       WORKFLOW_FILE: arc-test-dind-workflow.yaml
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
        with:
          ref: ${{github.head_ref}}

@@ -309,7 +309,7 @@ jobs:
     env:
       WORKFLOW_FILE: "arc-test-kubernetes-workflow.yaml"
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
        with:
          ref: ${{github.head_ref}}

@@ -410,7 +410,7 @@ jobs:
     env:
       WORKFLOW_FILE: "arc-test-workflow.yaml"
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
        with:
          ref: ${{github.head_ref}}

@@ -513,7 +513,7 @@ jobs:
     env:
       WORKFLOW_FILE: "arc-test-workflow.yaml"
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
        with:
          ref: ${{github.head_ref}}

@@ -610,7 +610,7 @@ jobs:
     env:
       WORKFLOW_FILE: "arc-test-workflow.yaml"
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
        with:
          ref: ${{github.head_ref}}

@@ -732,7 +732,7 @@ jobs:
     env:
       WORKFLOW_FILE: "arc-test-sleepy-matrix.yaml"
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
        with:
          ref: ${{github.head_ref}}

@@ -904,7 +904,7 @@ jobs:
     env:
       WORKFLOW_FILE: arc-test-workflow.yaml
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
        with:
          ref: ${{ github.head_ref }}
.github/workflows/gha-publish-chart.yaml (vendored): 23 changed lines
@@ -45,7 +45,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
         with:
           # If inputs.ref is empty, it'll resolve to the default branch
           ref: ${{ inputs.ref }}

@@ -72,11 +72,10 @@ jobs:
          echo "repository_owner=$(echo ${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT

       - name: Set up QEMU
-        # https://github.com/docker/setup-qemu-action/releases/tag/v3.6.0
         uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392

       - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2
+        uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435
         with:
           # Pinning v0.9.1 for Buildx and BuildKit v0.10.6
           # BuildKit v0.11 which has a bug causing intermittent

@@ -85,16 +84,14 @@ jobs:
           driver-opts: image=moby/buildkit:v0.10.6

       - name: Login to GitHub Container Registry
-        # https://github.com/docker/login-action/releases/tag/v3.4.0
-        uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772
+        uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef
         with:
           registry: ghcr.io
           username: ${{ github.actor }}
           password: ${{ secrets.GITHUB_TOKEN }}

       - name: Build & push controller image
-        # https://github.com/docker/build-push-action/releases/tag/v6.15.0
-        uses: docker/build-push-action@471d1dc4e07e5cdedd4c2171150001c434f0b7a4
+        uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83
         with:
           file: Dockerfile
           platforms: linux/amd64,linux/arm64

@@ -103,8 +100,6 @@ jobs:
           tags: |
             ghcr.io/${{ steps.resolve_parameters.outputs.repository_owner }}/gha-runner-scale-set-controller:${{ inputs.release_tag_name }}
             ghcr.io/${{ steps.resolve_parameters.outputs.repository_owner }}/gha-runner-scale-set-controller:${{ inputs.release_tag_name }}-${{ steps.resolve_parameters.outputs.short_sha }}
-          cache-from: type=gha
-          cache-to: type=gha,mode=max

       - name: Job summary
         run: |

@@ -124,7 +119,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
         with:
           # If inputs.ref is empty, it'll resolve to the default branch
           ref: ${{ inputs.ref }}

@@ -143,8 +138,7 @@ jobs:
          echo "repository_owner=$(echo ${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT

       - name: Set up Helm
-        # Using https://github.com/Azure/setup-helm/releases/tag/v4.2.0
-        uses: azure/setup-helm@fe7b79cd5ee1e45176fcad797de68ecaf3ca4814
+        uses: azure/setup-helm@1a275c3b69536ee54be43f2070a358922e12c8d4
         with:
           version: ${{ env.HELM_VERSION }}

@@ -172,7 +166,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
         with:
           # If inputs.ref is empty, it'll resolve to the default branch
           ref: ${{ inputs.ref }}

@@ -191,8 +185,7 @@ jobs:
          echo "repository_owner=$(echo ${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT

       - name: Set up Helm
-        # Using https://github.com/Azure/setup-helm/releases/tag/v4.2.0
-        uses: azure/setup-helm@fe7b79cd5ee1e45176fcad797de68ecaf3ca4814
+        uses: azure/setup-helm@1a275c3b69536ee54be43f2070a358922e12c8d4
         with:
           version: ${{ env.HELM_VERSION }}
.github/workflows/gha-validate-chart.yaml (vendored): 18 changed lines
@@ -36,23 +36,21 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
         with:
           fetch-depth: 0

       - name: Set up Helm
-        # Using https://github.com/Azure/setup-helm/releases/tag/v4.2.0
-        uses: azure/setup-helm@fe7b79cd5ee1e45176fcad797de68ecaf3ca4814
+        uses: azure/setup-helm@1a275c3b69536ee54be43f2070a358922e12c8d4
         with:
           version: ${{ env.HELM_VERSION }}

       # python is a requirement for the chart-testing action below (supports yamllint among other tests)
-      - uses: actions/setup-python@v5
+      - uses: actions/setup-python@v6
         with:
           python-version: "3.11"

       - name: Set up chart-testing
-        # https://github.com/helm/chart-testing-action/releases/tag/v2.7.0
         uses: helm/chart-testing-action@0d28d3144d3a25ea2cc349d6e59901c4ff469b3b

       - name: Run chart-testing (list-changed)

@@ -69,14 +67,13 @@ jobs:
           ct lint --config charts/.ci/ct-config-gha.yaml

       - name: Set up docker buildx
-        uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2
+        uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435
         if: steps.list-changed.outputs.changed == 'true'
         with:
           version: latest

       - name: Build controller image
-        # https://github.com/docker/build-push-action/releases/tag/v6.15.0
-        uses: docker/build-push-action@471d1dc4e07e5cdedd4c2171150001c434f0b7a4
+        uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83
         if: steps.list-changed.outputs.changed == 'true'
         with:
           file: Dockerfile

@@ -91,7 +88,6 @@ jobs:
           cache-to: type=gha,mode=max

       - name: Create kind cluster
-        # https://github.com/helm/kind-action/releases/tag/v1.12.0
         uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3
         if: steps.list-changed.outputs.changed == 'true'
         with:

@@ -115,8 +111,8 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout
-        uses: actions/checkout@v4
-      - uses: actions/setup-go@v5
+        uses: actions/checkout@v5
+      - uses: actions/setup-go@v6
         with:
           go-version-file: "go.mod"
           cache: false
.github/workflows/global-publish-canary.yaml (vendored): 17 changed lines
@@ -55,12 +55,11 @@ jobs:
       TARGET_REPO: actions-runner-controller
     steps:
       - name: Checkout
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5

       - name: Get Token
         id: get_workflow_token
-        # https://github.com/peter-murray/workflow-application-token-action/releases/tag/v3.0.0
-        uses: peter-murray/workflow-application-token-action@dc0413987a085fa17d19df9e47d4677cf81ffef3
+        uses: peter-murray/workflow-application-token-action@d17e3a9a36850ea89f35db16c1067dd2b68ee343
         with:
           application_id: ${{ secrets.ACTIONS_ACCESS_APP_ID }}
           application_private_key: ${{ secrets.ACTIONS_ACCESS_PK }}

@@ -91,11 +90,10 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5

       - name: Login to GitHub Container Registry
-        # https://github.com/docker/login-action/releases/tag/v3.4.0
-        uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772
+        uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef
         with:
           registry: ghcr.io
           username: ${{ github.actor }}

@@ -112,19 +110,16 @@ jobs:
          echo "repository_owner=$(echo ${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT

       - name: Set up QEMU
-        # https://github.com/docker/setup-qemu-action/releases/tag/v3.6.0
         uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392

       - name: Set up Docker Buildx
-        # https://github.com/docker/setup-buildx-action/releases/tag/v3.10.0
-        uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2
+        uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435
         with:
           version: latest

       # Unstable builds - run at your own risk
       - name: Build and Push
-        # https://github.com/docker/build-push-action/releases/tag/v6.15.0
-        uses: docker/build-push-action@471d1dc4e07e5cdedd4c2171150001c434f0b7a4
+        uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83
         with:
           context: .
           file: ./Dockerfile
.github/workflows/global-run-codeql.yaml (vendored): 12 changed lines
@@ -25,20 +25,20 @@ jobs:
       security-events: write
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5

       - name: Install Go
-        uses: actions/setup-go@v5
+        uses: actions/setup-go@v6
         with:
           go-version-file: go.mod

       - name: Initialize CodeQL
-        uses: github/codeql-action/init@v2
+        uses: github/codeql-action/init@v3
         with:
-          languages: go
+          languages: go, actions

       - name: Autobuild
-        uses: github/codeql-action/autobuild@v2
+        uses: github/codeql-action/autobuild@v3

       - name: Perform CodeQL Analysis
-        uses: github/codeql-action/analyze@v2
+        uses: github/codeql-action/analyze@v3
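Besides the move from codeql-action v2 to v3, the init step now also passes `actions` as a language, which enables CodeQL's analysis of the repository's GitHub Actions workflow files alongside the Go code. A minimal sketch of such an init step (checkout and the rest of the job omitted):

```yaml
- name: Initialize CodeQL
  uses: github/codeql-action/init@v3
  with:
    # "actions" adds scanning of .github/workflows files in addition to the Go sources.
    languages: go, actions
```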
@@ -11,7 +11,7 @@ jobs:
   check_for_first_interaction:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
       - uses: actions/first-interaction@main
         with:
           repo-token: ${{ secrets.GITHUB_TOKEN }}
.github/workflows/global-run-stale.yaml (vendored): 2 changed lines
@@ -14,7 +14,7 @@ jobs:
       issues: write # for actions/stale to close stale issues
       pull-requests: write # for actions/stale to close stale PRs
     steps:
-      - uses: actions/stale@v6
+      - uses: actions/stale@v10
        with:
          stale-issue-message: 'This issue is stale because it has been open 30 days with no activity. Remove stale label or comment or this will be closed in 5 days.'
          # turn off stale for both issues and PRs
.github/workflows/go.yaml (vendored): 19 changed lines
@@ -29,8 +29,8 @@ jobs:
   fmt:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v4
-      - uses: actions/setup-go@v5
+      - uses: actions/checkout@v5
+      - uses: actions/setup-go@v6
         with:
           go-version-file: "go.mod"
           cache: false

@@ -42,14 +42,13 @@ jobs:
   lint:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v4
-      - uses: actions/setup-go@v5
+      - uses: actions/checkout@v5
+      - uses: actions/setup-go@v6
         with:
           go-version-file: "go.mod"
           cache: false
       - name: golangci-lint
-        # https://github.com/golangci/golangci-lint-action/releases/tag/v7.0.0
-        uses: golangci/golangci-lint-action@1481404843c368bc19ca9406f87d6e0fc97bdcfd
+        uses: golangci/golangci-lint-action@4afd733a84b1f43292c63897423277bb7f4313a9
         with:
           only-new-issues: true
           version: v2.1.2

@@ -57,8 +56,8 @@ jobs:
   generate:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v4
-      - uses: actions/setup-go@v5
+      - uses: actions/checkout@v5
+      - uses: actions/setup-go@v6
         with:
           go-version-file: "go.mod"
           cache: false

@@ -70,8 +69,8 @@ jobs:
   test:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v4
-      - uses: actions/setup-go@v5
+      - uses: actions/checkout@v5
+      - uses: actions/setup-go@v6
         with:
           go-version-file: "go.mod"
       - run: make manifests
@@ -1,2 +1,2 @@
 # actions-runner-controller maintainers
-* @mumoshu @toast-gear @actions/actions-launch @nikola-jokic @rentziass
+* @mumoshu @toast-gear @actions/actions-launch @actions/actions-compute @nikola-jokic @rentziass
Makefile: 7 changed lines
@@ -6,7 +6,7 @@ endif
 DOCKER_USER ?= $(shell echo ${DOCKER_IMAGE_NAME} | cut -d / -f1)
 VERSION ?= dev
 COMMIT_SHA = $(shell git rev-parse HEAD)
-RUNNER_VERSION ?= 2.325.0
+RUNNER_VERSION ?= 2.328.0
 TARGETPLATFORM ?= $(shell arch)
 RUNNER_NAME ?= ${DOCKER_USER}/actions-runner
 RUNNER_TAG ?= ${VERSION}

@@ -117,9 +117,6 @@ manifests: manifests-gen-crds chart-crds

 manifests-gen-crds: controller-gen yq
 	$(CONTROLLER_GEN) $(CRD_OPTIONS) rbac:roleName=manager-role webhook paths="./..." output:crd:artifacts:config=config/crd/bases
-	for YAMLFILE in config/crd/bases/actions*.yaml; do \
-		$(YQ) '.spec.preserveUnknownFields = false' --inplace "$$YAMLFILE" ; \
-	done
 	make manifests-gen-crds-fix DELETE_KEY=x-kubernetes-list-type
 	make manifests-gen-crds-fix DELETE_KEY=x-kubernetes-list-map-keys

@@ -310,7 +307,7 @@ github-release: release
 # Otherwise we get errors like the below:
 # Error: failed to install CRD crds/actions.summerwind.dev_runnersets.yaml: CustomResourceDefinition.apiextensions.k8s.io "runnersets.actions.summerwind.dev" is invalid: [spec.validation.openAPIV3Schema.properties[spec].properties[template].properties[spec].properties[containers].items.properties[ports].items.properties[protocol].default: Required value: this property is in x-kubernetes-list-map-keys, so it must have a default or be a required property, spec.validation.openAPIV3Schema.properties[spec].properties[template].properties[spec].properties[initContainers].items.properties[ports].items.properties[protocol].default: Required value: this property is in x-kubernetes-list-map-keys, so it must have a default or be a required property]
 #
-# Note that controller-gen newer than 0.7.0 is needed due to https://github.com/kubernetes-sigs/controller-tools/issues/448
+# Note that controller-gen newer than 0.8.0 is needed due to https://github.com/kubernetes-sigs/controller-tools/issues/448
 # Otherwise ObjectMeta embedded in Spec results in empty on the storage.
 controller-gen:
 ifeq (, $(shell which controller-gen))
@@ -34,6 +34,7 @@ const EphemeralRunnerContainerName = "runner"
 // +kubebuilder:printcolumn:JSONPath=".status.jobWorkflowRef",name=JobWorkflowRef,type=string
 // +kubebuilder:printcolumn:JSONPath=".status.workflowRunId",name=WorkflowRunId,type=number
 // +kubebuilder:printcolumn:JSONPath=".status.jobDisplayName",name=JobDisplayName,type=string
+// +kubebuilder:printcolumn:JSONPath=".status.jobId",name=JobId,type=string
 // +kubebuilder:printcolumn:JSONPath=".status.message",name=Message,type=string
 // +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"

@@ -50,6 +51,10 @@ func (er *EphemeralRunner) IsDone() bool {
 	return er.Status.Phase == corev1.PodSucceeded || er.Status.Phase == corev1.PodFailed
 }

+func (er *EphemeralRunner) HasJob() bool {
+	return len(er.Status.JobID) > 0
+}
+
 func (er *EphemeralRunner) HasContainerHookConfigured() bool {
 	for i := range er.Spec.Spec.Containers {
 		if er.Spec.Spec.Containers[i].Name != EphemeralRunnerContainerName {

@@ -145,8 +150,6 @@ type EphemeralRunnerStatus struct {
 	RunnerId int `json:"runnerId,omitempty"`
 	// +optional
 	RunnerName string `json:"runnerName,omitempty"`
 	// +optional
 	RunnerJITConfig string `json:"runnerJITConfig,omitempty"`

 	// +optional
 	Failures map[string]metav1.Time `json:"failures,omitempty"`

@@ -154,6 +157,9 @@ type EphemeralRunnerStatus struct {
 	// +optional
 	JobRequestId int64 `json:"jobRequestId,omitempty"`

+	// +optional
+	JobID string `json:"jobId,omitempty"`
+
 	// +optional
 	JobRepositoryName string `json:"jobRepositoryName,omitempty"`
@@ -12,306 +12,313 @@ spec:
|
||||
listKind: HorizontalRunnerAutoscalerList
|
||||
plural: horizontalrunnerautoscalers
|
||||
shortNames:
|
||||
- hra
|
||||
- hra
|
||||
singular: horizontalrunnerautoscaler
|
||||
scope: Namespaced
|
||||
versions:
|
||||
- additionalPrinterColumns:
|
||||
- jsonPath: .spec.minReplicas
|
||||
name: Min
|
||||
type: number
|
||||
- jsonPath: .spec.maxReplicas
|
||||
name: Max
|
||||
type: number
|
||||
- jsonPath: .status.desiredReplicas
|
||||
name: Desired
|
||||
type: number
|
||||
- jsonPath: .status.scheduledOverridesSummary
|
||||
name: Schedule
|
||||
type: string
|
||||
name: v1alpha1
|
||||
schema:
|
||||
openAPIV3Schema:
|
||||
description: HorizontalRunnerAutoscaler is the Schema for the horizontalrunnerautoscaler API
|
||||
properties:
|
||||
apiVersion:
|
||||
description: |-
|
||||
APIVersion defines the versioned schema of this representation of an object.
|
||||
Servers should convert recognized schemas to the latest internal value, and
|
||||
may reject unrecognized values.
|
||||
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
|
||||
type: string
|
||||
kind:
|
||||
description: |-
|
||||
Kind is a string value representing the REST resource this object represents.
|
||||
Servers may infer this from the endpoint the client submits requests to.
|
||||
Cannot be updated.
|
||||
In CamelCase.
|
||||
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
|
||||
type: string
|
||||
metadata:
|
||||
type: object
|
||||
spec:
|
||||
description: HorizontalRunnerAutoscalerSpec defines the desired state of HorizontalRunnerAutoscaler
|
||||
properties:
|
||||
capacityReservations:
|
||||
items:
|
||||
description: |-
|
||||
CapacityReservation specifies the number of replicas temporarily added
|
||||
to the scale target until ExpirationTime.
|
||||
properties:
|
||||
effectiveTime:
|
||||
format: date-time
|
||||
type: string
|
||||
expirationTime:
|
||||
format: date-time
|
||||
type: string
|
||||
name:
|
||||
type: string
|
||||
replicas:
|
||||
type: integer
|
||||
type: object
|
||||
type: array
|
||||
githubAPICredentialsFrom:
|
||||
properties:
|
||||
secretRef:
|
||||
properties:
|
||||
name:
|
||||
type: string
|
||||
required:
|
||||
- name
|
||||
type: object
|
||||
type: object
|
||||
maxReplicas:
|
||||
description: MaxReplicas is the maximum number of replicas the deployment is allowed to scale
|
||||
type: integer
|
||||
metrics:
|
||||
description: Metrics is the collection of various metric targets to calculate desired number of runners
|
||||
items:
|
||||
properties:
|
||||
repositoryNames:
|
||||
description: |-
|
||||
RepositoryNames is the list of repository names to be used for calculating the metric.
|
||||
For example, a repository name is the REPO part of `github.com/USER/REPO`.
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
scaleDownAdjustment:
|
||||
description: |-
|
||||
ScaleDownAdjustment is the number of runners removed on scale-down.
|
||||
You can only specify either ScaleDownFactor or ScaleDownAdjustment.
|
||||
type: integer
|
||||
scaleDownFactor:
|
||||
description: |-
|
||||
ScaleDownFactor is the multiplicative factor applied to the current number of runners used
|
||||
to determine how many pods should be removed.
|
||||
type: string
|
||||
scaleDownThreshold:
|
||||
description: |-
|
||||
ScaleDownThreshold is the percentage of busy runners less than which will
|
||||
trigger the hpa to scale the runners down.
|
||||
type: string
|
||||
scaleUpAdjustment:
|
||||
description: |-
|
||||
ScaleUpAdjustment is the number of runners added on scale-up.
|
||||
You can only specify either ScaleUpFactor or ScaleUpAdjustment.
|
||||
type: integer
|
||||
scaleUpFactor:
|
||||
description: |-
|
||||
ScaleUpFactor is the multiplicative factor applied to the current number of runners used
|
||||
to determine how many pods should be added.
|
||||
type: string
|
||||
scaleUpThreshold:
|
||||
description: |-
|
||||
ScaleUpThreshold is the percentage of busy runners greater than which will
|
||||
trigger the hpa to scale runners up.
|
||||
type: string
|
||||
type:
|
||||
description: |-
|
||||
Type is the type of metric to be used for autoscaling.
|
||||
It can be TotalNumberOfQueuedAndInProgressWorkflowRuns or PercentageRunnersBusy.
|
||||
type: string
|
||||
type: object
|
||||
type: array
|
||||
minReplicas:
|
||||
description: MinReplicas is the minimum number of replicas the deployment is allowed to scale
|
||||
type: integer
|
||||
scaleDownDelaySecondsAfterScaleOut:
|
||||
- additionalPrinterColumns:
|
||||
- jsonPath: .spec.minReplicas
|
||||
name: Min
|
||||
type: number
|
||||
- jsonPath: .spec.maxReplicas
|
||||
name: Max
|
||||
type: number
|
||||
- jsonPath: .status.desiredReplicas
|
||||
name: Desired
|
||||
type: number
|
||||
- jsonPath: .status.scheduledOverridesSummary
|
||||
name: Schedule
|
||||
type: string
|
||||
name: v1alpha1
|
||||
schema:
|
||||
openAPIV3Schema:
|
||||
description: HorizontalRunnerAutoscaler is the Schema for the horizontalrunnerautoscaler
|
||||
API
|
||||
properties:
|
||||
apiVersion:
|
||||
description: |-
|
||||
APIVersion defines the versioned schema of this representation of an object.
|
||||
Servers should convert recognized schemas to the latest internal value, and
|
||||
may reject unrecognized values.
|
||||
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
|
||||
type: string
|
||||
kind:
|
||||
description: |-
|
||||
Kind is a string value representing the REST resource this object represents.
|
||||
Servers may infer this from the endpoint the client submits requests to.
|
||||
Cannot be updated.
|
||||
In CamelCase.
|
||||
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
|
||||
type: string
|
||||
metadata:
|
||||
type: object
|
||||
spec:
|
||||
description: HorizontalRunnerAutoscalerSpec defines the desired state
|
||||
of HorizontalRunnerAutoscaler
|
||||
properties:
|
||||
capacityReservations:
|
||||
items:
|
||||
description: |-
|
||||
ScaleDownDelaySecondsAfterScaleUp is the approximate delay for a scale down followed by a scale up
|
||||
Used to prevent flapping (down->up->down->... loop)
|
||||
type: integer
|
||||
scaleTargetRef:
|
||||
description: ScaleTargetRef is the reference to scaled resource like RunnerDeployment
|
||||
CapacityReservation specifies the number of replicas temporarily added
|
||||
to the scale target until ExpirationTime.
|
||||
properties:
|
||||
kind:
|
||||
description: Kind is the type of resource being referenced
|
||||
enum:
|
||||
- RunnerDeployment
|
||||
- RunnerSet
|
||||
effectiveTime:
|
||||
format: date-time
|
||||
type: string
|
||||
expirationTime:
|
||||
format: date-time
|
||||
type: string
|
||||
name:
|
||||
description: Name is the name of resource being referenced
|
||||
type: string
|
||||
replicas:
|
||||
type: integer
|
||||
type: object
|
||||
scaleUpTriggers:
|
||||
description: |-
|
||||
ScaleUpTriggers is an experimental feature to increase the desired replicas by 1
|
||||
on each webhook requested received by the webhookBasedAutoscaler.
|
||||
|
||||
This feature requires you to also enable and deploy the webhookBasedAutoscaler onto your cluster.
|
||||
|
||||
Note that the added runners remain until the next sync period at least,
|
||||
and they may or may not be used by GitHub Actions depending on the timing.
|
||||
They are intended to be used to gain "resource slack" immediately after you
|
||||
receive a webhook from GitHub, so that you can loosely expect MinReplicas runners to be always available.
|
||||
items:
|
||||
type: array
|
||||
githubAPICredentialsFrom:
|
||||
properties:
|
||||
secretRef:
|
||||
properties:
|
||||
amount:
|
||||
type: integer
|
||||
duration:
|
||||
type: string
|
||||
githubEvent:
|
||||
properties:
|
||||
checkRun:
|
||||
description: https://docs.github.com/en/actions/reference/events-that-trigger-workflows#check_run
|
||||
properties:
|
||||
names:
|
||||
description: |-
|
||||
Names is a list of GitHub Actions glob patterns.
|
||||
Any check_run event whose name matches one of patterns in the list can trigger autoscaling.
|
||||
Note that check_run name seem to equal to the job name you've defined in your actions workflow yaml file.
|
||||
So it is very likely that you can utilize this to trigger depending on the job.
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
repositories:
|
||||
description: |-
|
||||
Repositories is a list of GitHub repositories.
|
||||
Any check_run event whose repository matches one of repositories in the list can trigger autoscaling.
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
status:
|
||||
type: string
|
||||
types:
|
||||
description: 'One of: created, rerequested, or completed'
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
type: object
|
||||
pullRequest:
|
||||
description: https://docs.github.com/en/actions/reference/events-that-trigger-workflows#pull_request
|
||||
properties:
|
||||
branches:
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
types:
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
type: object
|
||||
push:
|
||||
description: |-
|
||||
PushSpec is the condition for triggering scale-up on push event
|
||||
Also see https://docs.github.com/en/actions/reference/events-that-trigger-workflows#push
|
||||
type: object
|
||||
workflowJob:
|
||||
description: https://docs.github.com/en/developers/webhooks-and-events/webhooks/webhook-events-and-payloads#workflow_job
|
||||
type: object
|
||||
type: object
|
||||
type: object
|
||||
type: array
|
||||
scheduledOverrides:
|
||||
description: |-
|
||||
ScheduledOverrides is the list of ScheduledOverride.
|
||||
It can be used to override a few fields of HorizontalRunnerAutoscalerSpec on schedule.
|
||||
The earlier a scheduled override is, the higher it is prioritized.
|
||||
items:
|
||||
description: |-
|
||||
ScheduledOverride can be used to override a few fields of HorizontalRunnerAutoscalerSpec on schedule.
|
||||
A schedule can optionally be recurring, so that the corresponding override happens every day, week, month, or year.
|
||||
properties:
|
||||
endTime:
|
||||
description: EndTime is the time at which the first override ends.
|
||||
format: date-time
|
||||
type: string
|
||||
minReplicas:
|
||||
description: |-
|
||||
MinReplicas is the number of runners while overriding.
|
||||
If omitted, it doesn't override minReplicas.
|
||||
minimum: 0
|
||||
nullable: true
|
||||
type: integer
|
||||
recurrenceRule:
|
||||
properties:
|
||||
frequency:
|
||||
description: |-
|
||||
Frequency is the name of a predefined interval of each recurrence.
|
||||
The valid values are "Daily", "Weekly", "Monthly", and "Yearly".
|
||||
If empty, the corresponding override happens only once.
|
||||
enum:
|
||||
- Daily
|
||||
- Weekly
|
||||
- Monthly
|
||||
- Yearly
|
||||
type: string
|
||||
untilTime:
|
||||
description: |-
|
||||
UntilTime is the time of the final recurrence.
|
||||
If empty, the schedule recurs forever.
|
||||
format: date-time
|
||||
type: string
|
||||
type: object
|
||||
startTime:
|
||||
description: StartTime is the time at which the first override starts.
|
||||
format: date-time
|
||||
name:
|
||||
type: string
|
||||
required:
|
||||
- endTime
|
||||
- startTime
|
||||
- name
|
||||
type: object
|
||||
type: array
|
||||
type: object
|
||||
status:
|
||||
properties:
|
||||
cacheEntries:
|
||||
items:
|
||||
properties:
|
||||
expirationTime:
|
||||
format: date-time
|
||||
type: object
|
||||
maxReplicas:
|
||||
description: MaxReplicas is the maximum number of replicas the deployment
|
||||
is allowed to scale
|
||||
type: integer
|
||||
metrics:
|
||||
description: Metrics is the collection of various metric targets to
|
||||
calculate desired number of runners
|
||||
items:
|
||||
properties:
|
||||
repositoryNames:
|
||||
description: |-
|
||||
RepositoryNames is the list of repository names to be used for calculating the metric.
|
||||
For example, a repository name is the REPO part of `github.com/USER/REPO`.
|
||||
items:
|
||||
type: string
|
||||
key:
|
||||
type: string
|
||||
value:
|
||||
type: integer
|
||||
type: object
|
||||
type: array
|
||||
desiredReplicas:
|
||||
type: array
|
||||
scaleDownAdjustment:
|
||||
description: |-
|
||||
ScaleDownAdjustment is the number of runners removed on scale-down.
|
||||
You can only specify either ScaleDownFactor or ScaleDownAdjustment.
|
||||
type: integer
|
||||
scaleDownFactor:
|
||||
description: |-
|
||||
ScaleDownFactor is the multiplicative factor applied to the current number of runners used
|
||||
to determine how many pods should be removed.
|
||||
type: string
|
||||
scaleDownThreshold:
|
||||
description: |-
|
||||
ScaleDownThreshold is the percentage of busy runners less than which will
|
||||
trigger the hpa to scale the runners down.
|
||||
type: string
|
||||
scaleUpAdjustment:
|
||||
description: |-
|
||||
ScaleUpAdjustment is the number of runners added on scale-up.
|
||||
You can only specify either ScaleUpFactor or ScaleUpAdjustment.
|
||||
type: integer
|
||||
scaleUpFactor:
|
||||
description: |-
|
||||
ScaleUpFactor is the multiplicative factor applied to the current number of runners used
|
||||
to determine how many pods should be added.
|
||||
type: string
|
||||
scaleUpThreshold:
|
||||
description: |-
|
||||
ScaleUpThreshold is the percentage of busy runners greater than which will
|
||||
trigger the hpa to scale runners up.
|
||||
type: string
|
||||
type:
|
||||
description: |-
|
||||
Type is the type of metric to be used for autoscaling.
|
||||
It can be TotalNumberOfQueuedAndInProgressWorkflowRuns or PercentageRunnersBusy.
|
||||
type: string
|
||||
type: object
|
||||
type: array
|
||||
minReplicas:
|
||||
description: MinReplicas is the minimum number of replicas the deployment
|
||||
is allowed to scale
|
||||
type: integer
|
||||
scaleDownDelaySecondsAfterScaleOut:
|
||||
description: |-
|
||||
ScaleDownDelaySecondsAfterScaleUp is the approximate delay for a scale down followed by a scale up
|
||||
Used to prevent flapping (down->up->down->... loop)
|
||||
type: integer
|
||||
scaleTargetRef:
|
||||
description: ScaleTargetRef is the reference to scaled resource like
|
||||
RunnerDeployment
|
||||
properties:
|
||||
kind:
|
||||
description: Kind is the type of resource being referenced
|
||||
enum:
|
||||
- RunnerDeployment
|
||||
- RunnerSet
|
||||
type: string
|
||||
name:
|
||||
description: Name is the name of resource being referenced
|
||||
type: string
|
||||
type: object
|
||||
scaleUpTriggers:
|
||||
description: |-
|
||||
ScaleUpTriggers is an experimental feature to increase the desired replicas by 1
|
||||
on each webhook requested received by the webhookBasedAutoscaler.
|
||||
|
||||
This feature requires you to also enable and deploy the webhookBasedAutoscaler onto your cluster.
|
||||
|
||||
Note that the added runners remain until the next sync period at least,
|
||||
and they may or may not be used by GitHub Actions depending on the timing.
|
||||
They are intended to be used to gain "resource slack" immediately after you
|
||||
receive a webhook from GitHub, so that you can loosely expect MinReplicas runners to be always available.
|
||||
items:
|
||||
properties:
|
||||
amount:
|
||||
type: integer
|
||||
duration:
|
||||
type: string
|
||||
githubEvent:
|
||||
properties:
|
||||
checkRun:
|
||||
description: https://docs.github.com/en/actions/reference/events-that-trigger-workflows#check_run
|
||||
properties:
|
||||
names:
|
||||
description: |-
|
||||
Names is a list of GitHub Actions glob patterns.
|
||||
Any check_run event whose name matches one of patterns in the list can trigger autoscaling.
|
||||
Note that check_run name seem to equal to the job name you've defined in your actions workflow yaml file.
|
||||
So it is very likely that you can utilize this to trigger depending on the job.
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
repositories:
|
||||
description: |-
|
||||
Repositories is a list of GitHub repositories.
|
||||
Any check_run event whose repository matches one of repositories in the list can trigger autoscaling.
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
status:
|
||||
type: string
|
||||
types:
|
||||
description: 'One of: created, rerequested, or completed'
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
type: object
|
||||
pullRequest:
|
||||
description: https://docs.github.com/en/actions/reference/events-that-trigger-workflows#pull_request
|
||||
properties:
|
||||
branches:
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
types:
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
type: object
|
||||
push:
|
||||
description: |-
|
||||
PushSpec is the condition for triggering scale-up on push event
|
||||
Also see https://docs.github.com/en/actions/reference/events-that-trigger-workflows#push
|
||||
type: object
|
||||
workflowJob:
|
||||
description: https://docs.github.com/en/developers/webhooks-and-events/webhooks/webhook-events-and-payloads#workflow_job
|
||||
type: object
|
||||
type: object
|
||||
type: object
|
||||
type: array
|
||||
scheduledOverrides:
|
||||
description: |-
|
||||
ScheduledOverrides is the list of ScheduledOverride.
|
||||
It can be used to override a few fields of HorizontalRunnerAutoscalerSpec on schedule.
|
||||
The earlier a scheduled override is, the higher it is prioritized.
|
||||
items:
|
||||
description: |-
|
||||
DesiredReplicas is the total number of desired, non-terminated and latest pods to be set for the primary RunnerSet
|
||||
This doesn't include outdated pods while upgrading the deployment and replacing the runnerset.
|
||||
type: integer
|
||||
lastSuccessfulScaleOutTime:
|
||||
format: date-time
|
||||
nullable: true
|
||||
type: string
|
||||
observedGeneration:
|
||||
description: |-
|
||||
ObservedGeneration is the most recent generation observed for the target. It corresponds to e.g.
|
||||
RunnerDeployment's generation, which is updated on mutation by the API Server.
|
||||
format: int64
|
||||
type: integer
|
||||
scheduledOverridesSummary:
|
||||
description: |-
|
||||
ScheduledOverridesSummary is the summary of active and upcoming scheduled overrides to be shown in e.g. a column of a `kubectl get hra` output
|
||||
for observability.
|
||||
type: string
|
||||
type: object
|
||||
type: object
|
||||
served: true
|
||||
storage: true
|
||||
subresources:
|
||||
status: {}
|
||||
preserveUnknownFields: false
|
||||
ScheduledOverride can be used to override a few fields of HorizontalRunnerAutoscalerSpec on schedule.
|
||||
A schedule can optionally be recurring, so that the corresponding override happens every day, week, month, or year.
|
||||
properties:
|
||||
endTime:
|
||||
description: EndTime is the time at which the first override
|
||||
ends.
|
||||
format: date-time
|
||||
type: string
|
||||
minReplicas:
|
||||
description: |-
|
||||
MinReplicas is the number of runners while overriding.
|
||||
If omitted, it doesn't override minReplicas.
|
||||
minimum: 0
|
||||
nullable: true
|
||||
type: integer
|
||||
recurrenceRule:
|
||||
properties:
|
||||
frequency:
|
||||
description: |-
|
||||
Frequency is the name of a predefined interval of each recurrence.
|
||||
The valid values are "Daily", "Weekly", "Monthly", and "Yearly".
|
||||
If empty, the corresponding override happens only once.
|
||||
enum:
|
||||
- Daily
|
||||
- Weekly
|
||||
- Monthly
|
||||
- Yearly
|
||||
type: string
|
||||
untilTime:
|
||||
description: |-
|
||||
UntilTime is the time of the final recurrence.
|
||||
If empty, the schedule recurs forever.
|
||||
format: date-time
|
||||
type: string
|
||||
type: object
|
||||
startTime:
|
||||
description: StartTime is the time at which the first override
|
||||
starts.
|
||||
format: date-time
|
||||
type: string
|
||||
required:
|
||||
- endTime
|
||||
- startTime
|
||||
type: object
|
||||
type: array
|
||||
type: object
|
||||
status:
|
||||
properties:
|
||||
cacheEntries:
|
||||
items:
|
||||
properties:
|
||||
expirationTime:
|
||||
format: date-time
|
||||
type: string
|
||||
key:
|
||||
type: string
|
||||
value:
|
||||
type: integer
|
||||
type: object
|
||||
type: array
|
||||
desiredReplicas:
|
||||
description: |-
|
||||
DesiredReplicas is the total number of desired, non-terminated and latest pods to be set for the primary RunnerSet
|
||||
This doesn't include outdated pods while upgrading the deployment and replacing the runnerset.
|
||||
type: integer
|
||||
lastSuccessfulScaleOutTime:
|
||||
format: date-time
|
||||
nullable: true
|
||||
type: string
|
||||
observedGeneration:
|
||||
description: |-
|
||||
ObservedGeneration is the most recent generation observed for the target. It corresponds to e.g.
|
||||
RunnerDeployment's generation, which is updated on mutation by the API Server.
|
||||
format: int64
|
||||
type: integer
|
||||
scheduledOverridesSummary:
|
||||
description: |-
|
||||
ScheduledOverridesSummary is the summary of active and upcoming scheduled overrides to be shown in e.g. a column of a `kubectl get hra` output
|
||||
for observability.
|
||||
type: string
|
||||
type: object
|
||||
type: object
|
||||
served: true
|
||||
storage: true
|
||||
subresources:
|
||||
status: {}
|
||||
|
||||
@@ -9348,4 +9348,3 @@ spec:
   storage: true
   subresources:
     status: {}
-  preserveUnknownFields: false

@@ -9322,4 +9322,3 @@ spec:
   storage: true
   subresources:
     status: {}
-  preserveUnknownFields: false

@@ -9328,4 +9328,3 @@ spec:
   storage: true
   subresources:
     status: {}
-  preserveUnknownFields: false

@@ -8389,4 +8389,3 @@ spec:
   storage: true
   subresources:
     status: {}
-  preserveUnknownFields: false
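These hunks drop the explicit `preserveUnknownFields: false` entry from the generated CRDs, matching the Makefile change above that removed the yq loop which injected it. In `apiextensions.k8s.io/v1` that spec field is deprecated and false is effectively the only accepted value, so omitting it should not change behavior: unknown fields are pruned by default unless a specific schema subtree opts out. A minimal sketch of the per-field opt-out mechanism, using a hypothetical `rawConfig` property:

```yaml
# Hypothetical CRD schema fragment: v1 CRDs prune unknown fields by default; a subtree
# can keep arbitrary content only via the schema extension below.
openAPIV3Schema:
  type: object
  properties:
    spec:
      type: object
      properties:
        rawConfig:
          type: object
          x-kubernetes-preserve-unknown-fields: true
```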
@@ -15,13 +15,13 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.12.0
+version: 0.12.1

 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to
 # follow Semantic Versioning. They should reflect the version the application is using.
 # It is recommended to use it with quotes.
-appVersion: "0.12.0"
+appVersion: "0.12.1"

 home: https://github.com/actions/actions-runner-controller
(File diff suppressed because it is too large.)
@@ -15571,4 +15571,3 @@ spec:
   storage: true
   subresources:
     status: {}
-  preserveUnknownFields: false
@@ -36,6 +36,9 @@ spec:
     - jsonPath: .status.jobDisplayName
       name: JobDisplayName
       type: string
+    - jsonPath: .status.jobId
+      name: JobId
+      type: string
     - jsonPath: .status.message
       name: Message
       type: string

@@ -7846,6 +7849,8 @@ spec:
             type: object
           jobDisplayName:
             type: string
+          jobId:
+            type: string
           jobRepositoryName:
             type: string
           jobRequestId:

@@ -7874,8 +7879,6 @@ spec:
             type: string
           runnerId:
             type: integer
           runnerJITConfig:
             type: string
           runnerName:
             type: string
           workflowRunId:

@@ -7887,4 +7890,3 @@ spec:
   storage: true
   subresources:
     status: {}
-  preserveUnknownFields: false
@@ -7859,4 +7859,3 @@ spec:
   storage: true
   subresources:
     status: {}
-  preserveUnknownFields: false
@@ -129,11 +129,3 @@ Create the name of the service account to use
 {{- define "gha-runner-scale-set-controller.leaderElectionRoleBinding" -}}
 {{- include "gha-runner-scale-set-controller.fullname" . }}-leader-election
 {{- end }}
-
-{{- define "gha-runner-scale-set-controller.imagePullSecretsNames" -}}
-{{- $names := list }}
-{{- range $k, $v := . }}
-{{- $names = append $names $v.name }}
-{{- end }}
-{{- $names | join ","}}
-{{- end }}
@@ -54,7 +54,9 @@ spec:
           - "--leader-election-id={{ include "gha-runner-scale-set-controller.fullname" . }}"
           {{- end }}
           {{- with .Values.imagePullSecrets }}
-          - "--auto-scaler-image-pull-secrets={{ include "gha-runner-scale-set-controller.imagePullSecretsNames" . }}"
+          {{- range . }}
+          - "--auto-scaler-image-pull-secrets={{- .name -}}"
+          {{- end }}
           {{- end }}
           {{- with .Values.flags.logLevel }}
           - "--log-level={{ . }}"
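With this change the controller deployment no longer joins all image pull secret names into a single comma-separated flag (the `imagePullSecretsNames` helper removed above); it now emits one `--auto-scaler-image-pull-secrets` argument per secret, which is what the updated chart test below asserts. A sketch of how the rendered container args differ for a values file with two pull secrets (secret names taken from that test, everything else illustrative):

```yaml
# Hypothetical values:
#   imagePullSecrets:
#     - name: dockerhub
#     - name: ghcr
args:
  # before: one comma-joined flag
  # - "--auto-scaler-image-pull-secrets=dockerhub,ghcr"
  # after: one flag per secret
  - "--auto-scaler-image-pull-secrets=dockerhub"
  - "--auto-scaler-image-pull-secrets=ghcr"
```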
@@ -683,7 +683,8 @@ func TestTemplate_ControllerDeployment_ForwardImagePullSecrets(t *testing.T) {

 	expectedArgs := []string{
 		"--auto-scaling-runner-set-only",
-		"--auto-scaler-image-pull-secrets=dockerhub,ghcr",
+		"--auto-scaler-image-pull-secrets=dockerhub",
+		"--auto-scaler-image-pull-secrets=ghcr",
 		"--log-level=debug",
 		"--log-format=text",
 		"--update-strategy=immediate",

@@ -1079,6 +1080,7 @@ func TestDeployment_excludeLabelPropagationPrefixes(t *testing.T) {
 	assert.Contains(t, container.Args, "--exclude-label-propagation-prefix=prefix.com/")
 	assert.Contains(t, container.Args, "--exclude-label-propagation-prefix=complete.io/label")
 }

 func TestNamespaceOverride(t *testing.T) {
 	t.Parallel()
@@ -15,13 +15,13 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.12.0
+version: 0.12.1

 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to
 # follow Semantic Versioning. They should reflect the version the application is using.
 # It is recommended to use it with quotes.
-appVersion: "0.12.0"
+appVersion: "0.12.1"

 home: https://github.com/actions/actions-runner-controller
@@ -62,12 +62,12 @@ app.kubernetes.io/instance: {{ include "gha-runner-scale-set.scale-set-name" . }
 {{- fail "Values.githubConfigSecret is required for setting auth with GitHub server." }}
 {{- end }}
 {{- else }}
-{{- include "gha-runner-scale-set.fullname" . }}-github-secret
+{{- include "gha-runner-scale-set.fullname" . | replace "_" "-" }}-github-secret
 {{- end }}
 {{- end }}

 {{- define "gha-runner-scale-set.noPermissionServiceAccountName" -}}
-{{- include "gha-runner-scale-set.fullname" . }}-no-permission
+{{- include "gha-runner-scale-set.fullname" . | replace "_" "-" }}-no-permission
 {{- end }}

 {{- define "gha-runner-scale-set.kubeModeRoleName" -}}

@@ -79,7 +79,7 @@ app.kubernetes.io/instance: {{ include "gha-runner-scale-set.scale-set-name" . }
 {{- end }}

 {{- define "gha-runner-scale-set.kubeModeServiceAccountName" -}}
-{{- include "gha-runner-scale-set.fullname" . }}-kube-mode
+{{- include "gha-runner-scale-set.fullname" . | replace "_" "-" }}-kube-mode
 {{- end }}

 {{- define "gha-runner-scale-set.dind-init-container" -}}
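The helper changes above sanitize the generated Secret and ServiceAccount names: any underscore in the computed fullname is replaced with a hyphen before the suffix is appended, since Kubernetes object names must be valid RFC 1123 names and cannot contain `_`. A minimal sketch of the pattern (the `example.*` template names are hypothetical, not the chart's actual helpers):

```yaml
{{- define "example.kubeModeServiceAccountName" -}}
{{- include "example.fullname" . | replace "_" "-" }}-kube-mode
{{- end }}
```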
@@ -113,9 +113,9 @@ startupProbe:
     command:
       - docker
       - info
-    initialDelaySeconds: 0
-    failureThreshold: 24
-    periodSeconds: 5
+  initialDelaySeconds: 0
+  failureThreshold: 24
+  periodSeconds: 5
 {{- end }}
 volumeMounts:
   - name: work
@@ -377,6 +377,101 @@ volumeMounts:
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
{{- define "gha-runner-scale-set.kubernetes-novolume-mode-runner-container" -}}
|
||||
{{- $tlsConfig := (default (dict) .Values.githubServerTLS) }}
|
||||
{{- range $i, $container := .Values.template.spec.containers }}
|
||||
{{- if eq $container.name "runner" }}
|
||||
{{- $setRunnerImage := "" }}
|
||||
{{- range $key, $val := $container }}
|
||||
{{- if and (ne $key "env") (ne $key "volumeMounts") (ne $key "name") }}
|
||||
{{- if eq $key "image" }}
|
||||
{{- $setRunnerImage = $val }}
|
||||
{{- end }}
|
||||
{{ $key }}: {{ $val | toYaml | nindent 2 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- $setContainerHooks := 1 }}
|
||||
{{- $setPodName := 1 }}
|
||||
{{- $setRequireJobContainer := 1 }}
|
||||
{{- $setActionsRunnerImage := 1 }}
|
||||
{{- $setNodeExtraCaCerts := 0 }}
|
||||
{{- $setRunnerUpdateCaCerts := 0 }}
|
||||
{{- if $tlsConfig.runnerMountPath }}
|
||||
{{- $setNodeExtraCaCerts = 1 }}
|
||||
{{- $setRunnerUpdateCaCerts = 1 }}
|
||||
{{- end }}
|
||||
env:
|
||||
{{- with $container.env }}
|
||||
{{- range $i, $env := . }}
|
||||
{{- if eq $env.name "ACTIONS_RUNNER_CONTAINER_HOOKS" }}
|
||||
{{- $setContainerHooks = 0 }}
|
||||
{{- end }}
|
||||
{{- if eq $env.name "ACTIONS_RUNNER_IMAGE" }}
|
||||
{{- $setActionsRunnerImage = 0 }}
|
||||
{{- end }}
|
||||
{{- if eq $env.name "ACTIONS_RUNNER_POD_NAME" }}
|
||||
{{- $setPodName = 0 }}
|
||||
{{- end }}
|
||||
{{- if eq $env.name "ACTIONS_RUNNER_REQUIRE_JOB_CONTAINER" }}
|
||||
{{- $setRequireJobContainer = 0 }}
|
||||
{{- end }}
|
||||
{{- if eq $env.name "NODE_EXTRA_CA_CERTS" }}
|
||||
{{- $setNodeExtraCaCerts = 0 }}
|
||||
{{- end }}
|
||||
{{- if eq $env.name "RUNNER_UPDATE_CA_CERTS" }}
|
||||
{{- $setRunnerUpdateCaCerts = 0 }}
|
||||
{{- end }}
|
||||
- {{ $env | toYaml | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if $setContainerHooks }}
|
||||
- name: ACTIONS_RUNNER_CONTAINER_HOOKS
|
||||
value: /home/runner/k8s-novolume/index.js
|
||||
{{- end }}
|
||||
{{- if $setPodName }}
|
||||
- name: ACTIONS_RUNNER_POD_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.name
|
||||
{{- end }}
|
||||
{{- if $setRequireJobContainer }}
|
||||
- name: ACTIONS_RUNNER_REQUIRE_JOB_CONTAINER
|
||||
value: "true"
|
||||
{{- end }}
|
||||
{{- if $setActionsRunnerImage }}
|
||||
- name: ACTIONS_RUNNER_IMAGE
|
||||
value: "{{- $setRunnerImage -}}"
|
||||
{{- end }}
|
||||
{{- if $setNodeExtraCaCerts }}
|
||||
- name: NODE_EXTRA_CA_CERTS
|
||||
value: {{ clean (print $tlsConfig.runnerMountPath "/" $tlsConfig.certificateFrom.configMapKeyRef.key) }}
|
||||
{{- end }}
|
||||
{{- if $setRunnerUpdateCaCerts }}
|
||||
- name: RUNNER_UPDATE_CA_CERTS
|
||||
value: "1"
|
||||
{{- end }}
|
||||
{{- $mountGitHubServerTLS := 0 }}
|
||||
{{- if $tlsConfig.runnerMountPath }}
|
||||
{{- $mountGitHubServerTLS = 1 }}
|
||||
{{- end }}
|
||||
volumeMounts:
|
||||
{{- with $container.volumeMounts }}
|
||||
{{- range $i, $volMount := . }}
|
||||
{{- if eq $volMount.name "github-server-tls-cert" }}
|
||||
{{- $mountGitHubServerTLS = 0 }}
|
||||
{{- end }}
|
||||
- {{ $volMount | toYaml | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if $mountGitHubServerTLS }}
|
||||
- name: github-server-tls-cert
|
||||
mountPath: {{ clean (print $tlsConfig.runnerMountPath "/" $tlsConfig.certificateFrom.configMapKeyRef.key) }}
|
||||
subPath: {{ $tlsConfig.certificateFrom.configMapKeyRef.key }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
{{- define "gha-runner-scale-set.default-mode-runner-containers" -}}
|
||||
{{- $tlsConfig := (default (dict) .Values.githubServerTLS) }}
|
||||
{{- range $i, $container := .Values.template.spec.containers }}
|
||||
|
||||
@@ -8,7 +8,7 @@ metadata:
|
||||
{{- if gt (len (include "gha-runner-scale-set.namespace" .)) 63 }}
|
||||
{{ fail "Namespace must have up to 63 characters" }}
|
||||
{{- end }}
|
||||
name: {{ include "gha-runner-scale-set.scale-set-name" . }}
|
||||
name: {{ include "gha-runner-scale-set.scale-set-name" . | replace "_" "-" }}
|
||||
namespace: {{ include "gha-runner-scale-set.namespace" . }}
|
||||
labels:
|
||||
{{- with .Values.labels }}
|
||||
@@ -37,12 +37,12 @@ metadata:
|
||||
{{- end }}
|
||||
actions.github.com/cleanup-manager-role-binding: {{ include "gha-runner-scale-set.managerRoleBindingName" . }}
|
||||
actions.github.com/cleanup-manager-role-name: {{ include "gha-runner-scale-set.managerRoleName" . }}
|
||||
{{- if and $containerMode (eq $containerMode.type "kubernetes") (not .Values.template.spec.serviceAccountName) }}
|
||||
{{- if and (or (eq $containerMode.type "kubernetes") (eq $containerMode.type "kubernetes-novolume")) (not .Values.template.spec.serviceAccountName) }}
|
||||
actions.github.com/cleanup-kubernetes-mode-role-binding-name: {{ include "gha-runner-scale-set.kubeModeRoleBindingName" . }}
|
||||
actions.github.com/cleanup-kubernetes-mode-role-name: {{ include "gha-runner-scale-set.kubeModeRoleName" . }}
|
||||
actions.github.com/cleanup-kubernetes-mode-service-account-name: {{ include "gha-runner-scale-set.kubeModeServiceAccountName" . }}
|
||||
{{- end }}
|
||||
{{- if and (ne $containerMode.type "kubernetes") (not .Values.template.spec.serviceAccountName) }}
|
||||
{{- if and (ne $containerMode.type "kubernetes") (ne $containerMode.type "kubernetes-novolume") (not .Values.template.spec.serviceAccountName) }}
|
||||
actions.github.com/cleanup-no-permission-service-account-name: {{ include "gha-runner-scale-set.noPermissionServiceAccountName" . }}
|
||||
{{- end }}
|
||||
|
||||
@@ -157,7 +157,7 @@ spec:
|
||||
restartPolicy: Never
|
||||
{{- end }}
|
||||
{{- $containerMode := .Values.containerMode }}
|
||||
{{- if eq $containerMode.type "kubernetes" }}
|
||||
{{- if or (eq $containerMode.type "kubernetes") (eq $containerMode.type "kubernetes-novolume") }}
|
||||
serviceAccountName: {{ default (include "gha-runner-scale-set.kubeModeServiceAccountName" .) .Values.template.spec.serviceAccountName }}
|
||||
{{- else }}
|
||||
serviceAccountName: {{ default (include "gha-runner-scale-set.noPermissionServiceAccountName" .) .Values.template.spec.serviceAccountName }}
|
||||
@@ -166,11 +166,11 @@ spec:
|
||||
initContainers:
|
||||
{{- if eq $containerMode.type "dind" }}
|
||||
- name: init-dind-externals
|
||||
{{- include "gha-runner-scale-set.dind-init-container" . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- if (ge (.Capabilities.KubeVersion.Minor | int) 29) }}
|
||||
{{- include "gha-runner-scale-set.dind-init-container" . | nindent 8 }}
|
||||
{{- if (ge (.Capabilities.KubeVersion.Minor | int) 29) }}
|
||||
- name: dind
|
||||
{{- include "gha-runner-scale-set.dind-container" . | nindent 8 }}
|
||||
{{- include "gha-runner-scale-set.dind-container" . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- with .Values.template.spec.initContainers }}
|
||||
{{- toYaml . | nindent 6 }}
|
||||
@@ -189,11 +189,15 @@ spec:
|
||||
- name: runner
|
||||
{{- include "gha-runner-scale-set.kubernetes-mode-runner-container" . | nindent 8 }}
|
||||
{{- include "gha-runner-scale-set.non-runner-containers" . | nindent 6 }}
|
||||
{{- else if eq $containerMode.type "kubernetes-novolume" }}
|
||||
- name: runner
|
||||
{{- include "gha-runner-scale-set.kubernetes-novolume-mode-runner-container" . | nindent 8 }}
|
||||
{{- include "gha-runner-scale-set.non-runner-containers" . | nindent 6 }}
|
||||
{{- else }}
|
||||
{{- include "gha-runner-scale-set.default-mode-runner-containers" . | nindent 6 }}
|
||||
{{- end }}
|
||||
{{- $tlsConfig := (default (dict) .Values.githubServerTLS) }}
|
||||
{{- if or .Values.template.spec.volumes (eq $containerMode.type "dind") (eq $containerMode.type "kubernetes") $tlsConfig.runnerMountPath }}
|
||||
{{- if or .Values.template.spec.volumes (eq $containerMode.type "dind") (eq $containerMode.type "kubernetes") (eq $containerMode.type "kubernetes-novolume") $tlsConfig.runnerMountPath }}
|
||||
volumes:
|
||||
{{- if $tlsConfig.runnerMountPath }}
|
||||
{{- include "gha-runner-scale-set.tls-volume" $tlsConfig | nindent 6 }}
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{{- $containerMode := .Values.containerMode }}
|
||||
{{- $hasCustomResourceMeta := (and .Values.resourceMeta .Values.resourceMeta.kubernetesModeRole) }}
|
||||
{{- if and (eq $containerMode.type "kubernetes") (not .Values.template.spec.serviceAccountName) }}
|
||||
{{- if and (or (eq $containerMode.type "kubernetes") (eq $containerMode.type "kubernetes-novolume")) (not .Values.template.spec.serviceAccountName) }}
|
||||
# default permission for runner pod service account in kubernetes mode (container hook)
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
@@ -38,9 +38,11 @@ rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["pods/log"]
|
||||
verbs: ["get", "list", "watch",]
|
||||
{{- if ne $containerMode.type "kubernetes-novolume" }}
|
||||
- apiGroups: ["batch"]
|
||||
resources: ["jobs"]
|
||||
verbs: ["get", "list", "create", "delete"]
|
||||
{{- end }}
|
||||
- apiGroups: [""]
|
||||
resources: ["secrets"]
|
||||
verbs: ["get", "list", "create", "delete"]
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{{- $containerMode := .Values.containerMode }}
|
||||
{{- $hasCustomResourceMeta := (and .Values.resourceMeta .Values.resourceMeta.kubernetesModeRoleBinding) }}
|
||||
{{- if and (eq $containerMode.type "kubernetes") (not .Values.template.spec.serviceAccountName) }}
|
||||
{{- if and (or (eq $containerMode.type "kubernetes") (eq $containerMode.type "kubernetes-novolume")) (not .Values.template.spec.serviceAccountName) }}
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{{- $containerMode := .Values.containerMode }}
|
||||
{{- $hasCustomResourceMeta := (and .Values.resourceMeta .Values.resourceMeta.kubernetesModeServiceAccount) }}
|
||||
{{- if and (eq $containerMode.type "kubernetes") (not .Values.template.spec.serviceAccountName) }}
|
||||
{{- if and (or (eq $containerMode.type "kubernetes") (eq $containerMode.type "kubernetes-novolume")) (not .Values.template.spec.serviceAccountName) }}
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
|
||||
@@ -204,7 +204,6 @@ func TestTemplateRenderedSetServiceAccountToNoPermission(t *testing.T) {
|
||||
|
||||
func TestTemplateRenderedSetServiceAccountToKubeMode(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Path to the helm chart we will test
|
||||
helmChartPath, err := filepath.Abs("../../gha-runner-scale-set")
|
||||
require.NoError(t, err)
|
||||
@@ -270,6 +269,72 @@ func TestTemplateRenderedSetServiceAccountToKubeMode(t *testing.T) {
|
||||
assert.Equal(t, expectedServiceAccountName, ars.Annotations[actionsgithubcom.AnnotationKeyKubernetesModeServiceAccountName])
|
||||
}
|
||||
|
||||
func TestTemplateRenderedSetServiceAccountToKubeNoVolumeMode(t *testing.T) {
|
||||
t.Parallel()
|
||||
// Path to the helm chart we will test
|
||||
helmChartPath, err := filepath.Abs("../../gha-runner-scale-set")
|
||||
require.NoError(t, err)
|
||||
|
||||
releaseName := "test-runners"
|
||||
namespaceName := "test-" + strings.ToLower(random.UniqueId())
|
||||
|
||||
options := &helm.Options{
|
||||
Logger: logger.Discard,
|
||||
SetValues: map[string]string{
|
||||
"githubConfigUrl": "https://github.com/actions",
|
||||
"githubConfigSecret.github_token": "gh_token12345",
|
||||
"containerMode.type": "kubernetes-novolume",
|
||||
"controllerServiceAccount.name": "arc",
|
||||
"controllerServiceAccount.namespace": "arc-system",
|
||||
},
|
||||
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
|
||||
}
|
||||
|
||||
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/kube_mode_serviceaccount.yaml"})
|
||||
var serviceAccount corev1.ServiceAccount
|
||||
helm.UnmarshalK8SYaml(t, output, &serviceAccount)
|
||||
|
||||
assert.Equal(t, namespaceName, serviceAccount.Namespace)
|
||||
assert.Equal(t, "test-runners-gha-rs-kube-mode", serviceAccount.Name)
|
||||
assert.Equal(t, "actions.github.com/cleanup-protection", serviceAccount.Finalizers[0])
|
||||
|
||||
output = helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/kube_mode_role.yaml"})
|
||||
var role rbacv1.Role
|
||||
helm.UnmarshalK8SYaml(t, output, &role)
|
||||
|
||||
assert.Equal(t, namespaceName, role.Namespace)
|
||||
assert.Equal(t, "test-runners-gha-rs-kube-mode", role.Name)
|
||||
|
||||
assert.Equal(t, "actions.github.com/cleanup-protection", role.Finalizers[0])
|
||||
|
||||
assert.Len(t, role.Rules, 4, "kube mode role should have 4 rules")
|
||||
assert.Equal(t, "pods", role.Rules[0].Resources[0])
|
||||
assert.Equal(t, "pods/exec", role.Rules[1].Resources[0])
|
||||
assert.Equal(t, "pods/log", role.Rules[2].Resources[0])
|
||||
assert.Equal(t, "secrets", role.Rules[3].Resources[0])
|
||||
|
||||
output = helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/kube_mode_role_binding.yaml"})
|
||||
var roleBinding rbacv1.RoleBinding
|
||||
helm.UnmarshalK8SYaml(t, output, &roleBinding)
|
||||
|
||||
assert.Equal(t, namespaceName, roleBinding.Namespace)
|
||||
assert.Equal(t, "test-runners-gha-rs-kube-mode", roleBinding.Name)
|
||||
assert.Len(t, roleBinding.Subjects, 1)
|
||||
assert.Equal(t, "test-runners-gha-rs-kube-mode", roleBinding.Subjects[0].Name)
|
||||
assert.Equal(t, namespaceName, roleBinding.Subjects[0].Namespace)
|
||||
assert.Equal(t, "test-runners-gha-rs-kube-mode", roleBinding.RoleRef.Name)
|
||||
assert.Equal(t, "Role", roleBinding.RoleRef.Kind)
|
||||
assert.Equal(t, "actions.github.com/cleanup-protection", serviceAccount.Finalizers[0])
|
||||
|
||||
output = helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"})
|
||||
var ars v1alpha1.AutoscalingRunnerSet
|
||||
helm.UnmarshalK8SYaml(t, output, &ars)
|
||||
|
||||
expectedServiceAccountName := "test-runners-gha-rs-kube-mode"
|
||||
assert.Equal(t, expectedServiceAccountName, ars.Spec.Template.Spec.ServiceAccountName)
|
||||
assert.Equal(t, expectedServiceAccountName, ars.Annotations[actionsgithubcom.AnnotationKeyKubernetesModeServiceAccountName])
|
||||
}
|
||||
|
||||
func TestTemplateRenderedUserProvideSetServiceAccount(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
@@ -961,6 +1026,65 @@ func TestTemplateRenderedAutoScalingRunnerSet_EnableKubernetesMode(t *testing.T)
|
||||
assert.NotNil(t, ars.Spec.Template.Spec.Volumes[0].Ephemeral, "Template.Spec should have 1 ephemeral volume")
|
||||
}
|
||||
|
||||
func TestTemplateRenderedAutoScalingRunnerSet_EnableKubernetesModeNoVolume(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Path to the helm chart we will test
|
||||
helmChartPath, err := filepath.Abs("../../gha-runner-scale-set")
|
||||
require.NoError(t, err)
|
||||
|
||||
releaseName := "test-runners"
|
||||
namespaceName := "test-" + strings.ToLower(random.UniqueId())
|
||||
|
||||
options := &helm.Options{
|
||||
Logger: logger.Discard,
|
||||
SetValues: map[string]string{
|
||||
"githubConfigUrl": "https://github.com/actions",
|
||||
"githubConfigSecret.github_token": "gh_token12345",
|
||||
"containerMode.type": "kubernetes-novolume",
|
||||
"controllerServiceAccount.name": "arc",
|
||||
"controllerServiceAccount.namespace": "arc-system",
|
||||
},
|
||||
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
|
||||
}
|
||||
|
||||
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"})
|
||||
|
||||
var ars v1alpha1.AutoscalingRunnerSet
|
||||
helm.UnmarshalK8SYaml(t, output, &ars)
|
||||
|
||||
assert.Equal(t, namespaceName, ars.Namespace)
|
||||
assert.Equal(t, "test-runners", ars.Name)
|
||||
|
||||
assert.Equal(t, "test-runners", ars.Labels["app.kubernetes.io/name"])
|
||||
assert.Equal(t, "test-runners", ars.Labels["app.kubernetes.io/instance"])
|
||||
assert.Equal(t, "https://github.com/actions", ars.Spec.GitHubConfigUrl)
|
||||
assert.Equal(t, "test-runners-gha-rs-github-secret", ars.Spec.GitHubConfigSecret)
|
||||
|
||||
assert.Empty(t, ars.Spec.RunnerGroup, "RunnerGroup should be empty")
|
||||
assert.Nil(t, ars.Spec.MinRunners, "MinRunners should be nil")
|
||||
assert.Nil(t, ars.Spec.MaxRunners, "MaxRunners should be nil")
|
||||
assert.Nil(t, ars.Spec.Proxy, "Proxy should be nil")
|
||||
assert.Nil(t, ars.Spec.GitHubServerTLS, "GitHubServerTLS should be nil")
|
||||
|
||||
assert.NotNil(t, ars.Spec.Template.Spec, "Template.Spec should not be nil")
|
||||
|
||||
assert.Len(t, ars.Spec.Template.Spec.Containers, 1, "Template.Spec should have 1 container")
|
||||
assert.Equal(t, "runner", ars.Spec.Template.Spec.Containers[0].Name)
|
||||
assert.Equal(t, "ghcr.io/actions/actions-runner:latest", ars.Spec.Template.Spec.Containers[0].Image)
|
||||
|
||||
require.Len(t, ars.Spec.Template.Spec.Containers[0].Env, 4, "The runner container should have 4 env vars")
|
||||
assert.Equal(t, "ACTIONS_RUNNER_CONTAINER_HOOKS", ars.Spec.Template.Spec.Containers[0].Env[0].Name)
|
||||
assert.Equal(t, "/home/runner/k8s-novolume/index.js", ars.Spec.Template.Spec.Containers[0].Env[0].Value)
|
||||
assert.Equal(t, "ACTIONS_RUNNER_POD_NAME", ars.Spec.Template.Spec.Containers[0].Env[1].Name)
|
||||
assert.Equal(t, "ACTIONS_RUNNER_REQUIRE_JOB_CONTAINER", ars.Spec.Template.Spec.Containers[0].Env[2].Name)
|
||||
assert.Equal(t, "true", ars.Spec.Template.Spec.Containers[0].Env[2].Value)
|
||||
assert.Equal(t, "ACTIONS_RUNNER_IMAGE", ars.Spec.Template.Spec.Containers[0].Env[3].Name)
|
||||
assert.Equal(t, ars.Spec.Template.Spec.Containers[0].Image, ars.Spec.Template.Spec.Containers[0].Env[3].Value)
|
||||
|
||||
assert.Len(t, ars.Spec.Template.Spec.Volumes, 0, "Template.Spec should have 0 volumes")
|
||||
}
|
||||
|
||||
func TestTemplateRenderedAutoscalingRunnerSet_ListenerPodTemplate(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
@@ -1140,7 +1264,7 @@ func TestTemplateRenderedWithTLS(t *testing.T) {
|
||||
}
|
||||
|
||||
t.Run("providing githubServerTLS.runnerMountPath", func(t *testing.T) {
|
||||
t.Run("mode: default", func(t *testing.T) {
|
||||
t.Run("mode default", func(t *testing.T) {
|
||||
options := &helm.Options{
|
||||
Logger: logger.Discard,
|
||||
SetValues: map[string]string{
|
||||
@@ -1199,7 +1323,7 @@ func TestTemplateRenderedWithTLS(t *testing.T) {
|
||||
})
|
||||
})
|
||||
|
||||
t.Run("mode: dind", func(t *testing.T) {
|
||||
t.Run("mode dind", func(t *testing.T) {
|
||||
options := &helm.Options{
|
||||
Logger: logger.Discard,
|
||||
SetValues: map[string]string{
|
||||
@@ -1259,7 +1383,7 @@ func TestTemplateRenderedWithTLS(t *testing.T) {
|
||||
})
|
||||
})
|
||||
|
||||
t.Run("mode: kubernetes", func(t *testing.T) {
|
||||
t.Run("mode kubernetes", func(t *testing.T) {
|
||||
options := &helm.Options{
|
||||
Logger: logger.Discard,
|
||||
SetValues: map[string]string{
|
||||
@@ -1318,10 +1442,70 @@ func TestTemplateRenderedWithTLS(t *testing.T) {
|
||||
Value: "1",
|
||||
})
|
||||
})
|
||||
|
||||
t.Run("mode kubernetes-novolume", func(t *testing.T) {
|
||||
options := &helm.Options{
|
||||
Logger: logger.Discard,
|
||||
SetValues: map[string]string{
|
||||
"githubConfigUrl": "https://github.com/actions",
|
||||
"githubConfigSecret": "pre-defined-secrets",
|
||||
"githubServerTLS.certificateFrom.configMapKeyRef.name": "certs-configmap",
|
||||
"githubServerTLS.certificateFrom.configMapKeyRef.key": "cert.pem",
|
||||
"githubServerTLS.runnerMountPath": "/runner/mount/path",
|
||||
"containerMode.type": "kubernetes-novolume",
|
||||
"controllerServiceAccount.name": "arc",
|
||||
"controllerServiceAccount.namespace": "arc-system",
|
||||
},
|
||||
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
|
||||
}
|
||||
|
||||
ars := render(t, options)
|
||||
|
||||
require.NotNil(t, ars.Spec.GitHubServerTLS)
|
||||
expected := &v1alpha1.TLSConfig{
|
||||
CertificateFrom: &v1alpha1.TLSCertificateSource{
|
||||
ConfigMapKeyRef: &corev1.ConfigMapKeySelector{
|
||||
LocalObjectReference: corev1.LocalObjectReference{
|
||||
Name: "certs-configmap",
|
||||
},
|
||||
Key: "cert.pem",
|
||||
},
|
||||
},
|
||||
}
|
||||
assert.Equal(t, expected, ars.Spec.GitHubServerTLS)
|
||||
|
||||
var volume *corev1.Volume
|
||||
for _, v := range ars.Spec.Template.Spec.Volumes {
|
||||
if v.Name == "github-server-tls-cert" {
|
||||
volume = &v
|
||||
break
|
||||
}
|
||||
}
|
||||
require.NotNil(t, volume)
|
||||
assert.Equal(t, "certs-configmap", volume.ConfigMap.Name)
|
||||
assert.Equal(t, "cert.pem", volume.ConfigMap.Items[0].Key)
|
||||
assert.Equal(t, "cert.pem", volume.ConfigMap.Items[0].Path)
|
||||
|
||||
assert.Contains(t, ars.Spec.Template.Spec.Containers[0].VolumeMounts, corev1.VolumeMount{
|
||||
Name: "github-server-tls-cert",
|
||||
MountPath: "/runner/mount/path/cert.pem",
|
||||
SubPath: "cert.pem",
|
||||
})
|
||||
|
||||
assert.Contains(t, ars.Spec.Template.Spec.Containers[0].Env, corev1.EnvVar{
|
||||
Name: "NODE_EXTRA_CA_CERTS",
|
||||
Value: "/runner/mount/path/cert.pem",
|
||||
})
|
||||
|
||||
assert.Contains(t, ars.Spec.Template.Spec.Containers[0].Env, corev1.EnvVar{
|
||||
Name: "RUNNER_UPDATE_CA_CERTS",
|
||||
Value: "1",
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
t.Run("without providing githubServerTLS.runnerMountPath", func(t *testing.T) {
|
||||
t.Run("mode: default", func(t *testing.T) {
|
||||
t.Run("mode default", func(t *testing.T) {
|
||||
options := &helm.Options{
|
||||
Logger: logger.Discard,
|
||||
SetValues: map[string]string{
|
||||
@@ -1376,7 +1560,7 @@ func TestTemplateRenderedWithTLS(t *testing.T) {
|
||||
})
|
||||
})
|
||||
|
||||
t.Run("mode: dind", func(t *testing.T) {
|
||||
t.Run("mode dind", func(t *testing.T) {
|
||||
options := &helm.Options{
|
||||
Logger: logger.Discard,
|
||||
SetValues: map[string]string{
|
||||
@@ -1432,7 +1616,7 @@ func TestTemplateRenderedWithTLS(t *testing.T) {
|
||||
})
|
||||
})
|
||||
|
||||
t.Run("mode: kubernetes", func(t *testing.T) {
|
||||
t.Run("mode kubernetes", func(t *testing.T) {
|
||||
options := &helm.Options{
|
||||
Logger: logger.Discard,
|
||||
SetValues: map[string]string{
|
||||
@@ -1487,6 +1671,62 @@ func TestTemplateRenderedWithTLS(t *testing.T) {
|
||||
Value: "1",
|
||||
})
|
||||
})
|
||||
|
||||
t.Run("mode kubernetes-novolume", func(t *testing.T) {
|
||||
options := &helm.Options{
|
||||
Logger: logger.Discard,
|
||||
SetValues: map[string]string{
|
||||
"githubConfigUrl": "https://github.com/actions",
|
||||
"githubConfigSecret": "pre-defined-secrets",
|
||||
"githubServerTLS.certificateFrom.configMapKeyRef.name": "certs-configmap",
|
||||
"githubServerTLS.certificateFrom.configMapKeyRef.key": "cert.pem",
|
||||
"containerMode.type": "kubernetes-novolume",
|
||||
"controllerServiceAccount.name": "arc",
|
||||
"controllerServiceAccount.namespace": "arc-system",
|
||||
},
|
||||
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
|
||||
}
|
||||
|
||||
ars := render(t, options)
|
||||
|
||||
require.NotNil(t, ars.Spec.GitHubServerTLS)
|
||||
expected := &v1alpha1.TLSConfig{
|
||||
CertificateFrom: &v1alpha1.TLSCertificateSource{
|
||||
ConfigMapKeyRef: &corev1.ConfigMapKeySelector{
|
||||
LocalObjectReference: corev1.LocalObjectReference{
|
||||
Name: "certs-configmap",
|
||||
},
|
||||
Key: "cert.pem",
|
||||
},
|
||||
},
|
||||
}
|
||||
assert.Equal(t, expected, ars.Spec.GitHubServerTLS)
|
||||
|
||||
var volume *corev1.Volume
|
||||
for _, v := range ars.Spec.Template.Spec.Volumes {
|
||||
if v.Name == "github-server-tls-cert" {
|
||||
volume = &v
|
||||
break
|
||||
}
|
||||
}
|
||||
assert.Nil(t, volume)
|
||||
|
||||
assert.NotContains(t, ars.Spec.Template.Spec.Containers[0].VolumeMounts, corev1.VolumeMount{
|
||||
Name: "github-server-tls-cert",
|
||||
MountPath: "/runner/mount/path/cert.pem",
|
||||
SubPath: "cert.pem",
|
||||
})
|
||||
|
||||
assert.NotContains(t, ars.Spec.Template.Spec.Containers[0].Env, corev1.EnvVar{
|
||||
Name: "NODE_EXTRA_CA_CERTS",
|
||||
Value: "/runner/mount/path/cert.pem",
|
||||
})
|
||||
|
||||
assert.NotContains(t, ars.Spec.Template.Spec.Containers[0].Env, corev1.EnvVar{
|
||||
Name: "RUNNER_UPDATE_CA_CERTS",
|
||||
Value: "1",
|
||||
})
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
@@ -1951,40 +2191,44 @@ func TestTemplateRenderedAutoscalingRunnerSetAnnotation_GitHubSecret(t *testing.
|
||||
func TestTemplateRenderedAutoscalingRunnerSetAnnotation_KubernetesModeCleanup(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Path to the helm chart we will test
|
||||
helmChartPath, err := filepath.Abs("../../gha-runner-scale-set")
|
||||
require.NoError(t, err)
|
||||
for _, mode := range []string{"kubernetes", "kubernetes-novolume"} {
|
||||
t.Run("containerMode "+mode, func(t *testing.T) {
|
||||
// Path to the helm chart we will test
|
||||
helmChartPath, err := filepath.Abs("../../gha-runner-scale-set")
|
||||
require.NoError(t, err)
|
||||
|
||||
releaseName := "test-runners"
|
||||
namespaceName := "test-" + strings.ToLower(random.UniqueId())
|
||||
releaseName := "test-runners"
|
||||
namespaceName := "test-" + strings.ToLower(random.UniqueId())
|
||||
|
||||
options := &helm.Options{
|
||||
Logger: logger.Discard,
|
||||
SetValues: map[string]string{
|
||||
"githubConfigUrl": "https://github.com/actions",
|
||||
"githubConfigSecret.github_token": "gh_token12345",
|
||||
"controllerServiceAccount.name": "arc",
|
||||
"controllerServiceAccount.namespace": "arc-system",
|
||||
"containerMode.type": "kubernetes",
|
||||
},
|
||||
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
|
||||
}
|
||||
options := &helm.Options{
|
||||
Logger: logger.Discard,
|
||||
SetValues: map[string]string{
|
||||
"githubConfigUrl": "https://github.com/actions",
|
||||
"githubConfigSecret.github_token": "gh_token12345",
|
||||
"controllerServiceAccount.name": "arc",
|
||||
"controllerServiceAccount.namespace": "arc-system",
|
||||
"containerMode.type": mode,
|
||||
},
|
||||
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
|
||||
}
|
||||
|
||||
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"})
|
||||
var autoscalingRunnerSet v1alpha1.AutoscalingRunnerSet
|
||||
helm.UnmarshalK8SYaml(t, output, &autoscalingRunnerSet)
|
||||
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"})
|
||||
var autoscalingRunnerSet v1alpha1.AutoscalingRunnerSet
|
||||
helm.UnmarshalK8SYaml(t, output, &autoscalingRunnerSet)
|
||||
|
||||
annotationValues := map[string]string{
|
||||
actionsgithubcom.AnnotationKeyGitHubSecretName: "test-runners-gha-rs-github-secret",
|
||||
actionsgithubcom.AnnotationKeyManagerRoleName: "test-runners-gha-rs-manager",
|
||||
actionsgithubcom.AnnotationKeyManagerRoleBindingName: "test-runners-gha-rs-manager",
|
||||
actionsgithubcom.AnnotationKeyKubernetesModeServiceAccountName: "test-runners-gha-rs-kube-mode",
|
||||
actionsgithubcom.AnnotationKeyKubernetesModeRoleName: "test-runners-gha-rs-kube-mode",
|
||||
actionsgithubcom.AnnotationKeyKubernetesModeRoleBindingName: "test-runners-gha-rs-kube-mode",
|
||||
}
|
||||
annotationValues := map[string]string{
|
||||
actionsgithubcom.AnnotationKeyGitHubSecretName: "test-runners-gha-rs-github-secret",
|
||||
actionsgithubcom.AnnotationKeyManagerRoleName: "test-runners-gha-rs-manager",
|
||||
actionsgithubcom.AnnotationKeyManagerRoleBindingName: "test-runners-gha-rs-manager",
|
||||
actionsgithubcom.AnnotationKeyKubernetesModeServiceAccountName: "test-runners-gha-rs-kube-mode",
|
||||
actionsgithubcom.AnnotationKeyKubernetesModeRoleName: "test-runners-gha-rs-kube-mode",
|
||||
actionsgithubcom.AnnotationKeyKubernetesModeRoleBindingName: "test-runners-gha-rs-kube-mode",
|
||||
}
|
||||
|
||||
for annotation, value := range annotationValues {
|
||||
assert.Equal(t, value, autoscalingRunnerSet.Annotations[annotation], fmt.Sprintf("Annotation %q does not match the expected value", annotation))
|
||||
for annotation, value := range annotationValues {
|
||||
assert.Equal(t, value, autoscalingRunnerSet.Annotations[annotation], fmt.Sprintf("Annotation %q does not match the expected value", annotation))
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2416,6 +2660,21 @@ func TestNamespaceOverride(t *testing.T) {
|
||||
KubectlOptions: k8s.NewKubectlOptions("", "", releaseNamespace),
|
||||
},
|
||||
},
|
||||
"kube_novolume_mode_role": {
|
||||
file: "kube_mode_role.yaml",
|
||||
options: &helm.Options{
|
||||
Logger: logger.Discard,
|
||||
SetValues: map[string]string{
|
||||
"namespaceOverride": namespaceOverride,
|
||||
"containerMode.type": "kubernetes-novolume",
|
||||
"controllerServiceAccount.name": "foo",
|
||||
"controllerServiceAccount.namespace": "bar",
|
||||
"githubConfigSecret.github_token": "gh_token12345",
|
||||
"githubConfigUrl": "https://github.com",
|
||||
},
|
||||
KubectlOptions: k8s.NewKubectlOptions("", "", releaseNamespace),
|
||||
},
|
||||
},
|
||||
"kube_mode_role_binding": {
|
||||
file: "kube_mode_role_binding.yaml",
|
||||
options: &helm.Options{
|
||||
@@ -2431,6 +2690,21 @@ func TestNamespaceOverride(t *testing.T) {
|
||||
KubectlOptions: k8s.NewKubectlOptions("", "", releaseNamespace),
|
||||
},
|
||||
},
|
||||
"kube_novolume_mode_role_binding": {
|
||||
file: "kube_mode_role_binding.yaml",
|
||||
options: &helm.Options{
|
||||
Logger: logger.Discard,
|
||||
SetValues: map[string]string{
|
||||
"namespaceOverride": namespaceOverride,
|
||||
"containerMode.type": "kubernetes-novolume",
|
||||
"controllerServiceAccount.name": "foo",
|
||||
"controllerServiceAccount.namespace": "bar",
|
||||
"githubConfigSecret.github_token": "gh_token12345",
|
||||
"githubConfigUrl": "https://github.com",
|
||||
},
|
||||
KubectlOptions: k8s.NewKubectlOptions("", "", releaseNamespace),
|
||||
},
|
||||
},
|
||||
"kube_mode_serviceaccount": {
|
||||
file: "kube_mode_serviceaccount.yaml",
|
||||
options: &helm.Options{
|
||||
@@ -2446,6 +2720,21 @@ func TestNamespaceOverride(t *testing.T) {
|
||||
KubectlOptions: k8s.NewKubectlOptions("", "", releaseNamespace),
|
||||
},
|
||||
},
|
||||
"kube_novolume_mode_serviceaccount": {
|
||||
file: "kube_mode_serviceaccount.yaml",
|
||||
options: &helm.Options{
|
||||
Logger: logger.Discard,
|
||||
SetValues: map[string]string{
|
||||
"namespaceOverride": namespaceOverride,
|
||||
"containerMode.type": "kubernetes-novolume",
|
||||
"controllerServiceAccount.name": "foo",
|
||||
"controllerServiceAccount.namespace": "bar",
|
||||
"githubConfigSecret.github_token": "gh_token12345",
|
||||
"githubConfigUrl": "https://github.com",
|
||||
},
|
||||
KubectlOptions: k8s.NewKubectlOptions("", "", releaseNamespace),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for name, tc := range tt {
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
## githubConfigUrl is the GitHub url for where you want to configure runners
|
||||
## ex: https://github.com/myorg/myrepo or https://github.com/myorg
|
||||
## ex: https://github.com/myorg/myrepo or https://github.com/myorg or https://github.com/enterprises/myenterprise
|
||||
githubConfigUrl: ""
|
||||
|
||||
## githubConfigSecret is the k8s secret information to use when authenticating via the GitHub API.
|
||||
@@ -115,7 +115,7 @@ githubConfigSecret:
## If any customization is required for dind or kubernetes mode, containerMode should remain
## empty, and configuration should be applied to the template.
# containerMode:
#   type: "dind"  ## type can be set to dind or kubernetes
#   type: "dind"  ## type can be set to "dind", "kubernetes", or "kubernetes-novolume"
#   ## the following is required when containerMode.type=kubernetes
#   kubernetesModeWorkVolumeClaim:
#     accessModes: ["ReadWriteOnce"]
@@ -154,7 +154,7 @@ githubConfigSecret:
|
||||
# counters:
|
||||
# gha_started_jobs_total:
|
||||
# labels:
|
||||
# ["repository", "organization", "enterprise", "job_name", "event_name", "job_workflow_ref"]
|
||||
# ["repository", "organization", "enterprise", "job_name", "event_name", "job_workflow_ref", "job_workflow_name", "job_workflow_target"]
|
||||
# gha_completed_jobs_total:
|
||||
# labels:
|
||||
# [
|
||||
@@ -165,6 +165,8 @@ githubConfigSecret:
|
||||
# "event_name",
|
||||
# "job_result",
|
||||
# "job_workflow_ref",
|
||||
# "job_workflow_name",
|
||||
# "job_workflow_target",
|
||||
# ]
|
||||
# gauges:
|
||||
# gha_assigned_jobs:
|
||||
@@ -186,7 +188,7 @@ githubConfigSecret:
|
||||
# histograms:
|
||||
# gha_job_startup_duration_seconds:
|
||||
# labels:
|
||||
# ["repository", "organization", "enterprise", "job_name", "event_name","job_workflow_ref"]
|
||||
# ["repository", "organization", "enterprise", "job_name", "event_name","job_workflow_ref", "job_workflow_name", "job_workflow_target"]
|
||||
# buckets:
|
||||
# [
|
||||
# 0.01,
|
||||
@@ -244,7 +246,9 @@ githubConfigSecret:
|
||||
# "job_name",
|
||||
# "event_name",
|
||||
# "job_result",
|
||||
# "job_workflow_ref"
|
||||
# "job_workflow_ref",
|
||||
# "job_workflow_name",
|
||||
# "job_workflow_target"
|
||||
# ]
|
||||
# buckets:
|
||||
# [
|
||||
@@ -326,9 +330,16 @@ template:
|
||||
## command:
|
||||
## - docker
|
||||
## - info
|
||||
## initialDelaySeconds: 0
|
||||
## failureThreshold: 24
|
||||
## periodSeconds: 5
|
||||
## initialDelaySeconds: 0
|
||||
## failureThreshold: 24
|
||||
## periodSeconds: 5
|
||||
## volumeMounts:
|
||||
## - name: work
|
||||
## mountPath: /home/runner/_work
|
||||
## - name: dind-sock
|
||||
## mountPath: /var/run
|
||||
## - name: dind-externals
|
||||
## mountPath: /home/runner/externals
|
||||
## containers:
|
||||
## - name: runner
|
||||
## image: ghcr.io/actions/actions-runner:latest
|
||||
@@ -380,6 +391,25 @@ template:
|
||||
## resources:
|
||||
## requests:
|
||||
## storage: 1Gi
|
||||
######################################################################################################
|
||||
## with containerMode.type=kubernetes-novolume, we will populate the template.spec with following pod spec
|
||||
## template:
|
||||
## spec:
|
||||
## containers:
|
||||
## - name: runner
|
||||
## image: ghcr.io/actions/actions-runner:latest
|
||||
## command: ["/home/runner/run.sh"]
|
||||
## env:
|
||||
## - name: ACTIONS_RUNNER_CONTAINER_HOOKS
|
||||
## value: /home/runner/k8s-novolume/index.js
|
||||
## - name: ACTIONS_RUNNER_POD_NAME
|
||||
## valueFrom:
|
||||
## fieldRef:
|
||||
## fieldPath: metadata.name
|
||||
## - name: ACTIONS_RUNNER_IMAGE
|
||||
## value: ghcr.io/actions/actions-runner:latest # should match the runnerimage
|
||||
## - name: ACTIONS_RUNNER_REQUIRE_JOB_CONTAINER
|
||||
## value: "true"
|
||||
spec:
|
||||
containers:
|
||||
- name: runner
|
||||
|
||||
@@ -361,7 +361,7 @@ func (l *Listener) parseMessage(ctx context.Context, msg *actions.RunnerScaleSet
|
||||
return nil, fmt.Errorf("failed to decode job available: %w", err)
|
||||
}
|
||||
|
||||
l.logger.Info("Job available message received", "jobId", jobAvailable.RunnerRequestId)
|
||||
l.logger.Info("Job available message received", "jobId", jobAvailable.JobID)
|
||||
parsedMsg.jobsAvailable = append(parsedMsg.jobsAvailable, &jobAvailable)
|
||||
|
||||
case messageTypeJobAssigned:
|
||||
@@ -370,14 +370,14 @@ func (l *Listener) parseMessage(ctx context.Context, msg *actions.RunnerScaleSet
|
||||
return nil, fmt.Errorf("failed to decode job assigned: %w", err)
|
||||
}
|
||||
|
||||
l.logger.Info("Job assigned message received", "jobId", jobAssigned.RunnerRequestId)
|
||||
l.logger.Info("Job assigned message received", "jobId", jobAssigned.JobID)
|
||||
|
||||
case messageTypeJobStarted:
|
||||
var jobStarted actions.JobStarted
|
||||
if err := json.Unmarshal(msg, &jobStarted); err != nil {
|
||||
return nil, fmt.Errorf("could not decode job started message. %w", err)
|
||||
}
|
||||
l.logger.Info("Job started message received.", "RequestId", jobStarted.RunnerRequestId, "RunnerId", jobStarted.RunnerId)
|
||||
l.logger.Info("Job started message received.", "JobID", jobStarted.JobID, "RunnerId", jobStarted.RunnerID)
|
||||
parsedMsg.jobsStarted = append(parsedMsg.jobsStarted, &jobStarted)
|
||||
|
||||
case messageTypeJobCompleted:
|
||||
@@ -386,7 +386,13 @@ func (l *Listener) parseMessage(ctx context.Context, msg *actions.RunnerScaleSet
|
||||
return nil, fmt.Errorf("failed to decode job completed: %w", err)
|
||||
}
|
||||
|
||||
l.logger.Info("Job completed message received.", "RequestId", jobCompleted.RunnerRequestId, "Result", jobCompleted.Result, "RunnerId", jobCompleted.RunnerId, "RunnerName", jobCompleted.RunnerName)
|
||||
l.logger.Info(
|
||||
"Job completed message received.",
|
||||
"JobID", jobCompleted.JobID,
|
||||
"Result", jobCompleted.Result,
|
||||
"RunnerId", jobCompleted.RunnerId,
|
||||
"RunnerName", jobCompleted.RunnerName,
|
||||
)
|
||||
parsedMsg.jobsCompleted = append(parsedMsg.jobsCompleted, &jobCompleted)
|
||||
|
||||
default:
|
||||
@@ -400,7 +406,7 @@ func (l *Listener) parseMessage(ctx context.Context, msg *actions.RunnerScaleSet
|
||||
func (l *Listener) acquireAvailableJobs(ctx context.Context, jobsAvailable []*actions.JobAvailable) ([]int64, error) {
|
||||
ids := make([]int64, 0, len(jobsAvailable))
|
||||
for _, job := range jobsAvailable {
|
||||
ids = append(ids, job.RunnerRequestId)
|
||||
ids = append(ids, job.RunnerRequestID)
|
||||
}
|
||||
|
||||
l.logger.Info("Acquiring jobs", "count", len(ids), "requestIds", fmt.Sprint(ids))
|
||||
|
||||
@@ -627,17 +627,17 @@ func TestListener_acquireAvailableJobs(t *testing.T) {
|
||||
availableJobs := []*actions.JobAvailable{
|
||||
{
|
||||
JobMessageBase: actions.JobMessageBase{
|
||||
RunnerRequestId: 1,
|
||||
RunnerRequestID: 1,
|
||||
},
|
||||
},
|
||||
{
|
||||
JobMessageBase: actions.JobMessageBase{
|
||||
RunnerRequestId: 2,
|
||||
RunnerRequestID: 2,
|
||||
},
|
||||
},
|
||||
{
|
||||
JobMessageBase: actions.JobMessageBase{
|
||||
RunnerRequestId: 3,
|
||||
RunnerRequestID: 3,
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -678,17 +678,17 @@ func TestListener_acquireAvailableJobs(t *testing.T) {
|
||||
availableJobs := []*actions.JobAvailable{
|
||||
{
|
||||
JobMessageBase: actions.JobMessageBase{
|
||||
RunnerRequestId: 1,
|
||||
RunnerRequestID: 1,
|
||||
},
|
||||
},
|
||||
{
|
||||
JobMessageBase: actions.JobMessageBase{
|
||||
RunnerRequestId: 2,
|
||||
RunnerRequestID: 2,
|
||||
},
|
||||
},
|
||||
{
|
||||
JobMessageBase: actions.JobMessageBase{
|
||||
RunnerRequestId: 3,
|
||||
RunnerRequestID: 3,
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -724,17 +724,17 @@ func TestListener_acquireAvailableJobs(t *testing.T) {
|
||||
availableJobs := []*actions.JobAvailable{
|
||||
{
|
||||
JobMessageBase: actions.JobMessageBase{
|
||||
RunnerRequestId: 1,
|
||||
RunnerRequestID: 1,
|
||||
},
|
||||
},
|
||||
{
|
||||
JobMessageBase: actions.JobMessageBase{
|
||||
RunnerRequestId: 2,
|
||||
RunnerRequestID: 2,
|
||||
},
|
||||
},
|
||||
{
|
||||
JobMessageBase: actions.JobMessageBase{
|
||||
RunnerRequestId: 3,
|
||||
RunnerRequestID: 3,
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -809,17 +809,17 @@ func TestListener_acquireAvailableJobs(t *testing.T) {
|
||||
availableJobs := []*actions.JobAvailable{
|
||||
{
|
||||
JobMessageBase: actions.JobMessageBase{
|
||||
RunnerRequestId: 1,
|
||||
RunnerRequestID: 1,
|
||||
},
|
||||
},
|
||||
{
|
||||
JobMessageBase: actions.JobMessageBase{
|
||||
RunnerRequestId: 2,
|
||||
RunnerRequestID: 2,
|
||||
},
|
||||
},
|
||||
{
|
||||
JobMessageBase: actions.JobMessageBase{
|
||||
RunnerRequestId: 3,
|
||||
RunnerRequestID: 3,
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -881,7 +881,7 @@ func TestListener_parseMessage(t *testing.T) {
|
||||
JobMessageType: actions.JobMessageType{
|
||||
MessageType: messageTypeJobAvailable,
|
||||
},
|
||||
RunnerRequestId: 1,
|
||||
RunnerRequestID: 1,
|
||||
},
|
||||
},
|
||||
{
|
||||
@@ -890,7 +890,7 @@ func TestListener_parseMessage(t *testing.T) {
|
||||
JobMessageType: actions.JobMessageType{
|
||||
MessageType: messageTypeJobAvailable,
|
||||
},
|
||||
RunnerRequestId: 2,
|
||||
RunnerRequestID: 2,
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -904,7 +904,7 @@ func TestListener_parseMessage(t *testing.T) {
|
||||
JobMessageType: actions.JobMessageType{
|
||||
MessageType: messageTypeJobAssigned,
|
||||
},
|
||||
RunnerRequestId: 3,
|
||||
RunnerRequestID: 3,
|
||||
},
|
||||
},
|
||||
{
|
||||
@@ -912,7 +912,7 @@ func TestListener_parseMessage(t *testing.T) {
|
||||
JobMessageType: actions.JobMessageType{
|
||||
MessageType: messageTypeJobAssigned,
|
||||
},
|
||||
RunnerRequestId: 4,
|
||||
RunnerRequestID: 4,
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -926,9 +926,9 @@ func TestListener_parseMessage(t *testing.T) {
|
||||
JobMessageType: actions.JobMessageType{
|
||||
MessageType: messageTypeJobStarted,
|
||||
},
|
||||
RunnerRequestId: 5,
|
||||
RunnerRequestID: 5,
|
||||
},
|
||||
RunnerId: 2,
|
||||
RunnerID: 2,
|
||||
RunnerName: "runner2",
|
||||
},
|
||||
}
|
||||
@@ -942,7 +942,7 @@ func TestListener_parseMessage(t *testing.T) {
|
||||
JobMessageType: actions.JobMessageType{
|
||||
MessageType: messageTypeJobCompleted,
|
||||
},
|
||||
RunnerRequestId: 6,
|
||||
RunnerRequestID: 6,
|
||||
},
|
||||
Result: "success",
|
||||
RunnerId: 1,
|
||||
|
||||
@@ -123,9 +123,9 @@ func TestHandleMessageMetrics(t *testing.T) {
|
||||
JobMessageType: actions.JobMessageType{
|
||||
MessageType: messageTypeJobStarted,
|
||||
},
|
||||
RunnerRequestId: 8,
|
||||
RunnerRequestID: 8,
|
||||
},
|
||||
RunnerId: 3,
|
||||
RunnerID: 3,
|
||||
RunnerName: "runner3",
|
||||
},
|
||||
}
|
||||
@@ -139,7 +139,7 @@ func TestHandleMessageMetrics(t *testing.T) {
|
||||
JobMessageType: actions.JobMessageType{
|
||||
MessageType: messageTypeJobCompleted,
|
||||
},
|
||||
RunnerRequestId: 6,
|
||||
RunnerRequestID: 6,
|
||||
},
|
||||
Result: "success",
|
||||
RunnerId: 1,
|
||||
@@ -150,7 +150,7 @@ func TestHandleMessageMetrics(t *testing.T) {
|
||||
JobMessageType: actions.JobMessageType{
|
||||
MessageType: messageTypeJobCompleted,
|
||||
},
|
||||
RunnerRequestId: 7,
|
||||
RunnerRequestID: 7,
|
||||
},
|
||||
Result: "success",
|
||||
RunnerId: 2,
|
||||
|
||||
@@ -22,6 +22,8 @@ const (
|
||||
labelKeyRepository = "repository"
|
||||
labelKeyJobName = "job_name"
|
||||
labelKeyJobWorkflowRef = "job_workflow_ref"
|
||||
labelKeyJobWorkflowName = "job_workflow_name"
|
||||
labelKeyJobWorkflowTarget = "job_workflow_target"
|
||||
labelKeyEventName = "event_name"
|
||||
labelKeyJobResult = "job_result"
|
||||
)
|
||||
@@ -75,13 +77,16 @@ var metricsHelp = metricsHelpRegistry{
}

func (e *exporter) jobLabels(jobBase *actions.JobMessageBase) prometheus.Labels {
	workflowRefInfo := ParseWorkflowRef(jobBase.JobWorkflowRef)
	return prometheus.Labels{
		labelKeyEnterprise:     e.scaleSetLabels[labelKeyEnterprise],
		labelKeyOrganization:   jobBase.OwnerName,
		labelKeyRepository:     jobBase.RepositoryName,
		labelKeyJobName:        jobBase.JobDisplayName,
		labelKeyJobWorkflowRef: jobBase.JobWorkflowRef,
		labelKeyEventName:      jobBase.EventName,
		labelKeyEnterprise:        e.scaleSetLabels[labelKeyEnterprise],
		labelKeyOrganization:      jobBase.OwnerName,
		labelKeyRepository:        jobBase.RepositoryName,
		labelKeyJobName:           jobBase.JobDisplayName,
		labelKeyJobWorkflowRef:    jobBase.JobWorkflowRef,
		labelKeyJobWorkflowName:   workflowRefInfo.Name,
		labelKeyJobWorkflowTarget: workflowRefInfo.Target,
		labelKeyEventName:         jobBase.EventName,
	}
}
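For reference, a minimal sketch (assumed, not part of this diff) of the values the two new labels pick up once jobLabels routes job_workflow_ref through ParseWorkflowRef (added later in this change); the workflow path and tag below are placeholders:

package metrics

import "fmt"

// ExampleParseWorkflowRef_tag shows the derived label values for a tag-triggered job.
func ExampleParseWorkflowRef_tag() {
	info := ParseWorkflowRef("myorg/myrepo/.github/workflows/release.yaml@refs/tags/v2.0.0")
	fmt.Println("job_workflow_name:", info.Name)     // workflow file name, extension trimmed
	fmt.Println("job_workflow_target:", info.Target) // ref with its type prefix kept
	// Output:
	// job_workflow_name: release
	// job_workflow_target: tags/v2.0.0
}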
@@ -468,7 +473,7 @@ func (e *exporter) PublishStatistics(stats *actions.RunnerScaleSetStatistic) {
|
||||
e.setGauge(MetricAssignedJobs, e.scaleSetLabels, float64(stats.TotalAssignedJobs))
|
||||
e.setGauge(MetricRunningJobs, e.scaleSetLabels, float64(stats.TotalRunningJobs))
|
||||
e.setGauge(MetricRegisteredRunners, e.scaleSetLabels, float64(stats.TotalRegisteredRunners))
|
||||
e.setGauge(MetricBusyRunners, e.scaleSetLabels, float64(float64(stats.TotalBusyRunners)))
|
||||
e.setGauge(MetricBusyRunners, e.scaleSetLabels, float64(stats.TotalBusyRunners))
|
||||
e.setGauge(MetricIdleRunners, e.scaleSetLabels, float64(stats.TotalIdleRunners))
|
||||
}
|
||||
|
||||
|
||||
99
cmd/ghalistener/metrics/metrics_integration_test.go
Normal file
@@ -0,0 +1,99 @@
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/actions/actions-runner-controller/github/actions"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestMetricsWithWorkflowRefParsing(t *testing.T) {
|
||||
// Create a test exporter
|
||||
exporter := &exporter{
|
||||
scaleSetLabels: prometheus.Labels{
|
||||
labelKeyEnterprise: "test-enterprise",
|
||||
labelKeyOrganization: "test-org",
|
||||
labelKeyRepository: "test-repo",
|
||||
labelKeyRunnerScaleSetName: "test-scale-set",
|
||||
labelKeyRunnerScaleSetNamespace: "test-namespace",
|
||||
},
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
jobBase actions.JobMessageBase
|
||||
wantName string
|
||||
wantTarget string
|
||||
}{
|
||||
{
|
||||
name: "main branch workflow",
|
||||
jobBase: actions.JobMessageBase{
|
||||
OwnerName: "actions",
|
||||
RepositoryName: "runner",
|
||||
JobDisplayName: "Build and Test",
|
||||
JobWorkflowRef: "actions/runner/.github/workflows/build.yml@refs/heads/main",
|
||||
EventName: "push",
|
||||
},
|
||||
wantName: "build",
|
||||
wantTarget: "heads/main",
|
||||
},
|
||||
{
|
||||
name: "feature branch workflow",
|
||||
jobBase: actions.JobMessageBase{
|
||||
OwnerName: "myorg",
|
||||
RepositoryName: "myrepo",
|
||||
JobDisplayName: "CI/CD Pipeline",
|
||||
JobWorkflowRef: "myorg/myrepo/.github/workflows/ci-cd-pipeline.yml@refs/heads/feature/new-metrics",
|
||||
EventName: "push",
|
||||
},
|
||||
wantName: "ci-cd-pipeline",
|
||||
wantTarget: "heads/feature/new-metrics",
|
||||
},
|
||||
{
|
||||
name: "pull request workflow",
|
||||
jobBase: actions.JobMessageBase{
|
||||
OwnerName: "actions",
|
||||
RepositoryName: "runner",
|
||||
JobDisplayName: "PR Checks",
|
||||
JobWorkflowRef: "actions/runner/.github/workflows/pr-checks.yml@refs/pull/123/merge",
|
||||
EventName: "pull_request",
|
||||
},
|
||||
wantName: "pr-checks",
|
||||
wantTarget: "pull/123",
|
||||
},
|
||||
{
|
||||
name: "tag workflow",
|
||||
jobBase: actions.JobMessageBase{
|
||||
OwnerName: "actions",
|
||||
RepositoryName: "runner",
|
||||
JobDisplayName: "Release",
|
||||
JobWorkflowRef: "actions/runner/.github/workflows/release.yml@refs/tags/v1.2.3",
|
||||
EventName: "release",
|
||||
},
|
||||
wantName: "release",
|
||||
wantTarget: "tags/v1.2.3",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
labels := exporter.jobLabels(&tt.jobBase)
|
||||
|
||||
// Build expected labels
|
||||
expectedLabels := prometheus.Labels{
|
||||
labelKeyEnterprise: "test-enterprise",
|
||||
labelKeyOrganization: tt.jobBase.OwnerName,
|
||||
labelKeyRepository: tt.jobBase.RepositoryName,
|
||||
labelKeyJobName: tt.jobBase.JobDisplayName,
|
||||
labelKeyJobWorkflowRef: tt.jobBase.JobWorkflowRef,
|
||||
labelKeyJobWorkflowName: tt.wantName,
|
||||
labelKeyJobWorkflowTarget: tt.wantTarget,
|
||||
labelKeyEventName: tt.jobBase.EventName,
|
||||
}
|
||||
|
||||
// Assert all expected labels match
|
||||
assert.Equal(t, expectedLabels, labels, "jobLabels() returned unexpected labels for %s", tt.name)
|
||||
})
|
||||
}
|
||||
}
|
||||
78
cmd/ghalistener/metrics/workflow_ref_parser.go
Normal file
@@ -0,0 +1,78 @@
package metrics

import (
	"path"
	"strings"
)

// WorkflowRefInfo contains parsed information from a job_workflow_ref
type WorkflowRefInfo struct {
	// Name is the workflow file name without extension
	Name string
	// Target is the target ref with type prefix retained for clarity
	// Examples:
	// - heads/main (branch)
	// - heads/feature/new-feature (branch)
	// - tags/v1.2.3 (tag)
	// - pull/123 (pull request)
	Target string
}

// ParseWorkflowRef parses a job_workflow_ref string to extract workflow name and target
// Format: {owner}/{repo}/.github/workflows/{workflow_file}@{ref}
// Example: mygithuborg/myrepo/.github/workflows/blank.yml@refs/heads/main
//
// The target field preserves type prefixes to differentiate between:
// - Branch references: "heads/{branch}" (from refs/heads/{branch})
// - Tag references: "tags/{tag}" (from refs/tags/{tag})
// - Pull requests: "pull/{number}" (from refs/pull/{number}/merge)
func ParseWorkflowRef(workflowRef string) WorkflowRefInfo {
	info := WorkflowRefInfo{}

	if workflowRef == "" {
		return info
	}

	// Split by @ to separate path and ref
	parts := strings.Split(workflowRef, "@")
	if len(parts) != 2 {
		return info
	}

	workflowPath := parts[0]
	ref := parts[1]

	// Extract workflow name from path
	// The path format is: {owner}/{repo}/.github/workflows/{workflow_file}
	workflowFile := path.Base(workflowPath)
	// Remove .yml or .yaml extension
	info.Name = strings.TrimSuffix(strings.TrimSuffix(workflowFile, ".yml"), ".yaml")

	// Extract target from ref based on type
	// Branch refs: refs/heads/{branch}
	// Tag refs: refs/tags/{tag}
	// PR refs: refs/pull/{number}/merge
	const (
		branchPrefix = "refs/heads/"
		tagPrefix    = "refs/tags/"
		prPrefix     = "refs/pull/"
	)

	switch {
	case strings.HasPrefix(ref, branchPrefix):
		// Keep "heads/" prefix to indicate branch
		info.Target = "heads/" + strings.TrimPrefix(ref, branchPrefix)
	case strings.HasPrefix(ref, tagPrefix):
		// Keep "tags/" prefix to indicate tag
		info.Target = "tags/" + strings.TrimPrefix(ref, tagPrefix)
	case strings.HasPrefix(ref, prPrefix):
		// Extract PR number from refs/pull/{number}/merge
		// Keep "pull/" prefix to indicate pull request
		prPart := strings.TrimPrefix(ref, prPrefix)
		if idx := strings.Index(prPart, "/"); idx > 0 {
			info.Target = "pull/" + prPart[:idx]
		}
	}

	return info
}
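A minimal usage sketch (not part of this diff) of ParseWorkflowRef above, written as a Go Example test; the owner, repository, and workflow file names are placeholders:

package metrics

import "fmt"

// ExampleParseWorkflowRef exercises a branch ref and a pull-request ref side by side.
func ExampleParseWorkflowRef() {
	branch := ParseWorkflowRef("myorg/myrepo/.github/workflows/ci.yml@refs/heads/main")
	pr := ParseWorkflowRef("myorg/myrepo/.github/workflows/ci.yml@refs/pull/42/merge")
	fmt.Println(branch.Name, branch.Target)
	fmt.Println(pr.Name, pr.Target)
	// Output:
	// ci heads/main
	// ci pull/42
}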
82
cmd/ghalistener/metrics/workflow_ref_parser_test.go
Normal file
@@ -0,0 +1,82 @@
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestParseWorkflowRef(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
workflowRef string
|
||||
wantName string
|
||||
wantTarget string
|
||||
}{
|
||||
{
|
||||
name: "standard branch reference with yml",
|
||||
workflowRef: "actions-runner-controller-sandbox/mumoshu-orgrunner-test-01/.github/workflows/blank.yml@refs/heads/main",
|
||||
wantName: "blank",
|
||||
wantTarget: "heads/main",
|
||||
},
|
||||
{
|
||||
name: "branch with special characters",
|
||||
workflowRef: "owner/repo/.github/workflows/ci-cd.yml@refs/heads/feature/new-feature",
|
||||
wantName: "ci-cd",
|
||||
wantTarget: "heads/feature/new-feature",
|
||||
},
|
||||
{
|
||||
name: "yaml extension",
|
||||
workflowRef: "owner/repo/.github/workflows/deploy.yaml@refs/heads/develop",
|
||||
wantName: "deploy",
|
||||
wantTarget: "heads/develop",
|
||||
},
|
||||
{
|
||||
name: "tag reference",
|
||||
workflowRef: "owner/repo/.github/workflows/release.yml@refs/tags/v1.0.0",
|
||||
wantName: "release",
|
||||
wantTarget: "tags/v1.0.0",
|
||||
},
|
||||
{
|
||||
name: "pull request reference",
|
||||
workflowRef: "owner/repo/.github/workflows/test.yml@refs/pull/123/merge",
|
||||
wantName: "test",
|
||||
wantTarget: "pull/123",
|
||||
},
|
||||
{
|
||||
name: "empty workflow ref",
|
||||
workflowRef: "",
|
||||
wantName: "",
|
||||
wantTarget: "",
|
||||
},
|
||||
{
|
||||
name: "invalid format - no @ separator",
|
||||
workflowRef: "owner/repo/.github/workflows/test.yml",
|
||||
wantName: "",
|
||||
wantTarget: "",
|
||||
},
|
||||
{
|
||||
name: "workflow with dots in name",
|
||||
workflowRef: "owner/repo/.github/workflows/build.test.yml@refs/heads/main",
|
||||
wantName: "build.test",
|
||||
wantTarget: "heads/main",
|
||||
},
|
||||
{
|
||||
name: "workflow with hyphen and underscore",
|
||||
workflowRef: "owner/repo/.github/workflows/build-test_deploy.yml@refs/heads/main",
|
||||
wantName: "build-test_deploy",
|
||||
wantTarget: "heads/main",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got := ParseWorkflowRef(tt.workflowRef)
|
||||
expected := WorkflowRefInfo{
|
||||
Name: tt.wantName,
|
||||
Target: tt.wantTarget,
|
||||
}
|
||||
assert.Equal(t, expected, got, "ParseWorkflowRef(%q) returned unexpected result", tt.workflowRef)
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -100,10 +100,11 @@ func (w *Worker) HandleJobStarted(ctx context.Context, jobInfo *actions.JobStart
|
||||
"runnerName", jobInfo.RunnerName,
|
||||
"ownerName", jobInfo.OwnerName,
|
||||
"repoName", jobInfo.RepositoryName,
|
||||
"jobId", jobInfo.JobID,
|
||||
"workflowRef", jobInfo.JobWorkflowRef,
|
||||
"workflowRunId", jobInfo.WorkflowRunId,
|
||||
"workflowRunId", jobInfo.WorkflowRunID,
|
||||
"jobDisplayName", jobInfo.JobDisplayName,
|
||||
"requestId", jobInfo.RunnerRequestId)
|
||||
"requestId", jobInfo.RunnerRequestID)
|
||||
|
||||
original, err := json.Marshal(&v1alpha1.EphemeralRunner{})
|
||||
if err != nil {
|
||||
@@ -113,9 +114,10 @@ func (w *Worker) HandleJobStarted(ctx context.Context, jobInfo *actions.JobStart
|
||||
patch, err := json.Marshal(
|
||||
&v1alpha1.EphemeralRunner{
|
||||
Status: v1alpha1.EphemeralRunnerStatus{
|
||||
JobRequestId: jobInfo.RunnerRequestId,
|
||||
JobRequestId: jobInfo.RunnerRequestID,
|
||||
JobRepositoryName: fmt.Sprintf("%s/%s", jobInfo.OwnerName, jobInfo.RepositoryName),
|
||||
WorkflowRunId: jobInfo.WorkflowRunId,
|
||||
JobID: jobInfo.JobID,
|
||||
WorkflowRunId: jobInfo.WorkflowRunID,
|
||||
JobWorkflowRef: jobInfo.JobWorkflowRef,
|
||||
JobDisplayName: jobInfo.JobDisplayName,
|
||||
},
|
||||
|
||||
File diff suppressed because it is too large
@@ -15571,4 +15571,3 @@ spec:
|
||||
storage: true
|
||||
subresources:
|
||||
status: {}
|
||||
preserveUnknownFields: false
|
||||
|
||||
@@ -36,6 +36,9 @@ spec:
|
||||
- jsonPath: .status.jobDisplayName
|
||||
name: JobDisplayName
|
||||
type: string
|
||||
- jsonPath: .status.jobId
|
||||
name: JobId
|
||||
type: string
|
||||
- jsonPath: .status.message
|
||||
name: Message
|
||||
type: string
|
||||
@@ -7846,6 +7849,8 @@ spec:
|
||||
type: object
|
||||
jobDisplayName:
|
||||
type: string
|
||||
jobId:
|
||||
type: string
|
||||
jobRepositoryName:
|
||||
type: string
|
||||
jobRequestId:
|
||||
@@ -7874,8 +7879,6 @@ spec:
|
||||
type: string
|
||||
runnerId:
|
||||
type: integer
|
||||
runnerJITConfig:
|
||||
type: string
|
||||
runnerName:
|
||||
type: string
|
||||
workflowRunId:
|
||||
@@ -7887,4 +7890,3 @@ spec:
|
||||
storage: true
|
||||
subresources:
|
||||
status: {}
|
||||
preserveUnknownFields: false
|
||||
|
||||
@@ -7859,4 +7859,3 @@ spec:
|
||||
storage: true
|
||||
subresources:
|
||||
status: {}
|
||||
preserveUnknownFields: false
|
||||
|
||||
@@ -12,306 +12,313 @@ spec:
|
||||
listKind: HorizontalRunnerAutoscalerList
|
||||
plural: horizontalrunnerautoscalers
|
||||
shortNames:
|
||||
- hra
|
||||
- hra
|
||||
singular: horizontalrunnerautoscaler
|
||||
scope: Namespaced
|
||||
versions:
|
||||
- additionalPrinterColumns:
|
||||
- jsonPath: .spec.minReplicas
|
||||
name: Min
|
||||
type: number
|
||||
- jsonPath: .spec.maxReplicas
|
||||
name: Max
|
||||
type: number
|
||||
- jsonPath: .status.desiredReplicas
|
||||
name: Desired
|
||||
type: number
|
||||
- jsonPath: .status.scheduledOverridesSummary
|
||||
name: Schedule
|
||||
type: string
|
||||
name: v1alpha1
|
||||
schema:
|
||||
openAPIV3Schema:
|
||||
description: HorizontalRunnerAutoscaler is the Schema for the horizontalrunnerautoscaler API
|
||||
properties:
|
||||
apiVersion:
|
||||
description: |-
|
||||
APIVersion defines the versioned schema of this representation of an object.
|
||||
Servers should convert recognized schemas to the latest internal value, and
|
||||
may reject unrecognized values.
|
||||
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
|
||||
type: string
|
||||
kind:
|
||||
description: |-
|
||||
Kind is a string value representing the REST resource this object represents.
|
||||
Servers may infer this from the endpoint the client submits requests to.
|
||||
Cannot be updated.
|
||||
In CamelCase.
|
||||
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
|
||||
type: string
|
||||
metadata:
|
||||
type: object
|
||||
spec:
|
||||
description: HorizontalRunnerAutoscalerSpec defines the desired state of HorizontalRunnerAutoscaler
|
||||
properties:
|
||||
capacityReservations:
|
||||
items:
|
||||
description: |-
|
||||
CapacityReservation specifies the number of replicas temporarily added
|
||||
to the scale target until ExpirationTime.
|
||||
properties:
|
||||
effectiveTime:
|
||||
format: date-time
|
||||
type: string
|
||||
expirationTime:
|
||||
format: date-time
|
||||
type: string
|
||||
name:
|
||||
type: string
|
||||
replicas:
|
||||
type: integer
|
||||
type: object
|
||||
type: array
|
||||
githubAPICredentialsFrom:
|
||||
properties:
|
||||
secretRef:
|
||||
properties:
|
||||
name:
|
||||
type: string
|
||||
required:
|
||||
- name
|
||||
type: object
|
||||
type: object
|
||||
maxReplicas:
|
||||
description: MaxReplicas is the maximum number of replicas the deployment is allowed to scale
|
||||
type: integer
|
||||
metrics:
|
||||
description: Metrics is the collection of various metric targets to calculate desired number of runners
|
||||
items:
|
||||
properties:
|
||||
repositoryNames:
|
||||
description: |-
|
||||
RepositoryNames is the list of repository names to be used for calculating the metric.
|
||||
For example, a repository name is the REPO part of `github.com/USER/REPO`.
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
scaleDownAdjustment:
|
||||
description: |-
|
||||
ScaleDownAdjustment is the number of runners removed on scale-down.
|
||||
You can only specify either ScaleDownFactor or ScaleDownAdjustment.
|
||||
type: integer
|
||||
scaleDownFactor:
|
||||
description: |-
|
||||
ScaleDownFactor is the multiplicative factor applied to the current number of runners used
|
||||
to determine how many pods should be removed.
|
||||
type: string
|
||||
scaleDownThreshold:
|
||||
description: |-
|
||||
ScaleDownThreshold is the percentage of busy runners less than which will
|
||||
trigger the hpa to scale the runners down.
|
||||
type: string
|
||||
scaleUpAdjustment:
|
||||
description: |-
|
||||
ScaleUpAdjustment is the number of runners added on scale-up.
|
||||
You can only specify either ScaleUpFactor or ScaleUpAdjustment.
|
||||
type: integer
|
||||
scaleUpFactor:
|
||||
description: |-
|
||||
ScaleUpFactor is the multiplicative factor applied to the current number of runners used
|
||||
to determine how many pods should be added.
|
||||
type: string
|
||||
scaleUpThreshold:
|
||||
description: |-
|
||||
ScaleUpThreshold is the percentage of busy runners greater than which will
|
||||
trigger the hpa to scale runners up.
|
||||
type: string
|
||||
type:
|
||||
description: |-
|
||||
Type is the type of metric to be used for autoscaling.
|
||||
It can be TotalNumberOfQueuedAndInProgressWorkflowRuns or PercentageRunnersBusy.
|
||||
type: string
|
||||
type: object
|
||||
type: array
|
||||
minReplicas:
|
||||
description: MinReplicas is the minimum number of replicas the deployment is allowed to scale
|
||||
type: integer
|
||||
scaleDownDelaySecondsAfterScaleOut:
|
||||
- additionalPrinterColumns:
|
||||
- jsonPath: .spec.minReplicas
|
||||
name: Min
|
||||
type: number
|
||||
- jsonPath: .spec.maxReplicas
|
||||
name: Max
|
||||
type: number
|
||||
- jsonPath: .status.desiredReplicas
|
||||
name: Desired
|
||||
type: number
|
||||
- jsonPath: .status.scheduledOverridesSummary
|
||||
name: Schedule
|
||||
type: string
|
||||
name: v1alpha1
|
||||
schema:
|
||||
openAPIV3Schema:
|
||||
description: HorizontalRunnerAutoscaler is the Schema for the horizontalrunnerautoscaler
|
||||
API
|
||||
properties:
|
||||
apiVersion:
|
||||
description: |-
|
||||
APIVersion defines the versioned schema of this representation of an object.
|
||||
Servers should convert recognized schemas to the latest internal value, and
|
||||
may reject unrecognized values.
|
||||
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
|
||||
type: string
|
||||
kind:
|
||||
description: |-
|
||||
Kind is a string value representing the REST resource this object represents.
|
||||
Servers may infer this from the endpoint the client submits requests to.
|
||||
Cannot be updated.
|
||||
In CamelCase.
|
||||
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
|
||||
type: string
|
||||
metadata:
|
||||
type: object
|
||||
spec:
|
||||
description: HorizontalRunnerAutoscalerSpec defines the desired state
|
||||
of HorizontalRunnerAutoscaler
|
||||
properties:
|
||||
capacityReservations:
|
||||
items:
|
||||
description: |-
|
||||
ScaleDownDelaySecondsAfterScaleUp is the approximate delay for a scale down followed by a scale up
|
||||
Used to prevent flapping (down->up->down->... loop)
|
||||
type: integer
|
||||
scaleTargetRef:
|
||||
description: ScaleTargetRef is the reference to scaled resource like RunnerDeployment
|
||||
CapacityReservation specifies the number of replicas temporarily added
|
||||
to the scale target until ExpirationTime.
|
||||
properties:
|
||||
kind:
|
||||
description: Kind is the type of resource being referenced
|
||||
enum:
|
||||
- RunnerDeployment
|
||||
- RunnerSet
|
||||
effectiveTime:
|
||||
format: date-time
|
||||
type: string
|
||||
expirationTime:
|
||||
format: date-time
|
||||
type: string
|
||||
name:
|
||||
description: Name is the name of resource being referenced
|
||||
type: string
|
||||
replicas:
|
||||
type: integer
|
||||
type: object
|
||||
scaleUpTriggers:
|
||||
description: |-
|
||||
ScaleUpTriggers is an experimental feature to increase the desired replicas by 1
|
||||
on each webhook request received by the webhookBasedAutoscaler.
|
||||
|
||||
This feature requires you to also enable and deploy the webhookBasedAutoscaler onto your cluster.
|
||||
|
||||
Note that the added runners remain until the next sync period at least,
|
||||
and they may or may not be used by GitHub Actions depending on the timing.
|
||||
They are intended to be used to gain "resource slack" immediately after you
|
||||
receive a webhook from GitHub, so that you can loosely expect MinReplicas runners to be always available.
|
||||
items:
|
||||
type: array
|
||||
githubAPICredentialsFrom:
|
||||
properties:
|
||||
secretRef:
|
||||
properties:
|
||||
amount:
|
||||
type: integer
|
||||
duration:
|
||||
type: string
|
||||
githubEvent:
|
||||
properties:
|
||||
checkRun:
|
||||
description: https://docs.github.com/en/actions/reference/events-that-trigger-workflows#check_run
|
||||
properties:
|
||||
names:
|
||||
description: |-
|
||||
Names is a list of GitHub Actions glob patterns.
|
||||
Any check_run event whose name matches one of patterns in the list can trigger autoscaling.
|
||||
Note that check_run name seem to equal to the job name you've defined in your actions workflow yaml file.
|
||||
So it is very likely that you can utilize this to trigger depending on the job.
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
repositories:
|
||||
description: |-
|
||||
Repositories is a list of GitHub repositories.
|
||||
Any check_run event whose repository matches one of repositories in the list can trigger autoscaling.
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
status:
|
||||
type: string
|
||||
types:
|
||||
description: 'One of: created, rerequested, or completed'
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
type: object
|
||||
pullRequest:
|
||||
description: https://docs.github.com/en/actions/reference/events-that-trigger-workflows#pull_request
|
||||
properties:
|
||||
branches:
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
types:
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
type: object
|
||||
push:
|
||||
description: |-
|
||||
PushSpec is the condition for triggering scale-up on push event
|
||||
Also see https://docs.github.com/en/actions/reference/events-that-trigger-workflows#push
|
||||
type: object
|
||||
workflowJob:
|
||||
description: https://docs.github.com/en/developers/webhooks-and-events/webhooks/webhook-events-and-payloads#workflow_job
|
||||
type: object
|
||||
type: object
|
||||
type: object
|
||||
type: array
|
||||
scheduledOverrides:
|
||||
description: |-
|
||||
ScheduledOverrides is the list of ScheduledOverride.
|
||||
It can be used to override a few fields of HorizontalRunnerAutoscalerSpec on schedule.
|
||||
The earlier a scheduled override is, the higher it is prioritized.
|
||||
items:
|
||||
description: |-
|
||||
ScheduledOverride can be used to override a few fields of HorizontalRunnerAutoscalerSpec on schedule.
|
||||
A schedule can optionally be recurring, so that the corresponding override happens every day, week, month, or year.
|
||||
properties:
|
||||
endTime:
|
||||
description: EndTime is the time at which the first override ends.
|
||||
format: date-time
|
||||
type: string
|
||||
minReplicas:
|
||||
description: |-
|
||||
MinReplicas is the number of runners while overriding.
|
||||
If omitted, it doesn't override minReplicas.
|
||||
minimum: 0
|
||||
nullable: true
|
||||
type: integer
|
||||
recurrenceRule:
|
||||
properties:
|
||||
frequency:
|
||||
description: |-
|
||||
Frequency is the name of a predefined interval of each recurrence.
|
||||
The valid values are "Daily", "Weekly", "Monthly", and "Yearly".
|
||||
If empty, the corresponding override happens only once.
|
||||
enum:
|
||||
- Daily
|
||||
- Weekly
|
||||
- Monthly
|
||||
- Yearly
|
||||
type: string
|
||||
untilTime:
|
||||
description: |-
|
||||
UntilTime is the time of the final recurrence.
|
||||
If empty, the schedule recurs forever.
|
||||
format: date-time
|
||||
type: string
|
||||
type: object
|
||||
startTime:
|
||||
description: StartTime is the time at which the first override starts.
|
||||
format: date-time
|
||||
name:
|
||||
type: string
|
||||
required:
|
||||
- endTime
|
||||
- startTime
|
||||
- name
|
||||
type: object
|
||||
type: array
|
||||
type: object
|
||||
status:
|
||||
properties:
|
||||
cacheEntries:
|
||||
items:
|
||||
properties:
|
||||
expirationTime:
|
||||
format: date-time
|
||||
type: object
|
||||
maxReplicas:
|
||||
description: MaxReplicas is the maximum number of replicas the deployment
|
||||
is allowed to scale
|
||||
type: integer
|
||||
metrics:
|
||||
description: Metrics is the collection of various metric targets to
|
||||
calculate desired number of runners
|
||||
items:
|
||||
properties:
|
||||
repositoryNames:
|
||||
description: |-
|
||||
RepositoryNames is the list of repository names to be used for calculating the metric.
|
||||
For example, a repository name is the REPO part of `github.com/USER/REPO`.
|
||||
items:
|
||||
type: string
|
||||
key:
|
||||
type: string
|
||||
value:
|
||||
type: integer
|
||||
type: object
|
||||
type: array
|
||||
desiredReplicas:
|
||||
type: array
|
||||
scaleDownAdjustment:
|
||||
description: |-
|
||||
ScaleDownAdjustment is the number of runners removed on scale-down.
|
||||
You can only specify either ScaleDownFactor or ScaleDownAdjustment.
|
||||
type: integer
|
||||
scaleDownFactor:
|
||||
description: |-
|
||||
ScaleDownFactor is the multiplicative factor applied to the current number of runners used
|
||||
to determine how many pods should be removed.
|
||||
type: string
|
||||
scaleDownThreshold:
|
||||
description: |-
|
||||
ScaleDownThreshold is the percentage of busy runners less than which will
|
||||
trigger the hpa to scale the runners down.
|
||||
type: string
|
||||
scaleUpAdjustment:
|
||||
description: |-
|
||||
ScaleUpAdjustment is the number of runners added on scale-up.
|
||||
You can only specify either ScaleUpFactor or ScaleUpAdjustment.
|
||||
type: integer
|
||||
scaleUpFactor:
|
||||
description: |-
|
||||
ScaleUpFactor is the multiplicative factor applied to the current number of runners used
|
||||
to determine how many pods should be added.
|
||||
type: string
|
||||
scaleUpThreshold:
|
||||
description: |-
|
||||
ScaleUpThreshold is the percentage of busy runners greater than which will
|
||||
trigger the hpa to scale runners up.
|
||||
type: string
|
||||
type:
|
||||
description: |-
|
||||
Type is the type of metric to be used for autoscaling.
|
||||
It can be TotalNumberOfQueuedAndInProgressWorkflowRuns or PercentageRunnersBusy.
|
||||
type: string
|
||||
type: object
|
||||
type: array
|
||||
minReplicas:
|
||||
description: MinReplicas is the minimum number of replicas the deployment
|
||||
is allowed to scale
|
||||
type: integer
|
||||
scaleDownDelaySecondsAfterScaleOut:
|
||||
description: |-
|
||||
ScaleDownDelaySecondsAfterScaleUp is the approximate delay for a scale down followed by a scale up
|
||||
Used to prevent flapping (down->up->down->... loop)
|
||||
type: integer
|
||||
scaleTargetRef:
|
||||
description: ScaleTargetRef is the reference to scaled resource like
|
||||
RunnerDeployment
|
||||
properties:
|
||||
kind:
|
||||
description: Kind is the type of resource being referenced
|
||||
enum:
|
||||
- RunnerDeployment
|
||||
- RunnerSet
|
||||
type: string
|
||||
name:
|
||||
description: Name is the name of resource being referenced
|
||||
type: string
|
||||
type: object
|
||||
scaleUpTriggers:
|
||||
description: |-
|
||||
ScaleUpTriggers is an experimental feature to increase the desired replicas by 1
|
||||
on each webhook request received by the webhookBasedAutoscaler.
|
||||
|
||||
This feature requires you to also enable and deploy the webhookBasedAutoscaler onto your cluster.
|
||||
|
||||
Note that the added runners remain until the next sync period at least,
|
||||
and they may or may not be used by GitHub Actions depending on the timing.
|
||||
They are intended to be used to gain "resource slack" immediately after you
|
||||
receive a webhook from GitHub, so that you can loosely expect MinReplicas runners to be always available.
|
||||
items:
|
||||
properties:
|
||||
amount:
|
||||
type: integer
|
||||
duration:
|
||||
type: string
|
||||
githubEvent:
|
||||
properties:
|
||||
checkRun:
|
||||
description: https://docs.github.com/en/actions/reference/events-that-trigger-workflows#check_run
|
||||
properties:
|
||||
names:
|
||||
description: |-
|
||||
Names is a list of GitHub Actions glob patterns.
|
||||
Any check_run event whose name matches one of patterns in the list can trigger autoscaling.
|
||||
Note that check_run name seem to equal to the job name you've defined in your actions workflow yaml file.
|
||||
So it is very likely that you can utilize this to trigger depending on the job.
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
repositories:
|
||||
description: |-
|
||||
Repositories is a list of GitHub repositories.
|
||||
Any check_run event whose repository matches one of repositories in the list can trigger autoscaling.
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
status:
|
||||
type: string
|
||||
types:
|
||||
description: 'One of: created, rerequested, or completed'
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
type: object
|
||||
pullRequest:
|
||||
description: https://docs.github.com/en/actions/reference/events-that-trigger-workflows#pull_request
|
||||
properties:
|
||||
branches:
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
types:
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
type: object
|
||||
push:
|
||||
description: |-
|
||||
PushSpec is the condition for triggering scale-up on push event
|
||||
Also see https://docs.github.com/en/actions/reference/events-that-trigger-workflows#push
|
||||
type: object
|
||||
workflowJob:
|
||||
description: https://docs.github.com/en/developers/webhooks-and-events/webhooks/webhook-events-and-payloads#workflow_job
|
||||
type: object
|
||||
type: object
|
||||
type: object
|
||||
type: array
|
||||
scheduledOverrides:
|
||||
description: |-
|
||||
ScheduledOverrides is the list of ScheduledOverride.
|
||||
It can be used to override a few fields of HorizontalRunnerAutoscalerSpec on schedule.
|
||||
The earlier a scheduled override is, the higher it is prioritized.
|
||||
items:
|
||||
description: |-
|
||||
DesiredReplicas is the total number of desired, non-terminated and latest pods to be set for the primary RunnerSet
|
||||
This doesn't include outdated pods while upgrading the deployment and replacing the runnerset.
|
||||
type: integer
|
||||
lastSuccessfulScaleOutTime:
|
||||
format: date-time
|
||||
nullable: true
|
||||
type: string
|
||||
observedGeneration:
|
||||
description: |-
|
||||
ObservedGeneration is the most recent generation observed for the target. It corresponds to e.g.
|
||||
RunnerDeployment's generation, which is updated on mutation by the API Server.
|
||||
format: int64
|
||||
type: integer
|
||||
scheduledOverridesSummary:
|
||||
description: |-
|
||||
ScheduledOverridesSummary is the summary of active and upcoming scheduled overrides to be shown in e.g. a column of a `kubectl get hra` output
|
||||
for observability.
|
||||
type: string
|
||||
type: object
|
||||
type: object
|
||||
served: true
|
||||
storage: true
|
||||
subresources:
|
||||
status: {}
|
||||
preserveUnknownFields: false
|
||||
ScheduledOverride can be used to override a few fields of HorizontalRunnerAutoscalerSpec on schedule.
|
||||
A schedule can optionally be recurring, so that the corresponding override happens every day, week, month, or year.
|
||||
properties:
|
||||
endTime:
|
||||
description: EndTime is the time at which the first override
|
||||
ends.
|
||||
format: date-time
|
||||
type: string
|
||||
minReplicas:
|
||||
description: |-
|
||||
MinReplicas is the number of runners while overriding.
|
||||
If omitted, it doesn't override minReplicas.
|
||||
minimum: 0
|
||||
nullable: true
|
||||
type: integer
|
||||
recurrenceRule:
|
||||
properties:
|
||||
frequency:
|
||||
description: |-
|
||||
Frequency is the name of a predefined interval of each recurrence.
|
||||
The valid values are "Daily", "Weekly", "Monthly", and "Yearly".
|
||||
If empty, the corresponding override happens only once.
|
||||
enum:
|
||||
- Daily
|
||||
- Weekly
|
||||
- Monthly
|
||||
- Yearly
|
||||
type: string
|
||||
untilTime:
|
||||
description: |-
|
||||
UntilTime is the time of the final recurrence.
|
||||
If empty, the schedule recurs forever.
|
||||
format: date-time
|
||||
type: string
|
||||
type: object
|
||||
startTime:
|
||||
description: StartTime is the time at which the first override
|
||||
starts.
|
||||
format: date-time
|
||||
type: string
|
||||
required:
|
||||
- endTime
|
||||
- startTime
|
||||
type: object
|
||||
type: array
|
||||
type: object
|
||||
status:
|
||||
properties:
|
||||
cacheEntries:
|
||||
items:
|
||||
properties:
|
||||
expirationTime:
|
||||
format: date-time
|
||||
type: string
|
||||
key:
|
||||
type: string
|
||||
value:
|
||||
type: integer
|
||||
type: object
|
||||
type: array
|
||||
desiredReplicas:
|
||||
description: |-
|
||||
DesiredReplicas is the total number of desired, non-terminated and latest pods to be set for the primary RunnerSet
|
||||
This doesn't include outdated pods while upgrading the deployment and replacing the runnerset.
|
||||
type: integer
|
||||
lastSuccessfulScaleOutTime:
|
||||
format: date-time
|
||||
nullable: true
|
||||
type: string
|
||||
observedGeneration:
|
||||
description: |-
|
||||
ObservedGeneration is the most recent generation observed for the target. It corresponds to e.g.
|
||||
RunnerDeployment's generation, which is updated on mutation by the API Server.
|
||||
format: int64
|
||||
type: integer
|
||||
scheduledOverridesSummary:
|
||||
description: |-
|
||||
ScheduledOverridesSummary is the summary of active and upcoming scheduled overrides to be shown in e.g. a column of a `kubectl get hra` output
|
||||
for observability.
|
||||
type: string
|
||||
type: object
|
||||
type: object
|
||||
served: true
|
||||
storage: true
|
||||
subresources:
|
||||
status: {}
|
||||
|
||||
@@ -9348,4 +9348,3 @@ spec:
|
||||
storage: true
|
||||
subresources:
|
||||
status: {}
|
||||
preserveUnknownFields: false
|
||||
|
||||
@@ -9322,4 +9322,3 @@ spec:
|
||||
storage: true
|
||||
subresources:
|
||||
status: {}
|
||||
preserveUnknownFields: false
|
||||
|
||||
@@ -9328,4 +9328,3 @@ spec:
|
||||
storage: true
|
||||
subresources:
|
||||
status: {}
|
||||
preserveUnknownFields: false
|
||||
|
||||
@@ -8389,4 +8389,3 @@ spec:
|
||||
storage: true
|
||||
subresources:
|
||||
status: {}
|
||||
preserveUnknownFields: false
|
||||
|
||||
@@ -21,6 +21,7 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
|
||||
@@ -179,11 +180,64 @@ func (r *EphemeralRunnerReconciler) Reconcile(ctx context.Context, req ctrl.Requ
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
if ephemeralRunner.Status.RunnerId == 0 {
|
||||
log.Info("Creating new ephemeral runner registration and updating status with runner config")
|
||||
if r, err := r.updateStatusWithRunnerConfig(ctx, ephemeralRunner, log); r != nil {
|
||||
return *r, err
|
||||
secret := new(corev1.Secret)
|
||||
if err := r.Get(ctx, req.NamespacedName, secret); err != nil {
|
||||
if !kerrors.IsNotFound(err) {
|
||||
log.Error(err, "Failed to fetch secret")
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
jitConfig, err := r.createRunnerJitConfig(ctx, ephemeralRunner, log)
|
||||
switch {
|
||||
case err == nil:
|
||||
// create secret if not created
|
||||
log.Info("Creating new ephemeral runner secret for jitconfig.")
|
||||
jitSecret, err := r.createSecret(ctx, ephemeralRunner, jitConfig, log)
|
||||
if err != nil {
|
||||
return ctrl.Result{}, fmt.Errorf("failed to create secret: %w", err)
|
||||
}
|
||||
log.Info("Created new ephemeral runner secret for jitconfig.")
|
||||
secret = jitSecret
|
||||
|
||||
case errors.Is(err, retryableError):
|
||||
log.Info("Encountered retryable error, requeueing", "error", err.Error())
|
||||
return ctrl.Result{Requeue: true}, nil
|
||||
case errors.Is(err, fatalError):
|
||||
log.Info("JIT config cannot be created for this ephemeral runner, issuing delete", "error", err.Error())
|
||||
if err := r.Delete(ctx, ephemeralRunner); err != nil {
|
||||
return ctrl.Result{}, fmt.Errorf("failed to delete the ephemeral runner: %w", err)
|
||||
}
|
||||
log.Info("Request to delete ephemeral runner has been issued")
|
||||
return ctrl.Result{}, nil
|
||||
default:
|
||||
log.Error(err, "Failed to create ephemeral runners secret", "error", err.Error())
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
}
|
||||
|
||||
if ephemeralRunner.Status.RunnerId == 0 {
|
||||
log.Info("Updating ephemeral runner status with runnerId and runnerName")
|
||||
runnerID, err := strconv.Atoi(string(secret.Data["runnerId"]))
|
||||
if err != nil {
|
||||
log.Error(err, "Runner config secret is corrupted: missing runnerId")
|
||||
log.Info("Deleting corrupted runner config secret")
|
||||
if err := r.Delete(ctx, secret); err != nil {
|
||||
return ctrl.Result{}, fmt.Errorf("failed to delete the corrupted runner config secret")
|
||||
}
|
||||
log.Info("Corrupted runner config secret has been deleted")
|
||||
return ctrl.Result{Requeue: true}, nil
|
||||
}
|
||||
|
||||
runnerName := string(secret.Data["runnerName"])
|
||||
if err := patchSubResource(ctx, r.Status(), ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) {
|
||||
obj.Status.RunnerId = runnerID
|
||||
obj.Status.RunnerName = runnerName
|
||||
}); err != nil {
|
||||
return ctrl.Result{}, fmt.Errorf("failed to update runner status for RunnerId/RunnerName/RunnerJITConfig: %w", err)
|
||||
}
|
||||
ephemeralRunner.Status.RunnerId = runnerID
|
||||
ephemeralRunner.Status.RunnerName = runnerName
|
||||
log.Info("Updated ephemeral runner status with runnerId and runnerName")
|
||||
}
|
||||
|
||||
if len(ephemeralRunner.Status.Failures) > maxFailures {
|
||||
@@ -201,33 +255,16 @@ func (r *EphemeralRunnerReconciler) Reconcile(ctx context.Context, req ctrl.Requ
|
||||
backoffDuration := failedRunnerBackoff[len(ephemeralRunner.Status.Failures)]
|
||||
nextReconciliation := lastFailure.Add(backoffDuration)
|
||||
if !lastFailure.IsZero() && now.Before(&metav1.Time{Time: nextReconciliation}) {
|
||||
requeueAfter := nextReconciliation.Sub(now.Time)
|
||||
log.Info("Backing off the next reconciliation due to failure",
|
||||
"lastFailure", lastFailure,
|
||||
"nextReconciliation", nextReconciliation,
|
||||
"requeueAfter", nextReconciliation.Sub(now.Time),
|
||||
"requeueAfter", requeueAfter,
|
||||
)
|
||||
return ctrl.Result{RequeueAfter: now.Sub(nextReconciliation)}, nil
|
||||
}
|
||||
|
||||
secret := new(corev1.Secret)
|
||||
if err := r.Get(ctx, req.NamespacedName, secret); err != nil {
|
||||
if !kerrors.IsNotFound(err) {
|
||||
log.Error(err, "Failed to fetch secret")
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
// create secret if not created
|
||||
log.Info("Creating new ephemeral runner secret for jitconfig.")
|
||||
if r, err := r.createSecret(ctx, ephemeralRunner, log); r != nil {
|
||||
return *r, err
|
||||
}
|
||||
|
||||
// Retry to get the secret that was just created.
|
||||
// Otherwise, even though we want to continue to create the pod,
|
||||
// it fails due to the missing secret resulting in an invalid pod spec.
|
||||
if err := r.Get(ctx, req.NamespacedName, secret); err != nil {
|
||||
log.Error(err, "Failed to fetch secret")
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
return ctrl.Result{
|
||||
Requeue: true,
|
||||
RequeueAfter: requeueAfter,
|
||||
}, nil
|
||||
}
|
||||
|
||||
pod := new(corev1.Pod)
|
||||
@@ -236,13 +273,15 @@ func (r *EphemeralRunnerReconciler) Reconcile(ctx context.Context, req ctrl.Requ
|
||||
log.Error(err, "Failed to fetch the pod")
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
log.Info("Ephemeral runner pod does not exist. Creating new ephemeral runner")
|
||||
|
||||
// Pod was not found. Create if the pod has never been created
|
||||
log.Info("Creating new EphemeralRunner pod.")
|
||||
result, err := r.createPod(ctx, ephemeralRunner, secret, log)
|
||||
switch {
|
||||
case err == nil:
|
||||
return result, nil
|
||||
case kerrors.IsAlreadyExists(err):
|
||||
log.Info("Runner pod already exists. Waiting for the pod event to be received")
|
||||
return ctrl.Result{Requeue: true, RequeueAfter: 5 * time.Second}, nil
|
||||
case kerrors.IsInvalid(err) || kerrors.IsForbidden(err):
|
||||
log.Error(err, "Failed to create a pod due to unrecoverable failure")
|
||||
errMessage := fmt.Sprintf("Failed to create the pod: %v", err)
|
||||
@@ -287,34 +326,28 @@ func (r *EphemeralRunnerReconciler) Reconcile(ctx context.Context, req ctrl.Requ
|
||||
|
||||
case cs.State.Terminated.ExitCode != 0: // failed
|
||||
log.Info("Ephemeral runner container failed", "exitCode", cs.State.Terminated.ExitCode)
|
||||
if ephemeralRunner.HasJob() {
|
||||
log.Error(
|
||||
errors.New("ephemeral runner has a job assigned, but the pod has failed"),
|
||||
"Ephemeral runner either has faulty entrypoint or something external killing the runner",
|
||||
)
|
||||
log.Info("Deleting the ephemeral runner that has a job assigned but the pod has failed")
|
||||
if err := r.Delete(ctx, ephemeralRunner); err != nil {
|
||||
log.Error(err, "Failed to delete the ephemeral runner that has a job assigned but the pod has failed")
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
if err := r.deletePodAsFailed(ctx, ephemeralRunner, pod, log); err != nil {
|
||||
log.Error(err, "Failed to delete runner pod on failure")
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
return ctrl.Result{}, nil
|
||||
|
||||
default:
|
||||
// pod succeeded. We double-check with the service if the runner exists.
|
||||
// The reason is that image can potentially finish with status 0, but not pick up the job.
|
||||
existsInService, err := r.runnerRegisteredWithService(ctx, ephemeralRunner.DeepCopy(), log)
|
||||
if err != nil {
|
||||
log.Error(err, "Failed to check if runner is registered with the service")
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
if !existsInService {
|
||||
// the runner does not exist in the service, so it must be done
|
||||
log.Info("Ephemeral runner has finished since it does not exist in the service anymore")
|
||||
if err := r.markAsFinished(ctx, ephemeralRunner, log); err != nil {
|
||||
log.Error(err, "Failed to mark ephemeral runner as finished")
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
// The runner still exists. This can happen if the pod exited with 0 but fails to start
|
||||
log.Info("Ephemeral runner pod has finished, but the runner still exists in the service. Deleting the pod to restart it.")
|
||||
if err := r.deletePodAsFailed(ctx, ephemeralRunner, pod, log); err != nil {
|
||||
log.Error(err, "failed to delete a pod that still exists in the service")
|
||||
default: // succeeded
|
||||
log.Info("Ephemeral runner has finished successfully, deleting ephemeral runner", "exitCode", cs.State.Terminated.ExitCode)
|
||||
if err := r.Delete(ctx, ephemeralRunner); err != nil {
|
||||
log.Error(err, "Failed to delete ephemeral runner after successful completion")
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
return ctrl.Result{}, nil
|
||||
@@ -484,18 +517,6 @@ func (r *EphemeralRunnerReconciler) markAsFailed(ctx context.Context, ephemeralR
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *EphemeralRunnerReconciler) markAsFinished(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) error {
|
||||
log.Info("Updating ephemeral runner status to Finished")
|
||||
if err := patchSubResource(ctx, r.Status(), ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) {
|
||||
obj.Status.Phase = corev1.PodSucceeded
|
||||
}); err != nil {
|
||||
return fmt.Errorf("failed to update ephemeral runner with status finished: %w", err)
|
||||
}
|
||||
|
||||
log.Info("EphemeralRunner status is marked as Finished")
|
||||
return nil
|
||||
}
|
||||
|
||||
// deletePodAsFailed is responsible for deleting the pod and updating the .Status.Failures for tracking failure count.
|
||||
// It should not be responsible for setting the status to Failed.
|
||||
func (r *EphemeralRunnerReconciler) deletePodAsFailed(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, pod *corev1.Pod, log logr.Logger) error {
|
||||
@@ -523,14 +544,12 @@ func (r *EphemeralRunnerReconciler) deletePodAsFailed(ctx context.Context, ephem
|
||||
return nil
|
||||
}
|
||||
|
||||
// updateStatusWithRunnerConfig fetches runtime configuration needed by the runner
|
||||
// This method should always set .status.runnerId and .status.runnerJITConfig
|
||||
func (r *EphemeralRunnerReconciler) updateStatusWithRunnerConfig(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) (*ctrl.Result, error) {
|
||||
func (r *EphemeralRunnerReconciler) createRunnerJitConfig(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) (*actions.RunnerScaleSetJitRunnerConfig, error) {
|
||||
// Runner is not registered with the service. We need to register it first
|
||||
log.Info("Creating ephemeral runner JIT config")
|
||||
actionsClient, err := r.GetActionsService(ctx, ephemeralRunner)
|
||||
if err != nil {
|
||||
return &ctrl.Result{}, fmt.Errorf("failed to get actions client for generating JIT config: %w", err)
|
||||
return nil, fmt.Errorf("failed to get actions client for generating JIT config: %w", err)
|
||||
}
|
||||
|
||||
jitSettings := &actions.RunnerScaleSetJitRunnerSetting{
|
||||
@@ -545,74 +564,52 @@ func (r *EphemeralRunnerReconciler) updateStatusWithRunnerConfig(ctx context.Con
|
||||
}
|
||||
|
||||
jitConfig, err := actionsClient.GenerateJitRunnerConfig(ctx, jitSettings, ephemeralRunner.Spec.RunnerScaleSetId)
|
||||
if err == nil { // if NO error
|
||||
log.Info("Created ephemeral runner JIT config", "runnerId", jitConfig.Runner.Id)
|
||||
return jitConfig, nil
|
||||
}
|
||||
|
||||
actionsError := &actions.ActionsError{}
|
||||
if !errors.As(err, &actionsError) {
|
||||
return nil, fmt.Errorf("failed to generate JIT config with generic error: %w", err)
|
||||
}
|
||||
|
||||
if actionsError.StatusCode != http.StatusConflict ||
|
||||
!actionsError.IsException("AgentExistsException") {
|
||||
return nil, fmt.Errorf("failed to generate JIT config with Actions service error: %w", err)
|
||||
}
|
||||
|
||||
// If the runner with the name we want already exists it means:
|
||||
// - We might have a name collision.
|
||||
// - Our previous reconciliation loop failed to update the
|
||||
// status with the runnerId and runnerJITConfig after the `GenerateJitRunnerConfig`
|
||||
// created the runner registration on the service.
|
||||
// We will try to get the runner and see if it's belong to this AutoScalingRunnerSet,
|
||||
// if so, we can simply delete the runner registration and create a new one.
|
||||
log.Info("Getting runner jit config failed with conflict error, trying to get the runner by name", "runnerName", ephemeralRunner.Name)
|
||||
existingRunner, err := actionsClient.GetRunnerByName(ctx, ephemeralRunner.Name)
|
||||
if err != nil {
|
||||
actionsError := &actions.ActionsError{}
|
||||
if !errors.As(err, &actionsError) {
|
||||
return &ctrl.Result{}, fmt.Errorf("failed to generate JIT config with generic error: %w", err)
|
||||
}
|
||||
return nil, fmt.Errorf("failed to get runner by name: %w", err)
|
||||
}
|
||||
|
||||
if actionsError.StatusCode != http.StatusConflict ||
|
||||
!actionsError.IsException("AgentExistsException") {
|
||||
return &ctrl.Result{}, fmt.Errorf("failed to generate JIT config with Actions service error: %w", err)
|
||||
}
|
||||
if existingRunner == nil {
|
||||
log.Info("Runner with the same name does not exist anymore, re-queuing the reconciliation")
|
||||
return nil, fmt.Errorf("%w: runner existed, retry configuration", retryableError)
|
||||
}
|
||||
|
||||
// If the runner with the name we want already exists it means:
|
||||
// - We might have a name collision.
|
||||
// - Our previous reconciliation loop failed to update the
|
||||
// status with the runnerId and runnerJITConfig after the `GenerateJitRunnerConfig`
|
||||
// created the runner registration on the service.
|
||||
// We will try to get the runner and see if it's belong to this AutoScalingRunnerSet,
|
||||
// if so, we can simply delete the runner registration and create a new one.
|
||||
log.Info("Getting runner jit config failed with conflict error, trying to get the runner by name", "runnerName", ephemeralRunner.Name)
|
||||
existingRunner, err := actionsClient.GetRunnerByName(ctx, ephemeralRunner.Name)
|
||||
log.Info("Found the runner with the same name", "runnerId", existingRunner.Id, "runnerScaleSetId", existingRunner.RunnerScaleSetId)
|
||||
if existingRunner.RunnerScaleSetId == ephemeralRunner.Spec.RunnerScaleSetId {
|
||||
log.Info("Removing the runner with the same name")
|
||||
err := actionsClient.RemoveRunner(ctx, int64(existingRunner.Id))
|
||||
if err != nil {
|
||||
return &ctrl.Result{}, fmt.Errorf("failed to get runner by name: %w", err)
|
||||
return nil, fmt.Errorf("failed to remove runner from the service: %w", err)
|
||||
}
|
||||
|
||||
if existingRunner == nil {
|
||||
log.Info("Runner with the same name does not exist, re-queuing the reconciliation")
|
||||
return &ctrl.Result{Requeue: true}, nil
|
||||
}
|
||||
|
||||
log.Info("Found the runner with the same name", "runnerId", existingRunner.Id, "runnerScaleSetId", existingRunner.RunnerScaleSetId)
|
||||
if existingRunner.RunnerScaleSetId == ephemeralRunner.Spec.RunnerScaleSetId {
|
||||
log.Info("Removing the runner with the same name")
|
||||
err := actionsClient.RemoveRunner(ctx, int64(existingRunner.Id))
|
||||
if err != nil {
|
||||
return &ctrl.Result{}, fmt.Errorf("failed to remove runner from the service: %w", err)
|
||||
}
|
||||
|
||||
log.Info("Removed the runner with the same name, re-queuing the reconciliation")
|
||||
return &ctrl.Result{Requeue: true}, nil
|
||||
}
|
||||
|
||||
// TODO: Do we want to mark the ephemeral runner as failed, and let EphemeralRunnerSet to clean it up, so we can recover from this situation?
|
||||
// The situation is that the EphemeralRunner's name is already used by something else to register a runner, and we can't take the control back.
|
||||
return &ctrl.Result{}, fmt.Errorf("runner with the same name but doesn't belong to this RunnerScaleSet: %w", err)
|
||||
}
|
||||
log.Info("Created ephemeral runner JIT config", "runnerId", jitConfig.Runner.Id)
|
||||
|
||||
log.Info("Updating ephemeral runner status with runnerId and runnerJITConfig")
|
||||
err = patchSubResource(ctx, r.Status(), ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) {
|
||||
obj.Status.RunnerId = jitConfig.Runner.Id
|
||||
obj.Status.RunnerName = jitConfig.Runner.Name
|
||||
obj.Status.RunnerJITConfig = jitConfig.EncodedJITConfig
|
||||
})
|
||||
if err != nil {
|
||||
return &ctrl.Result{}, fmt.Errorf("failed to update runner status for RunnerId/RunnerName/RunnerJITConfig: %w", err)
|
||||
log.Info("Removed the runner with the same name, re-queuing the reconciliation")
|
||||
return nil, fmt.Errorf("%w: runner existed belonging to the scale set, retry configuration", retryableError)
|
||||
}
|
||||
|
||||
// We want to continue without a requeue for faster pod creation.
|
||||
//
|
||||
// To do so, we update the status in-place, so that both continuing the loop and
|
||||
// and requeuing and skipping updateStatusWithRunnerConfig in the next loop, will
|
||||
// have the same effect.
|
||||
ephemeralRunner.Status.RunnerId = jitConfig.Runner.Id
|
||||
ephemeralRunner.Status.RunnerName = jitConfig.Runner.Name
|
||||
ephemeralRunner.Status.RunnerJITConfig = jitConfig.EncodedJITConfig
|
||||
|
||||
log.Info("Updated ephemeral runner status with runnerId and runnerJITConfig")
|
||||
return nil, nil
|
||||
return nil, fmt.Errorf("%w: runner with the same name but doesn't belong to this RunnerScaleSet: %w", fatalError, err)
|
||||
}
|
||||
|
||||
func (r *EphemeralRunnerReconciler) createPod(ctx context.Context, runner *v1alpha1.EphemeralRunner, secret *corev1.Secret, log logr.Logger) (ctrl.Result, error) {
|
||||
@@ -688,21 +685,21 @@ func (r *EphemeralRunnerReconciler) createPod(ctx context.Context, runner *v1alp
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
func (r *EphemeralRunnerReconciler) createSecret(ctx context.Context, runner *v1alpha1.EphemeralRunner, log logr.Logger) (*ctrl.Result, error) {
|
||||
func (r *EphemeralRunnerReconciler) createSecret(ctx context.Context, runner *v1alpha1.EphemeralRunner, jitConfig *actions.RunnerScaleSetJitRunnerConfig, log logr.Logger) (*corev1.Secret, error) {
|
||||
log.Info("Creating new secret for ephemeral runner")
|
||||
jitSecret := r.newEphemeralRunnerJitSecret(runner)
|
||||
jitSecret := r.newEphemeralRunnerJitSecret(runner, jitConfig)
|
||||
|
||||
if err := ctrl.SetControllerReference(runner, jitSecret, r.Scheme); err != nil {
|
||||
return &ctrl.Result{}, fmt.Errorf("failed to set controller reference: %w", err)
|
||||
return nil, fmt.Errorf("failed to set controller reference: %w", err)
|
||||
}
|
||||
|
||||
log.Info("Created new secret spec for ephemeral runner")
|
||||
if err := r.Create(ctx, jitSecret); err != nil {
|
||||
return &ctrl.Result{}, fmt.Errorf("failed to create jit secret: %w", err)
|
||||
return nil, fmt.Errorf("failed to create jit secret: %w", err)
|
||||
}
|
||||
|
||||
log.Info("Created ephemeral runner secret", "secretName", jitSecret.Name)
|
||||
return nil, nil
|
||||
return jitSecret, nil
|
||||
}
|
||||
|
||||
// updateRunStatusFromPod is responsible for updating non-exiting statuses.
|
||||
@@ -752,35 +749,6 @@ func (r *EphemeralRunnerReconciler) updateRunStatusFromPod(ctx context.Context,
|
||||
return nil
|
||||
}
|
||||
|
||||
// runnerRegisteredWithService checks if the runner is still registered with the service
|
||||
// Returns found=false and err=nil if ephemeral runner does not exist in GitHub service and should be deleted
|
||||
func (r EphemeralRunnerReconciler) runnerRegisteredWithService(ctx context.Context, runner *v1alpha1.EphemeralRunner, log logr.Logger) (found bool, err error) {
|
||||
actionsClient, err := r.GetActionsService(ctx, runner)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("failed to get Actions client for ScaleSet: %w", err)
|
||||
}
|
||||
|
||||
log.Info("Checking if runner exists in GitHub service", "runnerId", runner.Status.RunnerId)
|
||||
_, err = actionsClient.GetRunner(ctx, int64(runner.Status.RunnerId))
|
||||
if err != nil {
|
||||
actionsError := &actions.ActionsError{}
|
||||
if !errors.As(err, &actionsError) {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if actionsError.StatusCode != http.StatusNotFound ||
|
||||
!actionsError.IsException("AgentNotFoundException") {
|
||||
return false, fmt.Errorf("failed to check if runner exists in GitHub service: %w", err)
|
||||
}
|
||||
|
||||
log.Info("Runner does not exist in GitHub service", "runnerId", runner.Status.RunnerId)
|
||||
return false, nil
|
||||
}
|
||||
|
||||
log.Info("Runner exists in GitHub service", "runnerId", runner.Status.RunnerId)
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (r *EphemeralRunnerReconciler) deleteRunnerFromService(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) error {
|
||||
client, err := r.GetActionsService(ctx, ephemeralRunner)
|
||||
if err != nil {
|
||||
|
||||
@@ -176,7 +176,7 @@ var _ = Describe("EphemeralRunner", func() {
|
||||
).Should(BeEquivalentTo(ephemeralRunner.Name))
|
||||
})
|
||||
|
||||
It("It should re-create pod on failure", func() {
|
||||
It("It should re-create pod on failure and no job assigned", func() {
|
||||
pod := new(corev1.Pod)
|
||||
Eventually(func() (bool, error) {
|
||||
if err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, pod); err != nil {
|
||||
@@ -200,6 +200,67 @@ var _ = Describe("EphemeralRunner", func() {
|
||||
).Should(BeEquivalentTo(true))
|
||||
})
|
||||
|
||||
It("It should delete ephemeral runner on failure and job assigned", func() {
|
||||
er := new(v1alpha1.EphemeralRunner)
|
||||
// Check if finalizer is added
|
||||
Eventually(
|
||||
func() error {
|
||||
err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, er)
|
||||
return err
|
||||
},
|
||||
ephemeralRunnerTimeout,
|
||||
ephemeralRunnerInterval,
|
||||
).Should(Succeed(), "failed to get ephemeral runner")
|
||||
|
||||
// update job id to simulate job assigned
|
||||
er.Status.JobID = "1"
|
||||
err := k8sClient.Status().Update(ctx, er)
|
||||
Expect(err).To(BeNil(), "failed to update ephemeral runner status")
|
||||
|
||||
er = new(v1alpha1.EphemeralRunner)
|
||||
Eventually(
|
||||
func() (string, error) {
|
||||
err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, er)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return er.Status.JobID, nil
|
||||
},
|
||||
ephemeralRunnerTimeout,
|
||||
ephemeralRunnerInterval,
|
||||
).Should(BeEquivalentTo("1"))
|
||||
|
||||
pod := new(corev1.Pod)
|
||||
Eventually(func() (bool, error) {
|
||||
if err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, pod); err != nil {
|
||||
return false, err
|
||||
}
|
||||
return true, nil
|
||||
}).Should(BeEquivalentTo(true))
|
||||
|
||||
// delete pod to simulate failure
|
||||
pod.Status.ContainerStatuses = append(pod.Status.ContainerStatuses, corev1.ContainerStatus{
|
||||
Name: v1alpha1.EphemeralRunnerContainerName,
|
||||
State: corev1.ContainerState{
|
||||
Terminated: &corev1.ContainerStateTerminated{
|
||||
ExitCode: 1,
|
||||
},
|
||||
},
|
||||
})
|
||||
err = k8sClient.Status().Update(ctx, pod)
|
||||
Expect(err).To(BeNil(), "Failed to update pod status")
|
||||
|
||||
er = new(v1alpha1.EphemeralRunner)
|
||||
Eventually(
|
||||
func() bool {
|
||||
err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, er)
|
||||
return kerrors.IsNotFound(err)
|
||||
},
|
||||
ephemeralRunnerTimeout,
|
||||
ephemeralRunnerInterval,
|
||||
).Should(BeTrue(), "Ephemeral runner should eventually be deleted")
|
||||
})
|
||||
|
||||
It("It should failed if a pod template is invalid", func() {
|
||||
invalideEphemeralRunner := newExampleRunner("invalid-ephemeral-runner", autoscalingNS.Name, configSecret.Name)
|
||||
invalideEphemeralRunner.Spec.Spec.PriorityClassName = "notexist"
|
||||
@@ -208,13 +269,22 @@ var _ = Describe("EphemeralRunner", func() {
|
||||
Expect(err).To(BeNil())
|
||||
|
||||
updated := new(v1alpha1.EphemeralRunner)
|
||||
Eventually(func() (corev1.PodPhase, error) {
|
||||
err := k8sClient.Get(ctx, client.ObjectKey{Name: invalideEphemeralRunner.Name, Namespace: invalideEphemeralRunner.Namespace}, updated)
|
||||
if err != nil {
|
||||
return "", nil
|
||||
}
|
||||
return updated.Status.Phase, nil
|
||||
}, ephemeralRunnerTimeout, ephemeralRunnerInterval).Should(BeEquivalentTo(corev1.PodFailed))
|
||||
Eventually(
|
||||
func() (corev1.PodPhase, error) {
|
||||
err := k8sClient.Get(
|
||||
ctx,
|
||||
client.ObjectKey{Name: invalideEphemeralRunner.Name, Namespace: invalideEphemeralRunner.Namespace},
|
||||
updated,
|
||||
)
|
||||
if err != nil {
|
||||
return "", nil
|
||||
}
|
||||
return updated.Status.Phase, nil
|
||||
},
|
||||
ephemeralRunnerTimeout,
|
||||
ephemeralRunnerInterval,
|
||||
).Should(BeEquivalentTo(corev1.PodFailed))
|
||||
|
||||
Expect(updated.Status.Reason).Should(Equal("InvalidPod"))
|
||||
Expect(updated.Status.Message).Should(Equal("Failed to create the pod: pods \"invalid-ephemeral-runner\" is forbidden: no PriorityClass with name notexist was found"))
|
||||
})
|
||||
@@ -675,53 +745,6 @@ var _ = Describe("EphemeralRunner", func() {
|
||||
).Should(BeEquivalentTo(true))
|
||||
})
|
||||
|
||||
It("It should re-create pod on exit status 0, but runner exists within the service", func() {
|
||||
pod := new(corev1.Pod)
|
||||
Eventually(
|
||||
func() (bool, error) {
|
||||
if err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, pod); err != nil {
|
||||
return false, err
|
||||
}
|
||||
return true, nil
|
||||
},
|
||||
ephemeralRunnerTimeout,
|
||||
ephemeralRunnerInterval,
|
||||
).Should(BeEquivalentTo(true))
|
||||
|
||||
pod.Status.ContainerStatuses = append(pod.Status.ContainerStatuses, corev1.ContainerStatus{
|
||||
Name: v1alpha1.EphemeralRunnerContainerName,
|
||||
State: corev1.ContainerState{
|
||||
Terminated: &corev1.ContainerStateTerminated{
|
||||
ExitCode: 0,
|
||||
},
|
||||
},
|
||||
})
|
||||
err := k8sClient.Status().Update(ctx, pod)
|
||||
Expect(err).To(BeNil(), "failed to update pod status")
|
||||
|
||||
updated := new(v1alpha1.EphemeralRunner)
|
||||
Eventually(func() (bool, error) {
|
||||
err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, updated)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return len(updated.Status.Failures) == 1, nil
|
||||
}, ephemeralRunnerTimeout, ephemeralRunnerInterval).Should(BeEquivalentTo(true))
|
||||
|
||||
// should re-create after failure
|
||||
Eventually(
|
||||
func() (bool, error) {
|
||||
pod := new(corev1.Pod)
|
||||
if err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, pod); err != nil {
|
||||
return false, err
|
||||
}
|
||||
return true, nil
|
||||
},
|
||||
ephemeralRunnerTimeout,
|
||||
ephemeralRunnerInterval,
|
||||
).Should(BeEquivalentTo(true))
|
||||
})
|
||||
|
||||
It("It should not set the phase to succeeded without pod termination status", func() {
|
||||
pod := new(corev1.Pod)
|
||||
Eventually(
|
||||
@@ -822,7 +845,7 @@ var _ = Describe("EphemeralRunner", func() {
|
||||
startManagers(GinkgoT(), mgr)
|
||||
})
|
||||
|
||||
It("It should set the Phase to Succeeded", func() {
|
||||
It("It should delete EphemeralRunner when pod exits successfully", func() {
|
||||
ephemeralRunner := newExampleRunner("test-runner", autoscalingNS.Name, configSecret.Name)
|
||||
|
||||
err := k8sClient.Create(ctx, ephemeralRunner)
|
||||
@@ -848,13 +871,18 @@ var _ = Describe("EphemeralRunner", func() {
|
||||
Expect(err).To(BeNil(), "failed to update pod status")
|
||||
|
||||
updated := new(v1alpha1.EphemeralRunner)
|
||||
Eventually(func() (corev1.PodPhase, error) {
|
||||
err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, updated)
|
||||
if err != nil {
|
||||
return "", nil
|
||||
}
|
||||
return updated.Status.Phase, nil
|
||||
}, ephemeralRunnerTimeout, ephemeralRunnerInterval).Should(BeEquivalentTo(corev1.PodSucceeded))
|
||||
Eventually(
|
||||
func() bool {
|
||||
err := k8sClient.Get(
|
||||
ctx,
|
||||
client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace},
|
||||
updated,
|
||||
)
|
||||
return kerrors.IsNotFound(err)
|
||||
},
|
||||
ephemeralRunnerTimeout,
|
||||
ephemeralRunnerInterval,
|
||||
).Should(BeTrue())
|
||||
})
|
||||
})
|
||||
|
||||
|
||||
@@ -453,8 +453,13 @@ func (r *EphemeralRunnerSetReconciler) deleteIdleEphemeralRunners(ctx context.Co
|
||||
continue
|
||||
}
|
||||
|
||||
if !isDone && ephemeralRunner.Status.JobRequestId > 0 {
|
||||
log.Info("Skipping ephemeral runner since it is running a job", "name", ephemeralRunner.Name, "jobRequestId", ephemeralRunner.Status.JobRequestId)
|
||||
if !isDone && ephemeralRunner.HasJob() {
|
||||
log.Info(
|
||||
"Skipping ephemeral runner since it is running a job",
|
||||
"name", ephemeralRunner.Name,
|
||||
"workflowRunId", ephemeralRunner.Status.WorkflowRunId,
|
||||
"jobId", ephemeralRunner.Status.JobID,
|
||||
)
|
||||
continue
|
||||
}
|
||||
|
||||
|
||||
12
controllers/actions.github.com/error.go
Normal file
@@ -0,0 +1,12 @@
package actionsgithubcom

type controllerError string

func (e controllerError) Error() string {
	return string(e)
}

const (
	retryableError = controllerError("retryable error")
	fatalError     = controllerError("fatal error")
)
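A minimal sketch (not part of the diff) of how these sentinels are intended to be consumed: producers wrap them with %w, and callers branch with errors.Is, mirroring the retryable/fatal cases in the EphemeralRunner reconciler hunk above. The classify and newRetryable helpers below are illustrative only.

package actionsgithubcom

import (
	"errors"
	"fmt"
)

// classify is an illustrative helper, not from the PR: it shows the
// wrap-and-match pattern used with retryableError and fatalError.
func classify(err error) string {
	switch {
	case err == nil:
		return "ok"
	case errors.Is(err, retryableError):
		return "requeue" // transient condition, reconcile again
	case errors.Is(err, fatalError):
		return "delete" // unrecoverable, delete the ephemeral runner
	default:
		return "fail"
	}
}

// newRetryable shows the producer side: wrapping the sentinel with %w
// is what lets errors.Is match it later.
func newRetryable(reason string) error {
	return fmt.Errorf("%w: %s", retryableError, reason)
}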
@@ -618,7 +618,7 @@ func (b *ResourceBuilder) newEphemeralRunnerPod(ctx context.Context, runner *v1a
		FilterLabels(labels, LabelKeyRunnerTemplateHash),
		annotations,
		runner.Spec,
		runner.Status.RunnerJITConfig,
		secret.Data,
	)

	objectMeta := metav1.ObjectMeta{
@@ -671,14 +671,17 @@ func (b *ResourceBuilder) newEphemeralRunnerPod(ctx context.Context, runner *v1a
	return &newPod
}

func (b *ResourceBuilder) newEphemeralRunnerJitSecret(ephemeralRunner *v1alpha1.EphemeralRunner) *corev1.Secret {
func (b *ResourceBuilder) newEphemeralRunnerJitSecret(ephemeralRunner *v1alpha1.EphemeralRunner, jitConfig *actions.RunnerScaleSetJitRunnerConfig) *corev1.Secret {
	return &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      ephemeralRunner.Name,
			Namespace: ephemeralRunner.Namespace,
		},
		Data: map[string][]byte{
			jitTokenKey: []byte(ephemeralRunner.Status.RunnerJITConfig),
			jitTokenKey:  []byte(jitConfig.EncodedJITConfig),
			"runnerName": []byte(jitConfig.Runner.Name),
			"runnerId":   []byte(strconv.Itoa(jitConfig.Runner.Id)),
			"scaleSetId": []byte(strconv.Itoa(jitConfig.Runner.RunnerScaleSetId)),
		},
	}
}
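For reference, a hedged sketch of the consumer side of this secret: the reconciler hunk earlier in this diff reads runnerId and runnerName back out of secret.Data, roughly as below. The standalone helper name is illustrative and not part of the PR.

package actionsgithubcom

import (
	"fmt"
	"strconv"

	corev1 "k8s.io/api/core/v1"
)

// runnerConfigFromSecret is an illustrative helper that decodes the fields
// written by newEphemeralRunnerJitSecret above.
func runnerConfigFromSecret(secret *corev1.Secret) (id int, name string, err error) {
	id, err = strconv.Atoi(string(secret.Data["runnerId"]))
	if err != nil {
		return 0, "", fmt.Errorf("runner config secret is corrupted: missing runnerId: %w", err)
	}
	return id, string(secret.Data["runnerName"]), nil
}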
@@ -1046,12 +1046,12 @@ func newRunnerPodWithContainerMode(containerMode string, template corev1.Pod, ru
	// overridden
	if ok, _ := envVarPresent("DOCKER_GROUP_GID", dockerdContainer.Env); !ok {
		gid := d.DockerGID
		// We default to gid 121 for Ubuntu 22.04 images
		// We default to gid 121 for Ubuntu 22.04 and 24.04 images
		// See below for more details
		// - https://github.com/actions/actions-runner-controller/issues/2490#issuecomment-1501561923
		// - https://github.com/actions/actions-runner-controller/blob/8869ad28bb5a1daaedefe0e988571fe1fb36addd/runner/actions-runner.ubuntu-20.04.dockerfile#L14
		// - https://github.com/actions/actions-runner-controller/blob/8869ad28bb5a1daaedefe0e988571fe1fb36addd/runner/actions-runner.ubuntu-22.04.dockerfile#L12
		if strings.Contains(runnerContainer.Image, "22.04") {
		if strings.Contains(runnerContainer.Image, "22.04") || strings.Contains(runnerContainer.Image, "24.04") {
			gid = "121"
		} else if strings.Contains(runnerContainer.Image, "20.04") {
			gid = "1001"
@@ -188,7 +188,7 @@ Create one using e.g. `eksctl`. You can refer to [the EKS documentation](https:/

Once you set up the service account, all you need is to add `serviceAccountName` and `fsGroup` to any pods that use the IAM-role enabled service account.

`fsGroup` needs to be set to the UID of the `runner` Linux user that runs the runner agent (and dockerd in case you use dind-runner). For anyone using an Ubuntu 20.04 runner image it's `1000` and for Ubuntu 22.04 one it's `1001`.
`fsGroup` needs to be set to the UID of the `runner` Linux user that runs the runner agent (and dockerd in case you use dind-runner). For anyone using an Ubuntu 20.04 runner image it's `1000` and for Ubuntu 22.04 and 24.04 one it's `1001`.

For `RunnerDeployment`, you can set those two fields under the runner spec at `RunnerDeployment.Spec.Template`:

@@ -200,12 +200,12 @@ metadata:
spec:
  template:
    spec:
      repository: USER/REO
      repository: USER/REPO
      serviceAccountName: my-service-account
      securityContext:
        # For Ubuntu 20.04 runner
        fsGroup: 1000
        # Use 1001 for Ubuntu 22.04 runner
        # Use 1001 for Ubuntu 22.04 and 24.04 runner
        #fsGroup: 1001
```

@@ -43,6 +43,14 @@ You can follow [this troubleshooting guide](https://docs.github.com/en/actions/h

## Changelog

### 0.12.1

1. Fix indentation of startupProbe attributes in dind sidecar [#4126](https://github.com/actions/actions-runner-controller/pull/4126)
1. Remove duplicate float64 call [#4139](https://github.com/actions/actions-runner-controller/pull/4139)
1. Fix dind sidecar template [#4128](https://github.com/actions/actions-runner-controller/pull/4128)
1. Remove check if runner exists after exit code 0 [#4142](https://github.com/actions/actions-runner-controller/pull/4142)
1. Explicitly requeue during backoff ephemeral runner [#4152](https://github.com/actions/actions-runner-controller/pull/4152)

### 0.12.0

1. Allow use of client id as an app id [#4057](https://github.com/actions/actions-runner-controller/pull/4057)

@@ -37,7 +37,7 @@ type JobAssigned struct {
}

type JobStarted struct {
	RunnerId int `json:"runnerId"`
	RunnerID int `json:"runnerId"`
	RunnerName string `json:"runnerName"`
	JobMessageBase
}
@@ -55,12 +55,13 @@ type JobMessageType struct {

type JobMessageBase struct {
	JobMessageType
	RunnerRequestId int64 `json:"runnerRequestId"`
	RunnerRequestID int64 `json:"runnerRequestId"`
	RepositoryName string `json:"repositoryName"`
	OwnerName string `json:"ownerName"`
	JobID string `json:"jobId"`
	JobWorkflowRef string `json:"jobWorkflowRef"`
	JobDisplayName string `json:"jobDisplayName"`
	WorkflowRunId int64 `json:"workflowRunId"`
	WorkflowRunID int64 `json:"workflowRunId"`
	EventName string `json:"eventName"`
	RequestLabels []string `json:"requestLabels"`
	QueueTime time.Time `json:"queueTime"`
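The renames above (RunnerId to RunnerID, RunnerRequestId to RunnerRequestID, WorkflowRunId to WorkflowRunID) only touch the Go field names; the json tags keep the wire keys unchanged. A standalone sketch, using a trimmed copy of the struct purely for illustration:

package main

import (
	"encoding/json"
	"fmt"
)

// jobStarted is a trimmed, illustrative copy of the fields above.
type jobStarted struct {
	RunnerID        int    `json:"runnerId"`
	RunnerRequestID int64  `json:"runnerRequestId"`
	WorkflowRunID   int64  `json:"workflowRunId"`
	JobDisplayName  string `json:"jobDisplayName"`
}

func main() {
	// The JSON keys sent by the service are untouched by the Go-side renames.
	msg := []byte(`{"runnerId": 7, "runnerRequestId": 42, "workflowRunId": 9001, "jobDisplayName": "build"}`)
	var j jobStarted
	if err := json.Unmarshal(msg, &j); err != nil {
		panic(err)
	}
	fmt.Println(j.RunnerID, j.RunnerRequestID, j.WorkflowRunID, j.JobDisplayName)
}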
2
main.go
@@ -124,7 +124,7 @@ func main() {
	flag.StringVar(&leaderElectionId, "leader-election-id", "actions-runner-controller", "Controller id for leader election.")
	flag.StringVar(&runnerPodDefaults.RunnerImage, "runner-image", defaultRunnerImage, "The image name of self-hosted runner container to use by default if one isn't defined in yaml.")
	flag.StringVar(&runnerPodDefaults.DockerImage, "docker-image", defaultDockerImage, "The image name of docker sidecar container to use by default if one isn't defined in yaml.")
	flag.StringVar(&runnerPodDefaults.DockerGID, "docker-gid", defaultDockerGID, "The default GID of docker group in the docker sidecar container. Use 1001 for dockerd sidecars of Ubuntu 20.04 runners 121 for Ubuntu 22.04.")
	flag.StringVar(&runnerPodDefaults.DockerGID, "docker-gid", defaultDockerGID, "The default GID of docker group in the docker sidecar container. Use 1001 for dockerd sidecars of Ubuntu 20.04 runners 121 for Ubuntu 22.04 and 24.04.")
	flag.Var(&runnerImagePullSecrets, "runner-image-pull-secret", "The default image-pull secret name for self-hosted runner container.")
	flag.StringVar(&runnerPodDefaults.DockerRegistryMirror, "docker-registry-mirror", "", "The default Docker Registry Mirror used by runners.")
	flag.StringVar(&c.Token, "github-token", c.Token, "The personal access token of GitHub.")

@@ -6,8 +6,8 @@ DIND_ROOTLESS_RUNNER_NAME ?= ${DOCKER_USER}/actions-runner-dind-rootless
OS_IMAGE ?= ubuntu-22.04
TARGETPLATFORM ?= $(shell arch)

RUNNER_VERSION ?= 2.325.0
RUNNER_CONTAINER_HOOKS_VERSION ?= 0.7.0
RUNNER_VERSION ?= 2.328.0
RUNNER_CONTAINER_HOOKS_VERSION ?= 0.8.0
DOCKER_VERSION ?= 24.0.7

# default list of platforms for which multiarch image is built
@@ -1,2 +1,2 @@
RUNNER_VERSION=2.325.0
RUNNER_CONTAINER_HOOKS_VERSION=0.7.0
RUNNER_VERSION=2.328.0
RUNNER_CONTAINER_HOOKS_VERSION=0.8.0
135
runner/actions-runner-dind-rootless.ubuntu-24.04.dockerfile
Normal file
@@ -0,0 +1,135 @@
FROM ubuntu:24.04

ARG TARGETPLATFORM
ARG RUNNER_VERSION
ARG RUNNER_CONTAINER_HOOKS_VERSION
# Docker and Docker Compose arguments
ENV CHANNEL=stable
ARG DOCKER_COMPOSE_VERSION=v2.23.0
ARG DUMB_INIT_VERSION=1.2.5
ARG RUNNER_USER_UID=1001

# Other arguments
ARG DEBUG=false

RUN test -n "$TARGETPLATFORM" || (echo "TARGETPLATFORM must be set" && false)

ENV DEBIAN_FRONTEND=noninteractive
RUN apt-get update -y \
&& apt-get install -y software-properties-common \
&& add-apt-repository -y ppa:git-core/ppa \
&& apt-get update -y \
&& apt-get install -y --no-install-recommends \
curl \
ca-certificates \
git \
iproute2 \
iptables \
jq \
sudo \
uidmap \
unzip \
zip \
fuse-overlayfs \
&& rm -rf /var/lib/apt/lists/*

# Download latest git-lfs version
RUN curl -s https://packagecloud.io/install/repositories/github/git-lfs/script.deb.sh | bash && \
apt-get install -y --no-install-recommends git-lfs

# Runner user
RUN adduser --disabled-password --gecos "" --uid $RUNNER_USER_UID runner

ENV HOME=/home/runner

# Set-up subuid and subgid so that "--userns-remap=default" works
RUN set -eux; \
addgroup --system dockremap; \
adduser --system --ingroup dockremap dockremap; \
echo 'dockremap:165536:65536' >> /etc/subuid; \
echo 'dockremap:165536:65536' >> /etc/subgid

RUN export ARCH=$(echo ${TARGETPLATFORM} | cut -d / -f2) \
&& if [ "$ARCH" = "arm64" ]; then export ARCH=aarch64 ; fi \
&& if [ "$ARCH" = "amd64" ] || [ "$ARCH" = "i386" ]; then export ARCH=x86_64 ; fi \
&& curl -fLo /usr/bin/dumb-init https://github.com/Yelp/dumb-init/releases/download/v${DUMB_INIT_VERSION}/dumb-init_${DUMB_INIT_VERSION}_${ARCH} \
&& chmod +x /usr/bin/dumb-init

ENV RUNNER_ASSETS_DIR=/runnertmp
RUN export ARCH=$(echo ${TARGETPLATFORM} | cut -d / -f2) \
&& if [ "$ARCH" = "amd64" ] || [ "$ARCH" = "x86_64" ] || [ "$ARCH" = "i386" ]; then export ARCH=x64 ; fi \
&& mkdir -p "$RUNNER_ASSETS_DIR" \
&& cd "$RUNNER_ASSETS_DIR" \
&& curl -fLo runner.tar.gz https://github.com/actions/runner/releases/download/v${RUNNER_VERSION}/actions-runner-linux-${ARCH}-${RUNNER_VERSION}.tar.gz \
&& tar xzf ./runner.tar.gz \
&& rm runner.tar.gz \
&& ./bin/installdependencies.sh \
&& mv ./externals ./externalstmp \
# libyaml-dev is required for ruby/setup-ruby action.
# It is installed after installdependencies.sh and before removing /var/lib/apt/lists
# to avoid rerunning apt-update on its own.
&& apt-get install -y libyaml-dev \
&& rm -rf /var/lib/apt/lists/*

ENV RUNNER_TOOL_CACHE=/opt/hostedtoolcache
RUN mkdir /opt/hostedtoolcache \
&& chgrp runner /opt/hostedtoolcache \
&& chmod g+rwx /opt/hostedtoolcache

RUN cd "$RUNNER_ASSETS_DIR" \
&& curl -fLo runner-container-hooks.zip https://github.com/actions/runner-container-hooks/releases/download/v${RUNNER_CONTAINER_HOOKS_VERSION}/actions-runner-hooks-k8s-${RUNNER_CONTAINER_HOOKS_VERSION}.zip \
&& unzip ./runner-container-hooks.zip -d ./k8s \
&& rm -f runner-container-hooks.zip

# Make the rootless runner directory executable
RUN mkdir /run/user/1000 \
&& chown runner:runner /run/user/1000 \
&& chmod a+x /run/user/1000

# We place the scripts in `/usr/bin` so that users who extend this image can
# override them with scripts of the same name placed in `/usr/local/bin`.
COPY entrypoint-dind-rootless.sh startup.sh logger.sh graceful-stop.sh update-status /usr/bin/
RUN chmod +x /usr/bin/entrypoint-dind-rootless.sh /usr/bin/startup.sh

# Copy the docker shim which propagates the docker MTU to underlying networks
# to replace the docker binary in the PATH.
COPY docker-shim.sh /usr/local/bin/docker

# Configure hooks folder structure.
COPY hooks /etc/arc/hooks/

# Add the Python "User Script Directory" to the PATH
ENV PATH="${PATH}:${HOME}/.local/bin:/home/runner/bin"
ENV ImageOS=ubuntu22
ENV DOCKER_HOST=unix:///run/user/1000/docker.sock
ENV XDG_RUNTIME_DIR=/run/user/1000

RUN echo "PATH=${PATH}" > /etc/environment \
&& echo "ImageOS=${ImageOS}" >> /etc/environment \
&& echo "DOCKER_HOST=${DOCKER_HOST}" >> /etc/environment \
&& echo "XDG_RUNTIME_DIR=${XDG_RUNTIME_DIR}" >> /etc/environment

# No group definition, as that makes it harder to run docker.
USER runner

# This will install docker under $HOME/bin according to the content of the script
RUN export SKIP_IPTABLES=1 \
&& curl -fsSL https://get.docker.com/rootless | sh \
&& /home/runner/bin/docker -v

RUN export ARCH=$(echo ${TARGETPLATFORM} | cut -d / -f2) \
&& if [ "$ARCH" = "arm64" ]; then export ARCH=aarch64 ; fi \
&& if [ "$ARCH" = "amd64" ] || [ "$ARCH" = "i386" ]; then export ARCH=x86_64 ; fi \
&& mkdir -p /home/runner/.docker/cli-plugins \
&& curl -fLo /home/runner/.docker/cli-plugins/docker-compose https://github.com/docker/compose/releases/download/${DOCKER_COMPOSE_VERSION}/docker-compose-linux-${ARCH} \
&& chmod +x /home/runner/.docker/cli-plugins/docker-compose \
&& ln -s /home/runner/.docker/cli-plugins/docker-compose /home/runner/bin/docker-compose \
&& which docker-compose \
&& docker compose version

# Create folder structure here to avoid permission issues
# when mounting the daemon.json file from a configmap.
RUN mkdir -p /home/runner/.config/docker

ENTRYPOINT ["/bin/bash", "-c"]
CMD ["entrypoint-dind-rootless.sh"]
120
runner/actions-runner-dind.ubuntu-24.04.dockerfile
Normal file
@@ -0,0 +1,120 @@
FROM ubuntu:24.04

ARG TARGETPLATFORM
ARG RUNNER_VERSION
ARG RUNNER_CONTAINER_HOOKS_VERSION
# Docker and Docker Compose arguments
ARG CHANNEL=stable
ARG DOCKER_VERSION=24.0.7
ARG DOCKER_COMPOSE_VERSION=v2.23.0
ARG DUMB_INIT_VERSION=1.2.5
ARG RUNNER_USER_UID=1001
ARG DOCKER_GROUP_GID=121

ENV DEBIAN_FRONTEND=noninteractive
RUN apt-get update -y \
&& apt-get install -y software-properties-common \
&& add-apt-repository -y ppa:git-core/ppa \
&& apt-get update -y \
&& apt-get install -y --no-install-recommends \
curl \
ca-certificates \
git \
iptables \
jq \
software-properties-common \
sudo \
unzip \
zip \
&& rm -rf /var/lib/apt/lists/*

# Download latest git-lfs version
RUN curl -s https://packagecloud.io/install/repositories/github/git-lfs/script.deb.sh | bash && \
apt-get install -y --no-install-recommends git-lfs

# Runner user
RUN adduser --disabled-password --gecos "" --uid $RUNNER_USER_UID runner \
&& groupadd docker --gid $DOCKER_GROUP_GID \
&& usermod -aG sudo runner \
&& usermod -aG docker runner \
&& echo "%sudo ALL=(ALL:ALL) NOPASSWD:ALL" > /etc/sudoers \
&& echo "Defaults env_keep += \"DEBIAN_FRONTEND\"" >> /etc/sudoers

ENV HOME=/home/runner

RUN export ARCH=$(echo ${TARGETPLATFORM} | cut -d / -f2) \
&& if [ "$ARCH" = "arm64" ]; then export ARCH=aarch64 ; fi \
&& if [ "$ARCH" = "amd64" ] || [ "$ARCH" = "i386" ]; then export ARCH=x86_64 ; fi \
&& curl -fLo /usr/bin/dumb-init https://github.com/Yelp/dumb-init/releases/download/v${DUMB_INIT_VERSION}/dumb-init_${DUMB_INIT_VERSION}_${ARCH} \
&& chmod +x /usr/bin/dumb-init

ENV RUNNER_ASSETS_DIR=/runnertmp
RUN export ARCH=$(echo ${TARGETPLATFORM} | cut -d / -f2) \
&& if [ "$ARCH" = "amd64" ] || [ "$ARCH" = "x86_64" ] || [ "$ARCH" = "i386" ]; then export ARCH=x64 ; fi \
&& mkdir -p "$RUNNER_ASSETS_DIR" \
&& cd "$RUNNER_ASSETS_DIR" \
&& curl -fLo runner.tar.gz https://github.com/actions/runner/releases/download/v${RUNNER_VERSION}/actions-runner-linux-${ARCH}-${RUNNER_VERSION}.tar.gz \
&& tar xzf ./runner.tar.gz \
&& rm -f runner.tar.gz \
&& ./bin/installdependencies.sh \
# libyaml-dev is required for ruby/setup-ruby action.
# It is installed after installdependencies.sh and before removing /var/lib/apt/lists
# to avoid rerunning apt-update on its own.
&& apt-get install -y libyaml-dev \
&& rm -rf /var/lib/apt/lists/*

ENV RUNNER_TOOL_CACHE=/opt/hostedtoolcache
RUN mkdir /opt/hostedtoolcache \
&& chgrp docker /opt/hostedtoolcache \
&& chmod g+rwx /opt/hostedtoolcache

RUN cd "$RUNNER_ASSETS_DIR" \
&& curl -fLo runner-container-hooks.zip https://github.com/actions/runner-container-hooks/releases/download/v${RUNNER_CONTAINER_HOOKS_VERSION}/actions-runner-hooks-k8s-${RUNNER_CONTAINER_HOOKS_VERSION}.zip \
&& unzip ./runner-container-hooks.zip -d ./k8s \
&& rm -f runner-container-hooks.zip

RUN set -vx; \
export ARCH=$(echo ${TARGETPLATFORM} | cut -d / -f2) \
&& if [ "$ARCH" = "arm64" ]; then export ARCH=aarch64 ; fi \
&& if [ "$ARCH" = "amd64" ] || [ "$ARCH" = "i386" ]; then export ARCH=x86_64 ; fi \
&& curl -fLo docker.tgz https://download.docker.com/linux/static/${CHANNEL}/${ARCH}/docker-${DOCKER_VERSION}.tgz \
&& tar zxvf docker.tgz \
&& install -o root -g root -m 755 docker/* /usr/bin/ \
&& rm -rf docker docker.tgz

RUN export ARCH=$(echo ${TARGETPLATFORM} | cut -d / -f2) \
&& if [ "$ARCH" = "arm64" ]; then export ARCH=aarch64 ; fi \
&& if [ "$ARCH" = "amd64" ] || [ "$ARCH" = "i386" ]; then export ARCH=x86_64 ; fi \
&& mkdir -p /usr/libexec/docker/cli-plugins \
&& curl -fLo /usr/libexec/docker/cli-plugins/docker-compose https://github.com/docker/compose/releases/download/${DOCKER_COMPOSE_VERSION}/docker-compose-linux-${ARCH} \
&& chmod +x /usr/libexec/docker/cli-plugins/docker-compose \
&& ln -s /usr/libexec/docker/cli-plugins/docker-compose /usr/bin/docker-compose \
&& which docker-compose \
&& docker compose version

# We place the scripts in `/usr/bin` so that users who extend this image can
# override them with scripts of the same name placed in `/usr/local/bin`.
COPY entrypoint-dind.sh startup.sh logger.sh wait.sh graceful-stop.sh update-status /usr/bin/
RUN chmod +x /usr/bin/entrypoint-dind.sh /usr/bin/startup.sh

# Copy the docker shim which propagates the docker MTU to underlying networks
# to replace the docker binary in the PATH.
COPY docker-shim.sh /usr/local/bin/docker

# Configure hooks folder structure.
COPY hooks /etc/arc/hooks/

VOLUME /var/lib/docker

# Add the Python "User Script Directory" to the PATH
ENV PATH="${PATH}:${HOME}/.local/bin"
ENV ImageOS=ubuntu24

RUN echo "PATH=${PATH}" > /etc/environment \
&& echo "ImageOS=${ImageOS}" >> /etc/environment

# No group definition, as that makes it harder to run docker.
USER runner

ENTRYPOINT ["/bin/bash", "-c"]
CMD ["entrypoint-dind.sh"]
114
runner/actions-runner.ubuntu-24.04.dockerfile
Normal file
@@ -0,0 +1,114 @@
FROM ubuntu:24.04

ARG TARGETPLATFORM
ARG RUNNER_VERSION
ARG RUNNER_CONTAINER_HOOKS_VERSION
# Docker and Docker Compose arguments
ARG CHANNEL=stable
ARG DOCKER_VERSION=24.0.7
ARG DOCKER_COMPOSE_VERSION=v2.23.0
ARG DUMB_INIT_VERSION=1.2.5
ARG RUNNER_USER_UID=1001
ARG DOCKER_GROUP_GID=121

ENV DEBIAN_FRONTEND=noninteractive
RUN apt-get update -y \
&& apt-get install -y software-properties-common \
&& add-apt-repository -y ppa:git-core/ppa \
&& apt-get update -y \
&& apt-get install -y --no-install-recommends \
curl \
ca-certificates \
git \
jq \
sudo \
unzip \
zip \
&& rm -rf /var/lib/apt/lists/*

# Download latest git-lfs version
RUN curl -s https://packagecloud.io/install/repositories/github/git-lfs/script.deb.sh | bash && \
apt-get install -y --no-install-recommends git-lfs

RUN adduser --disabled-password --gecos "" --uid $RUNNER_USER_UID runner \
&& groupadd docker --gid $DOCKER_GROUP_GID \
&& usermod -aG sudo runner \
&& usermod -aG docker runner \
&& echo "%sudo ALL=(ALL:ALL) NOPASSWD:ALL" > /etc/sudoers \
&& echo "Defaults env_keep += \"DEBIAN_FRONTEND\"" >> /etc/sudoers

ENV HOME=/home/runner

RUN export ARCH=$(echo ${TARGETPLATFORM} | cut -d / -f2) \
&& if [ "$ARCH" = "arm64" ]; then export ARCH=aarch64 ; fi \
&& if [ "$ARCH" = "amd64" ] || [ "$ARCH" = "i386" ]; then export ARCH=x86_64 ; fi \
&& curl -fLo /usr/bin/dumb-init https://github.com/Yelp/dumb-init/releases/download/v${DUMB_INIT_VERSION}/dumb-init_${DUMB_INIT_VERSION}_${ARCH} \
&& chmod +x /usr/bin/dumb-init

ENV RUNNER_ASSETS_DIR=/runnertmp
RUN export ARCH=$(echo ${TARGETPLATFORM} | cut -d / -f2) \
&& if [ "$ARCH" = "amd64" ] || [ "$ARCH" = "x86_64" ] || [ "$ARCH" = "i386" ]; then export ARCH=x64 ; fi \
&& mkdir -p "$RUNNER_ASSETS_DIR" \
&& cd "$RUNNER_ASSETS_DIR" \
&& curl -fLo runner.tar.gz https://github.com/actions/runner/releases/download/v${RUNNER_VERSION}/actions-runner-linux-${ARCH}-${RUNNER_VERSION}.tar.gz \
&& tar xzf ./runner.tar.gz \
&& rm runner.tar.gz \
&& ./bin/installdependencies.sh \
&& mv ./externals ./externalstmp \
# libyaml-dev is required for ruby/setup-ruby action.
# It is installed after installdependencies.sh and before removing /var/lib/apt/lists
# to avoid rerunning apt-update on its own.
&& apt-get install -y libyaml-dev \
&& rm -rf /var/lib/apt/lists/*

ENV RUNNER_TOOL_CACHE=/opt/hostedtoolcache
RUN mkdir /opt/hostedtoolcache \
&& chgrp docker /opt/hostedtoolcache \
&& chmod g+rwx /opt/hostedtoolcache

RUN cd "$RUNNER_ASSETS_DIR" \
&& curl -fLo runner-container-hooks.zip https://github.com/actions/runner-container-hooks/releases/download/v${RUNNER_CONTAINER_HOOKS_VERSION}/actions-runner-hooks-k8s-${RUNNER_CONTAINER_HOOKS_VERSION}.zip \
&& unzip ./runner-container-hooks.zip -d ./k8s \
&& rm -f runner-container-hooks.zip

RUN set -vx; \
export ARCH=$(echo ${TARGETPLATFORM} | cut -d / -f2) \
&& if [ "$ARCH" = "arm64" ]; then export ARCH=aarch64 ; fi \
&& if [ "$ARCH" = "amd64" ] || [ "$ARCH" = "i386" ]; then export ARCH=x86_64 ; fi \
&& curl -fLo docker.tgz https://download.docker.com/linux/static/${CHANNEL}/${ARCH}/docker-${DOCKER_VERSION}.tgz \
&& tar zxvf docker.tgz \
&& install -o root -g root -m 755 docker/docker /usr/bin/docker \
&& rm -rf docker docker.tgz

RUN export ARCH=$(echo ${TARGETPLATFORM} | cut -d / -f2) \
&& if [ "$ARCH" = "arm64" ]; then export ARCH=aarch64 ; fi \
&& if [ "$ARCH" = "amd64" ] || [ "$ARCH" = "i386" ]; then export ARCH=x86_64 ; fi \
&& mkdir -p /usr/libexec/docker/cli-plugins \
&& curl -fLo /usr/libexec/docker/cli-plugins/docker-compose https://github.com/docker/compose/releases/download/${DOCKER_COMPOSE_VERSION}/docker-compose-linux-${ARCH} \
&& chmod +x /usr/libexec/docker/cli-plugins/docker-compose \
&& ln -s /usr/libexec/docker/cli-plugins/docker-compose /usr/bin/docker-compose \
&& which docker-compose \
&& docker compose version

# We place the scripts in `/usr/bin` so that users who extend this image can
# override them with scripts of the same name placed in `/usr/local/bin`.
COPY entrypoint.sh startup.sh logger.sh graceful-stop.sh update-status /usr/bin/

# Copy the docker shim which propagates the docker MTU to underlying networks
# to replace the docker binary in the PATH.
COPY docker-shim.sh /usr/local/bin/docker

# Configure hooks folder structure.
COPY hooks /etc/arc/hooks/

# Add the Python "User Script Directory" to the PATH
ENV PATH="${PATH}:${HOME}/.local/bin/"
ENV ImageOS=ubuntu24

RUN echo "PATH=${PATH}" > /etc/environment \
&& echo "ImageOS=${ImageOS}" >> /etc/environment

USER runner

ENTRYPOINT ["/bin/bash", "-c"]
CMD ["entrypoint.sh"]
@@ -36,8 +36,8 @@ var (
testResultCMNamePrefix = "test-result-"

RunnerVersion = "2.325.0"
RunnerContainerHooksVersion = "0.7.0"
RunnerVersion = "2.328.0"
RunnerContainerHooksVersion = "0.8.0"
)

// If you're willing to run this test via VS Code "run test" or "debug test",