Compare commits

...

28 Commits

Author SHA1 Message Date
Bassem Dghaidi
59cb1d2c8b Prepare 0.10.0 release (#3849) 2024-12-16 11:39:55 +01:00
dependabot[bot]
fd8f76b91c Bump golang.org/x/crypto from 0.22.0 to 0.31.0 (#3844)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Bassem Dghaidi <568794+Link-@users.noreply.github.com>
2024-12-13 15:57:08 +01:00
Bassem Dghaidi
7e04027d19 Make k8s client rate limiter parameters configurable (#3848)
Co-authored-by: Taketoshi Fujiwara <t-b-fujiwara@mercari.com>
2024-12-13 15:37:01 +01:00
Ken Muse
488b0956fd Update docs with details for the dashboard visualizations (#3696)
Co-authored-by: Bassem Dghaidi <568794+Link-@users.noreply.github.com>
2024-12-13 14:50:55 +01:00
dependabot[bot]
3c14ee0652 Bump github.com/bradleyfalzon/ghinstallation/v2 from 2.8.0 to 2.12.0 (#3837)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Bassem Dghaidi <568794+Link-@users.noreply.github.com>
2024-12-11 21:38:46 +01:00
Yusuke Kuoka
32ae917937 Make EphemeralRunnerReconciler create runner pods earlier (#3831)
Co-authored-by: Bassem Dghaidi <568794+Link-@users.noreply.github.com>
2024-12-11 21:28:29 +01:00
Yusuke Kuoka
3998f6dee6 Make EphemeralRunnerController MaxConcurrentReconciles configurable (#3832)
Co-authored-by: Bassem Dghaidi <568794+Link-@users.noreply.github.com>
2024-12-11 21:19:43 +01:00
Bassem Dghaidi
835bc2aed8 Fix ARC e2e tests (#3836) 2024-12-11 14:25:29 +01:00
github-actions[bot]
8b36ea90eb Updates: runner to v2.321.0 container-hooks to v0.6.2 (#3809)
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
2024-11-14 17:39:49 +01:00
github-actions[bot]
96d1bbcf2f Updates: runner to v2.320.0 (#3763)
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
2024-10-08 12:51:03 -04:00
Bassem Dghaidi
90b68fec1a Add exponential backoff when generating runner reg tokens (#3724) 2024-09-04 12:23:31 +02:00
github-actions[bot]
1be410ba80 Updates: runner to v2.319.1 (#3708)
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: Bassem Dghaidi <568794+Link-@users.noreply.github.com>
2024-08-20 12:22:06 +02:00
github-actions[bot]
930c9db6e7 Updates: runner to v2.319.0 (#3702)
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
2024-08-20 11:30:43 +02:00
github-actions[bot]
a152741a1a Updates: runner to v2.318.0 container-hooks to v0.6.1 (#3684)
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
2024-07-26 13:52:44 -04:00
Nikola Jokic
80d848339e Prepare 0.9.3 release (#3624) 2024-06-25 12:35:39 +02:00
dependabot[bot]
8535a24135 Bump github.com/hashicorp/go-retryablehttp from 0.7.5 to 0.7.7 (#3623)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-06-25 10:18:13 +02:00
Nikola Jokic
b349ded2be Increase test timeouts to avoid CI test failures (#3554) 2024-06-21 13:45:48 +02:00
Nikola Jokic
6276c84493 AutoscalingListener controller: Inspect listener container state instead of pod phase (#3548) 2024-06-21 13:40:08 +02:00
Nikola Jokic
4a8420ce96 Update forgotten azure/setup-helm action (#3612) 2024-06-21 13:31:36 +02:00
Nikola Jokic
a62ca3d853 Exclude label prefix propagation (#3607) 2024-06-21 12:12:14 +02:00
Nikola Jokic
4eb038eaa1 Bump node actions (#3569) 2024-06-21 12:11:29 +02:00
Nikola Jokic
b2c6992e84 Check status code of fetch access token for github app (#3568) 2024-06-21 12:10:56 +02:00
Nikola Jokic
0a6208e38d Bump Go patch version to 1.22.4 (#3593) 2024-06-17 10:36:23 +02:00
Nikola Jokic
2cc793a835 Remove .Named() from the ephemeral runner controller (#3596) 2024-06-17 10:36:08 +02:00
github-actions[bot]
894732732a Updates: runner to v2.317.0 (#3559)
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
2024-06-07 11:53:30 +02:00
Serge
e45ac190e2 Customize work directory (#3477) 2024-06-04 15:16:45 +02:00
Katarzyna
d0fb7206a4 Fix problem with ephemeralRunner Succeeded state before build executed (#3528) 2024-06-03 10:49:45 +02:00
Nikola Jokic
9afd93065f Remove finalizers in one pass to speed up cleanups AutoscalingRunnerSet (#3536) 2024-05-27 09:21:31 +02:00
47 changed files with 820 additions and 350 deletions

View File

@@ -47,7 +47,7 @@ runs:
-d '{"ref": "main", "inputs": { "arc_name": "${{inputs.arc-name}}" } }'
- name: Fetch workflow run & job ids
uses: actions/github-script@v6
uses: actions/github-script@v7
id: query_workflow
with:
script: |
@@ -128,7 +128,7 @@ runs:
- name: Wait for workflow to start running
if: inputs.wait-to-running == 'true' && inputs.wait-to-finish == 'false'
uses: actions/github-script@v6
uses: actions/github-script@v7
with:
script: |
function sleep(ms) {
@@ -156,7 +156,7 @@ runs:
- name: Wait for workflow to finish successfully
if: inputs.wait-to-finish == 'true'
uses: actions/github-script@v6
uses: actions/github-script@v7
with:
script: |
// Wait 5 minutes and make sure the workflow run we triggered completed with result 'success'
@@ -188,6 +188,19 @@ runs:
}
core.setFailed(`The triggered workflow run didn't finish properly using ${{inputs.arc-name}}`)
- name: Gather listener logs
shell: bash
if: always()
run: |
LISTENER_POD="$(kubectl get autoscalinglisteners.actions.github.com -n arc-systems -o jsonpath='{.items[*].metadata.name}')"
kubectl logs $LISTENER_POD -n ${{inputs.arc-controller-namespace}}
- name: Gather coredns logs
shell: bash
if: always()
run: |
kubectl logs deployments/coredns -n kube-system
- name: cleanup
if: inputs.wait-to-finish == 'true'
shell: bash
@@ -195,8 +208,8 @@ runs:
helm uninstall ${{ inputs.arc-name }} --namespace ${{inputs.arc-namespace}} --debug
kubectl wait --timeout=30s --for=delete AutoScalingRunnerSet -n ${{inputs.arc-namespace}} -l app.kubernetes.io/instance=${{ inputs.arc-name }}
- name: Gather logs and cleanup
- name: Gather controller logs
shell: bash
if: always()
run: |
kubectl logs deployment/arc-gha-rs-controller -n ${{inputs.arc-controller-namespace}}
kubectl logs deployment/arc-gha-rs-controller -n ${{inputs.arc-controller-namespace}}

View File

@@ -27,7 +27,7 @@ runs:
using: "composite"
steps:
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
uses: docker/setup-buildx-action@v3
with:
# Pinning v0.9.1 for Buildx and BuildKit v0.10.6
# BuildKit v0.11 which has a bug causing intermittent
@@ -36,7 +36,7 @@ runs:
driver-opts: image=moby/buildkit:v0.10.6
- name: Build controller image
uses: docker/build-push-action@v3
uses: docker/build-push-action@v5
with:
file: Dockerfile
platforms: linux/amd64
@@ -56,7 +56,7 @@ runs:
- name: Get configure token
id: config-token
uses: peter-murray/workflow-application-token-action@8e1ba3bf1619726336414f1014e37f17fbadf1db
uses: peter-murray/workflow-application-token-action@dc0413987a085fa17d19df9e47d4677cf81ffef3
with:
application_id: ${{ inputs.app-id }}
application_private_key: ${{ inputs.app-pk }}

View File

@@ -24,23 +24,23 @@ runs:
shell: bash
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
uses: docker/setup-buildx-action@v3
with:
version: latest
- name: Login to DockerHub
if: ${{ github.event_name == 'release' || github.event_name == 'push' && github.ref == 'refs/heads/master' && inputs.password != '' }}
uses: docker/login-action@v2
uses: docker/login-action@v3
with:
username: ${{ inputs.username }}
password: ${{ inputs.password }}
- name: Login to GitHub Container Registry
if: ${{ github.event_name == 'release' || github.event_name == 'push' && github.ref == 'refs/heads/master' && inputs.ghcr_password != '' }}
uses: docker/login-action@v2
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ inputs.ghcr_username }}

View File

@@ -40,12 +40,12 @@ jobs:
publish-chart: ${{ steps.publish-chart-step.outputs.publish }}
steps:
- name: Checkout
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Set up Helm
uses: azure/setup-helm@v3.4
uses: azure/setup-helm@fe7b79cd5ee1e45176fcad797de68ecaf3ca4814
with:
version: ${{ env.HELM_VERSION }}
@@ -58,7 +58,7 @@ jobs:
run: helm template --values charts/.ci/values-kube-score.yaml charts/* | ./kube-score score - --ignore-test pod-networkpolicy --ignore-test deployment-has-poddisruptionbudget --ignore-test deployment-has-host-podantiaffinity --ignore-test container-security-context --ignore-test pod-probes --ignore-test container-image-tag --enable-optional-test container-security-context-privileged --enable-optional-test container-security-context-readonlyrootfilesystem
# python is a requirement for the chart-testing action below (supports yamllint among other tests)
- uses: actions/setup-python@v4
- uses: actions/setup-python@v5
with:
python-version: '3.11'
@@ -134,7 +134,7 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
fetch-depth: 0
@@ -145,7 +145,7 @@ jobs:
- name: Get Token
id: get_workflow_token
uses: peter-murray/workflow-application-token-action@8e1ba3bf1619726336414f1014e37f17fbadf1db
uses: peter-murray/workflow-application-token-action@dc0413987a085fa17d19df9e47d4677cf81ffef3
with:
application_id: ${{ secrets.ACTIONS_ACCESS_APP_ID }}
application_private_key: ${{ secrets.ACTIONS_ACCESS_PK }}
@@ -184,7 +184,7 @@ jobs:
# this workaround is intended to move the index.yaml to the target repo
# where the github pages are hosted
- name: Checkout target repository
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
repository: ${{ env.CHART_TARGET_ORG }}/${{ env.CHART_TARGET_REPO }}
path: ${{ env.CHART_TARGET_REPO }}

View File

@@ -39,9 +39,9 @@ jobs:
if: ${{ !startsWith(github.event.inputs.release_tag_name, 'gha-runner-scale-set-') }}
steps:
- name: Checkout
uses: actions/checkout@v3
uses: actions/checkout@v4
- uses: actions/setup-go@v4
- uses: actions/setup-go@v5
with:
go-version-file: 'go.mod'
@@ -73,7 +73,7 @@ jobs:
- name: Get Token
id: get_workflow_token
uses: peter-murray/workflow-application-token-action@8e1ba3bf1619726336414f1014e37f17fbadf1db
uses: peter-murray/workflow-application-token-action@dc0413987a085fa17d19df9e47d4677cf81ffef3
with:
application_id: ${{ secrets.ACTIONS_ACCESS_APP_ID }}
application_private_key: ${{ secrets.ACTIONS_ACCESS_PK }}

View File

@@ -28,7 +28,7 @@ jobs:
name: Trigger Build and Push of Runner Images
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
- name: Get runner version
id: versions
run: |
@@ -39,7 +39,7 @@ jobs:
- name: Get Token
id: get_workflow_token
uses: peter-murray/workflow-application-token-action@8e1ba3bf1619726336414f1014e37f17fbadf1db
uses: peter-murray/workflow-application-token-action@dc0413987a085fa17d19df9e47d4677cf81ffef3
with:
application_id: ${{ secrets.ACTIONS_ACCESS_APP_ID }}
application_private_key: ${{ secrets.ACTIONS_ACCESS_PK }}

View File

@@ -21,7 +21,7 @@ jobs:
container_hooks_current_version: ${{ steps.container_hooks_versions.outputs.container_hooks_current_version }}
container_hooks_latest_version: ${{ steps.container_hooks_versions.outputs.container_hooks_latest_version }}
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
- name: Get runner current and latest versions
id: runner_versions
@@ -64,7 +64,7 @@ jobs:
echo "CONTAINER_HOOKS_CURRENT_VERSION=${{ needs.check_versions.outputs.container_hooks_current_version }}"
echo "CONTAINER_HOOKS_LATEST_VERSION=${{ needs.check_versions.outputs.container_hooks_latest_version }}"
- uses: actions/checkout@v3
- uses: actions/checkout@v4
- name: PR Name
id: pr_name
@@ -119,7 +119,7 @@ jobs:
PR_NAME: ${{ needs.check_pr.outputs.pr_name }}
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
- name: New branch
run: git checkout -b update-runner-"$(date +%Y-%m-%d)"

View File

@@ -40,13 +40,13 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Set up Helm
# Using https://github.com/Azure/setup-helm/releases/tag/v3.5
uses: azure/setup-helm@5119fcb9089d432beecbf79bb2c7915207344b78
# Using https://github.com/Azure/setup-helm/releases/tag/v4.2
uses: azure/setup-helm@fe7b79cd5ee1e45176fcad797de68ecaf3ca4814
with:
version: ${{ env.HELM_VERSION }}
@@ -67,7 +67,7 @@ jobs:
--enable-optional-test container-security-context-readonlyrootfilesystem
# python is a requirement for the chart-testing action below (supports yamllint among other tests)
- uses: actions/setup-python@v4
- uses: actions/setup-python@v5
with:
python-version: '3.11'

View File

@@ -24,7 +24,7 @@ jobs:
name: runner / shellcheck
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
- name: shellcheck
uses: reviewdog/action-shellcheck@v1
with:
@@ -45,7 +45,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v3
uses: actions/checkout@v4
- name: Run tests
run: |

View File

@@ -16,7 +16,7 @@ env:
TARGET_ORG: actions-runner-controller
TARGET_REPO: arc_e2e_test_dummy
IMAGE_NAME: "arc-test-image"
IMAGE_VERSION: "0.9.2"
IMAGE_VERSION: "0.10.0"
concurrency:
# This will make sure we only apply the concurrency limits on pull requests
@@ -33,7 +33,7 @@ jobs:
env:
WORKFLOW_FILE: "arc-test-workflow.yaml"
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
with:
ref: ${{github.head_ref}}
@@ -103,6 +103,8 @@ jobs:
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME
kubectl get pod -n arc-systems
sleep 60
- name: Test ARC E2E
uses: ./.github/actions/execute-assert-arc-e2e
timeout-minutes: 10
@@ -122,7 +124,7 @@ jobs:
env:
WORKFLOW_FILE: "arc-test-workflow.yaml"
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
with:
ref: ${{github.head_ref}}
@@ -194,6 +196,8 @@ jobs:
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME
kubectl get pod -n arc-systems
sleep 60
- name: Test ARC E2E
uses: ./.github/actions/execute-assert-arc-e2e
timeout-minutes: 10
@@ -213,7 +217,7 @@ jobs:
env:
WORKFLOW_FILE: arc-test-dind-workflow.yaml
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
with:
ref: ${{github.head_ref}}
@@ -284,6 +288,8 @@ jobs:
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME
kubectl get pod -n arc-systems
sleep 60
- name: Test ARC E2E
uses: ./.github/actions/execute-assert-arc-e2e
timeout-minutes: 10
@@ -303,7 +309,7 @@ jobs:
env:
WORKFLOW_FILE: "arc-test-kubernetes-workflow.yaml"
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
with:
ref: ${{github.head_ref}}
@@ -383,6 +389,8 @@ jobs:
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME
kubectl get pod -n arc-systems
sleep 60
- name: Test ARC E2E
uses: ./.github/actions/execute-assert-arc-e2e
timeout-minutes: 10
@@ -402,7 +410,7 @@ jobs:
env:
WORKFLOW_FILE: "arc-test-workflow.yaml"
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
with:
ref: ${{github.head_ref}}
@@ -484,6 +492,8 @@ jobs:
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME
kubectl get pod -n arc-systems
sleep 60
- name: Test ARC E2E
uses: ./.github/actions/execute-assert-arc-e2e
timeout-minutes: 10
@@ -503,7 +513,7 @@ jobs:
env:
WORKFLOW_FILE: "arc-test-workflow.yaml"
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
with:
ref: ${{github.head_ref}}
@@ -579,6 +589,8 @@ jobs:
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME
kubectl get pod -n arc-systems
sleep 60
- name: Test ARC E2E
uses: ./.github/actions/execute-assert-arc-e2e
timeout-minutes: 10
@@ -598,7 +610,7 @@ jobs:
env:
WORKFLOW_FILE: "arc-test-workflow.yaml"
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
with:
ref: ${{github.head_ref}}
@@ -699,6 +711,8 @@ jobs:
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME
kubectl get pod -n arc-systems
sleep 60
- name: Test ARC E2E
uses: ./.github/actions/execute-assert-arc-e2e
timeout-minutes: 10
@@ -718,7 +732,7 @@ jobs:
env:
WORKFLOW_FILE: "arc-test-sleepy-matrix.yaml"
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
with:
ref: ${{github.head_ref}}
@@ -789,6 +803,8 @@ jobs:
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME
kubectl get pod -n arc-systems
sleep 60
- name: Trigger long running jobs and wait for runners to pick them up
uses: ./.github/actions/execute-assert-arc-e2e
timeout-minutes: 10
@@ -888,7 +904,7 @@ jobs:
env:
WORKFLOW_FILE: arc-test-workflow.yaml
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
with:
ref: ${{ github.head_ref }}

View File

@@ -45,7 +45,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
# If inputs.ref is empty, it'll resolve to the default branch
ref: ${{ inputs.ref }}
@@ -72,10 +72,10 @@ jobs:
echo "repository_owner=$(echo ${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
uses: docker/setup-buildx-action@v3
with:
# Pinning v0.9.1 for Buildx and BuildKit v0.10.6
# BuildKit v0.11 which has a bug causing intermittent
@@ -84,14 +84,14 @@ jobs:
driver-opts: image=moby/buildkit:v0.10.6
- name: Login to GitHub Container Registry
uses: docker/login-action@v2
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Build & push controller image
uses: docker/build-push-action@v3
uses: docker/build-push-action@v5
with:
file: Dockerfile
platforms: linux/amd64,linux/arm64
@@ -121,7 +121,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
# If inputs.ref is empty, it'll resolve to the default branch
ref: ${{ inputs.ref }}
@@ -140,8 +140,8 @@ jobs:
echo "repository_owner=$(echo ${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT
- name: Set up Helm
# Using https://github.com/Azure/setup-helm/releases/tag/v3.5
uses: azure/setup-helm@5119fcb9089d432beecbf79bb2c7915207344b78
# Using https://github.com/Azure/setup-helm/releases/tag/v4.2
uses: azure/setup-helm@fe7b79cd5ee1e45176fcad797de68ecaf3ca4814
with:
version: ${{ env.HELM_VERSION }}
@@ -169,7 +169,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
# If inputs.ref is empty, it'll resolve to the default branch
ref: ${{ inputs.ref }}
@@ -188,8 +188,8 @@ jobs:
echo "repository_owner=$(echo ${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT
- name: Set up Helm
# Using https://github.com/Azure/setup-helm/releases/tag/v3.5
uses: azure/setup-helm@5119fcb9089d432beecbf79bb2c7915207344b78
# Using https://github.com/Azure/setup-helm/releases/tag/v4.2
uses: azure/setup-helm@fe7b79cd5ee1e45176fcad797de68ecaf3ca4814
with:
version: ${{ env.HELM_VERSION }}

View File

@@ -36,13 +36,13 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Set up Helm
# Using https://github.com/Azure/setup-helm/releases/tag/v3.5
uses: azure/setup-helm@5119fcb9089d432beecbf79bb2c7915207344b78
# Using https://github.com/Azure/setup-helm/releases/tag/v4.2
uses: azure/setup-helm@fe7b79cd5ee1e45176fcad797de68ecaf3ca4814
with:
version: ${{ env.HELM_VERSION }}
@@ -63,7 +63,7 @@ jobs:
--enable-optional-test container-security-context-readonlyrootfilesystem
# python is a requirement for the chart-testing action below (supports yamllint among other tests)
- uses: actions/setup-python@v4
- uses: actions/setup-python@v5
with:
python-version: '3.11'
@@ -84,13 +84,13 @@ jobs:
ct lint --config charts/.ci/ct-config-gha.yaml
- name: Set up docker buildx
uses: docker/setup-buildx-action@v2
uses: docker/setup-buildx-action@v3
if: steps.list-changed.outputs.changed == 'true'
with:
version: latest
- name: Build controller image
uses: docker/build-push-action@v3
uses: docker/build-push-action@v5
if: steps.list-changed.outputs.changed == 'true'
with:
file: Dockerfile

View File

@@ -55,11 +55,11 @@ jobs:
TARGET_REPO: actions-runner-controller
steps:
- name: Checkout
uses: actions/checkout@v3
uses: actions/checkout@v4
- name: Get Token
id: get_workflow_token
uses: peter-murray/workflow-application-token-action@8e1ba3bf1619726336414f1014e37f17fbadf1db
uses: peter-murray/workflow-application-token-action@dc0413987a085fa17d19df9e47d4677cf81ffef3
with:
application_id: ${{ secrets.ACTIONS_ACCESS_APP_ID }}
application_private_key: ${{ secrets.ACTIONS_ACCESS_PK }}
@@ -90,10 +90,10 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v3
uses: actions/checkout@v4
- name: Login to GitHub Container Registry
uses: docker/login-action@v2
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.actor }}
@@ -110,16 +110,16 @@ jobs:
echo "repository_owner=$(echo ${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
uses: docker/setup-buildx-action@v3
with:
version: latest
# Unstable builds - run at your own risk
- name: Build and Push
uses: docker/build-push-action@v3
uses: docker/build-push-action@v5
with:
context: .
file: ./Dockerfile

View File

@@ -25,10 +25,10 @@ jobs:
security-events: write
steps:
- name: Checkout repository
uses: actions/checkout@v3
uses: actions/checkout@v4
- name: Install Go
uses: actions/setup-go@v4
uses: actions/setup-go@v5
with:
go-version-file: go.mod

View File

@@ -11,7 +11,7 @@ jobs:
check_for_first_interaction:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
- uses: actions/first-interaction@main
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}

View File

@@ -29,8 +29,8 @@ jobs:
fmt:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/setup-go@v4
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
go-version-file: 'go.mod'
cache: false
@@ -42,13 +42,13 @@ jobs:
lint:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/setup-go@v4
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
go-version-file: 'go.mod'
cache: false
- name: golangci-lint
uses: golangci/golangci-lint-action@v3
uses: golangci/golangci-lint-action@v6
with:
only-new-issues: true
version: v1.55.2
@@ -56,8 +56,8 @@ jobs:
generate:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/setup-go@v4
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
go-version-file: 'go.mod'
cache: false
@@ -69,8 +69,8 @@ jobs:
test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/setup-go@v4
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
go-version-file: 'go.mod'
- run: make manifests

View File

@@ -1,5 +1,5 @@
# Build the manager binary
FROM --platform=$BUILDPLATFORM golang:1.22.1 as builder
FROM --platform=$BUILDPLATFORM golang:1.22.4 as builder
WORKDIR /workspace

View File

@@ -6,7 +6,7 @@ endif
DOCKER_USER ?= $(shell echo ${DOCKER_IMAGE_NAME} | cut -d / -f1)
VERSION ?= dev
COMMIT_SHA = $(shell git rev-parse HEAD)
RUNNER_VERSION ?= 2.316.1
RUNNER_VERSION ?= 2.321.0
TARGETPLATFORM ?= $(shell arch)
RUNNER_NAME ?= ${DOCKER_USER}/actions-runner
RUNNER_TAG ?= ${VERSION}
@@ -310,7 +310,7 @@ github-release: release
# Otherwise we get errors like the below:
# Error: failed to install CRD crds/actions.summerwind.dev_runnersets.yaml: CustomResourceDefinition.apiextensions.k8s.io "runnersets.actions.summerwind.dev" is invalid: [spec.validation.openAPIV3Schema.properties[spec].properties[template].properties[spec].properties[containers].items.properties[ports].items.properties[protocol].default: Required value: this property is in x-kubernetes-list-map-keys, so it must have a default or be a required property, spec.validation.openAPIV3Schema.properties[spec].properties[template].properties[spec].properties[initContainers].items.properties[ports].items.properties[protocol].default: Required value: this property is in x-kubernetes-list-map-keys, so it must have a default or be a required property]
#
# Note that controller-gen newer than 0.6.0 is needed due to https://github.com/kubernetes-sigs/controller-tools/issues/448
# Note that controller-gen newer than 0.6.2 is needed due to https://github.com/kubernetes-sigs/controller-tools/issues/448
# Otherwise ObjectMeta embedded in Spec results in empty on the storage.
controller-gen:
ifeq (, $(shell which controller-gen))

View File

@@ -15,13 +15,13 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.9.2
version: 0.10.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "0.9.2"
appVersion: "0.10.0"
home: https://github.com/actions/actions-runner-controller

View File

@@ -65,6 +65,9 @@ spec:
{{- with .Values.flags.watchSingleNamespace }}
- "--watch-single-namespace={{ . }}"
{{- end }}
{{- with .Values.runnerMaxConcurrentReconciles }}
- "--runner-max-concurrent-reconciles={{ . }}"
{{- end }}
{{- with .Values.flags.updateStrategy }}
- "--update-strategy={{ . }}"
{{- end }}
@@ -79,6 +82,15 @@ spec:
- "--listener-metrics-endpoint="
- "--metrics-addr=0"
{{- end }}
{{- range .Values.flags.excludeLabelPropagationPrefixes }}
- "--exclude-label-propagation-prefix={{ . }}"
{{- end }}
{{- with .Values.flags.k8sClientRateLimiterQPS }}
- "--k8s-client-rate-limiter-qps={{ . }}"
{{- end }}
{{- with .Values.flags.k8sClientRateLimiterBurst }}
- "--k8s-client-rate-limiter-burst={{ . }}"
{{- end }}
command:
- "/manager"
{{- with .Values.metrics }}

View File

@@ -1035,3 +1035,41 @@ func TestControllerDeployment_MetricsPorts(t *testing.T) {
assert.Equal(t, value.frequency, 1, fmt.Sprintf("frequency of %q is not 1", key))
}
}
func TestDeployment_excludeLabelPropagationPrefixes(t *testing.T) {
t.Parallel()
// Path to the helm chart we will test
helmChartPath, err := filepath.Abs("../../gha-runner-scale-set-controller")
require.NoError(t, err)
chartContent, err := os.ReadFile(filepath.Join(helmChartPath, "Chart.yaml"))
require.NoError(t, err)
chart := new(Chart)
err = yaml.Unmarshal(chartContent, chart)
require.NoError(t, err)
releaseName := "test-arc"
namespaceName := "test-" + strings.ToLower(random.UniqueId())
options := &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"flags.excludeLabelPropagationPrefixes[0]": "prefix.com/",
"flags.excludeLabelPropagationPrefixes[1]": "complete.io/label",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/deployment.yaml"})
var deployment appsv1.Deployment
helm.UnmarshalK8SYaml(t, output, &deployment)
require.Len(t, deployment.Spec.Template.Spec.Containers, 1, "Expected one container")
container := deployment.Spec.Template.Spec.Containers[0]
assert.Contains(t, container.Args, "--exclude-label-propagation-prefix=prefix.com/")
assert.Contains(t, container.Args, "--exclude-label-propagation-prefix=complete.io/label")
}

View File

@@ -106,6 +106,11 @@ flags:
## Defaults to watch all namespaces when unset.
# watchSingleNamespace: ""
## The maximum number of concurrent reconciles which can be run by the EphemeralRunner controller.
## Increase this value to improve the throughput of the controller.
## It may also increase the load on the API server and the external service (e.g. GitHub API).
runnerMaxConcurrentReconciles: 2
## Defines how the controller should handle upgrades while having running jobs.
##
## The strategies available are:
@@ -121,3 +126,16 @@ flags:
## This can lead to a longer time to apply the change but it will ensure
## that you don't have any overprovisioning of runners.
updateStrategy: "immediate"
## Defines a list of prefixes that should not be propagated to internal resources.
## This is useful when you have labels that are used for internal purposes and should not be propagated to internal resources.
## See https://github.com/actions/actions-runner-controller/issues/3533 for more information.
##
## By default, all labels are propagated to internal resources
## Labels that match prefix specified in the list are excluded from propagation.
# excludeLabelPropagationPrefixes:
# - "argocd.argoproj.io/instance"
## Defines the K8s client rate limiter parameters.
# k8sClientRateLimiterQPS: 20
# k8sClientRateLimiterBurst: 30
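
The three values above map onto controller command-line flags (--runner-max-concurrent-reconciles, --k8s-client-rate-limiter-qps, --k8s-client-rate-limiter-burst), as the deployment template diff earlier in this comparison shows. As rough orientation — a minimal sketch, not ARC's actual main.go — flags like these are typically wired into a controller-runtime manager as follows: the QPS/burst values land on the client-go rest.Config, while reconcile concurrency is set per controller via controller.Options. The Pod resource, flag defaults, and no-op reconciler below are placeholders.

package main

import (
	"context"
	"flag"

	corev1 "k8s.io/api/core/v1"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/controller"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

func main() {
	qps := flag.Float64("k8s-client-rate-limiter-qps", 20, "client-side rate limiter QPS")
	burst := flag.Int("k8s-client-rate-limiter-burst", 30, "client-side rate limiter burst")
	workers := flag.Int("runner-max-concurrent-reconciles", 2, "max concurrent reconciles")
	flag.Parse()

	// The client-go rate limiter lives on rest.Config: raising QPS and
	// burst lets a busy controller issue more API requests before being
	// throttled client-side.
	cfg := ctrl.GetConfigOrDie()
	cfg.QPS = float32(*qps)
	cfg.Burst = *burst

	mgr, err := ctrl.NewManager(cfg, ctrl.Options{})
	if err != nil {
		panic(err)
	}

	// MaxConcurrentReconciles bounds how many Reconcile calls run in
	// parallel for this one controller (Pod is a stand-in resource here).
	err = ctrl.NewControllerManagedBy(mgr).
		For(&corev1.Pod{}).
		WithOptions(controller.Options{MaxConcurrentReconciles: *workers}).
		Complete(reconcile.Func(func(ctx context.Context, _ reconcile.Request) (reconcile.Result, error) {
			return reconcile.Result{}, nil
		}))
	if err != nil {
		panic(err)
	}

	if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
		panic(err)
	}
}

Raising MaxConcurrentReconciles increases throughput but, as the values.yaml comment warns, multiplies load on the API server and on GitHub — which is presumably why the client-side rate limiter became configurable in the same release (#3848 alongside #3832).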

View File

@@ -15,13 +15,13 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.9.2
version: 0.10.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "0.9.2"
appVersion: "0.10.0"
home: https://github.com/actions/actions-runner-controller

View File

@@ -55,7 +55,7 @@ type AutoscalingListenerReconciler struct {
ListenerMetricsAddr string
ListenerMetricsEndpoint string
resourceBuilder resourceBuilder
ResourceBuilder
}
// +kubebuilder:rbac:groups=core,resources=pods,verbs=get;list;watch;create;update;patch;delete
@@ -242,17 +242,27 @@ func (r *AutoscalingListenerReconciler) Reconcile(ctx context.Context, req ctrl.
return r.createListenerPod(ctx, &autoscalingRunnerSet, autoscalingListener, serviceAccount, mirrorSecret, log)
}
// The listener pod failed might mean the mirror secret is out of date
// Delete the listener pod and re-create it to make sure the mirror secret is up to date
if listenerPod.Status.Phase == corev1.PodFailed && listenerPod.DeletionTimestamp.IsZero() {
log.Info("Listener pod failed, deleting it and re-creating it", "namespace", listenerPod.Namespace, "name", listenerPod.Name, "reason", listenerPod.Status.Reason, "message", listenerPod.Status.Message)
if err := r.Delete(ctx, listenerPod); err != nil && !kerrors.IsNotFound(err) {
log.Error(err, "Unable to delete the listener pod", "namespace", listenerPod.Namespace, "name", listenerPod.Name)
return ctrl.Result{}, err
}
}
cs := listenerContainerStatus(listenerPod)
switch {
case cs == nil:
log.Info("Listener pod is not ready", "namespace", listenerPod.Namespace, "name", listenerPod.Name)
return ctrl.Result{}, nil
case cs.State.Terminated != nil:
log.Info("Listener pod is terminated", "namespace", listenerPod.Namespace, "name", listenerPod.Name, "reason", cs.State.Terminated.Reason, "message", cs.State.Terminated.Message)
if listenerPod.Status.Phase == corev1.PodRunning {
if err := r.publishRunningListener(autoscalingListener, false); err != nil {
log.Error(err, "Unable to publish runner listener down metric", "namespace", listenerPod.Namespace, "name", listenerPod.Name)
}
if listenerPod.DeletionTimestamp.IsZero() {
log.Info("Deleting the listener pod", "namespace", listenerPod.Namespace, "name", listenerPod.Name)
if err := r.Delete(ctx, listenerPod); err != nil && !kerrors.IsNotFound(err) {
log.Error(err, "Unable to delete the listener pod", "namespace", listenerPod.Namespace, "name", listenerPod.Name)
return ctrl.Result{}, err
}
}
return ctrl.Result{}, nil
case cs.State.Running != nil:
if err := r.publishRunningListener(autoscalingListener, true); err != nil {
log.Error(err, "Unable to publish running listener", "namespace", listenerPod.Namespace, "name", listenerPod.Name)
// stop reconciling. We should never get to this point but if we do,
@@ -260,8 +270,8 @@ func (r *AutoscalingListenerReconciler) Reconcile(ctx context.Context, req ctrl.
// notify the reconciler again.
return ctrl.Result{}, nil
}
return ctrl.Result{}, nil
}
return ctrl.Result{}, nil
}
@@ -373,7 +383,7 @@ func (r *AutoscalingListenerReconciler) cleanupResources(ctx context.Context, au
}
func (r *AutoscalingListenerReconciler) createServiceAccountForListener(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, logger logr.Logger) (ctrl.Result, error) {
newServiceAccount := r.resourceBuilder.newScaleSetListenerServiceAccount(autoscalingListener)
newServiceAccount := r.ResourceBuilder.newScaleSetListenerServiceAccount(autoscalingListener)
if err := ctrl.SetControllerReference(autoscalingListener, newServiceAccount, r.Scheme); err != nil {
return ctrl.Result{}, err
@@ -458,7 +468,7 @@ func (r *AutoscalingListenerReconciler) createListenerPod(ctx context.Context, a
logger.Info("Creating listener config secret")
podConfig, err := r.resourceBuilder.newScaleSetListenerConfig(autoscalingListener, secret, metricsConfig, cert)
podConfig, err := r.ResourceBuilder.newScaleSetListenerConfig(autoscalingListener, secret, metricsConfig, cert)
if err != nil {
logger.Error(err, "Failed to build listener config secret")
return ctrl.Result{}, err
@@ -477,7 +487,7 @@ func (r *AutoscalingListenerReconciler) createListenerPod(ctx context.Context, a
return ctrl.Result{Requeue: true}, nil
}
newPod, err := r.resourceBuilder.newScaleSetListenerPod(autoscalingListener, &podConfig, serviceAccount, secret, metricsConfig, envs...)
newPod, err := r.ResourceBuilder.newScaleSetListenerPod(autoscalingListener, &podConfig, serviceAccount, secret, metricsConfig, envs...)
if err != nil {
logger.Error(err, "Failed to build listener pod")
return ctrl.Result{}, err
@@ -537,7 +547,7 @@ func (r *AutoscalingListenerReconciler) certificate(ctx context.Context, autosca
}
func (r *AutoscalingListenerReconciler) createSecretsForListener(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, secret *corev1.Secret, logger logr.Logger) (ctrl.Result, error) {
newListenerSecret := r.resourceBuilder.newScaleSetListenerSecretMirror(autoscalingListener, secret)
newListenerSecret := r.ResourceBuilder.newScaleSetListenerSecretMirror(autoscalingListener, secret)
if err := ctrl.SetControllerReference(autoscalingListener, newListenerSecret, r.Scheme); err != nil {
return ctrl.Result{}, err
@@ -609,7 +619,7 @@ func (r *AutoscalingListenerReconciler) updateSecretsForListener(ctx context.Con
}
func (r *AutoscalingListenerReconciler) createRoleForListener(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, logger logr.Logger) (ctrl.Result, error) {
newRole := r.resourceBuilder.newScaleSetListenerRole(autoscalingListener)
newRole := r.ResourceBuilder.newScaleSetListenerRole(autoscalingListener)
logger.Info("Creating listener role", "namespace", newRole.Namespace, "name", newRole.Name, "rules", newRole.Rules)
if err := r.Create(ctx, newRole); err != nil {
@@ -637,7 +647,7 @@ func (r *AutoscalingListenerReconciler) updateRoleForListener(ctx context.Contex
}
func (r *AutoscalingListenerReconciler) createRoleBindingForListener(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, listenerRole *rbacv1.Role, serviceAccount *corev1.ServiceAccount, logger logr.Logger) (ctrl.Result, error) {
newRoleBinding := r.resourceBuilder.newScaleSetListenerRoleBinding(autoscalingListener, listenerRole, serviceAccount)
newRoleBinding := r.ResourceBuilder.newScaleSetListenerRoleBinding(autoscalingListener, listenerRole, serviceAccount)
logger.Info("Creating listener role binding",
"namespace", newRoleBinding.Namespace,
@@ -722,3 +732,13 @@ func (r *AutoscalingListenerReconciler) SetupWithManager(mgr ctrl.Manager) error
WithEventFilter(predicate.ResourceVersionChangedPredicate{}).
Complete(r)
}
func listenerContainerStatus(pod *corev1.Pod) *corev1.ContainerStatus {
for i := range pod.Status.ContainerStatuses {
cs := &pod.Status.ContainerStatuses[i]
if cs.Name == autoscalingListenerContainerName {
return cs
}
}
return nil
}

View File

@@ -25,7 +25,7 @@ import (
)
const (
autoscalingListenerTestTimeout = time.Second * 5
autoscalingListenerTestTimeout = time.Second * 20
autoscalingListenerTestInterval = time.Millisecond * 250
autoscalingListenerTestGitHubToken = "gh_token"
)
@@ -351,6 +351,53 @@ var _ = Describe("Test AutoScalingListener controller", func() {
autoscalingListenerTestInterval).Should(BeEquivalentTo(rulesForListenerRole([]string{updated.Spec.EphemeralRunnerSetName})), "Role should be updated")
})
It("It should re-create pod whenever listener container is terminated", func() {
// Waiting for the pod is created
pod := new(corev1.Pod)
Eventually(
func() (string, error) {
err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingListener.Name, Namespace: autoscalingListener.Namespace}, pod)
if err != nil {
return "", err
}
return pod.Name, nil
},
autoscalingListenerTestTimeout,
autoscalingListenerTestInterval,
).Should(BeEquivalentTo(autoscalingListener.Name), "Pod should be created")
oldPodUID := string(pod.UID)
updated := pod.DeepCopy()
updated.Status.ContainerStatuses = []corev1.ContainerStatus{
{
Name: autoscalingListenerContainerName,
State: corev1.ContainerState{
Terminated: &corev1.ContainerStateTerminated{
ExitCode: 0,
},
},
},
}
err := k8sClient.Status().Update(ctx, updated)
Expect(err).NotTo(HaveOccurred(), "failed to update test pod")
// Waiting for the new pod is created
Eventually(
func() (string, error) {
pod := new(corev1.Pod)
err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingListener.Name, Namespace: autoscalingListener.Namespace}, pod)
if err != nil {
return "", err
}
return string(pod.UID), nil
},
autoscalingListenerTestTimeout,
autoscalingListenerTestInterval,
).ShouldNot(BeEquivalentTo(oldPodUID), "Pod should be re-created")
})
It("It should update mirror secrets to match secret used by AutoScalingRunnerSet", func() {
// Waiting for the pod is created
pod := new(corev1.Pod)
@@ -373,7 +420,18 @@ var _ = Describe("Test AutoScalingListener controller", func() {
Expect(err).NotTo(HaveOccurred(), "failed to update test secret")
updatedPod := pod.DeepCopy()
updatedPod.Status.Phase = corev1.PodFailed
// Ignore status running and consult the container state
updatedPod.Status.Phase = corev1.PodRunning
updatedPod.Status.ContainerStatuses = []corev1.ContainerStatus{
{
Name: autoscalingListenerContainerName,
State: corev1.ContainerState{
Terminated: &corev1.ContainerStateTerminated{
ExitCode: 1,
},
},
},
}
err = k8sClient.Status().Update(ctx, updatedPod)
Expect(err).NotTo(HaveOccurred(), "failed to update test pod to failed")
@@ -420,6 +478,7 @@ var _ = Describe("Test AutoScalingListener customization", func() {
var autoscalingListener *v1alpha1.AutoscalingListener
var runAsUser int64 = 1001
const sidecarContainerName = "sidecar"
listenerPodTemplate := corev1.PodTemplateSpec{
Spec: corev1.PodSpec{
@@ -432,7 +491,7 @@ var _ = Describe("Test AutoScalingListener customization", func() {
},
},
{
Name: "sidecar",
Name: sidecarContainerName,
ImagePullPolicy: corev1.PullIfNotPresent,
Image: "busybox",
},
@@ -525,7 +584,8 @@ var _ = Describe("Test AutoScalingListener customization", func() {
return created.Finalizers[0], nil
},
autoscalingListenerTestTimeout,
autoscalingListenerTestInterval).Should(BeEquivalentTo(autoscalingListenerFinalizerName), "AutoScalingListener should have a finalizer")
autoscalingListenerTestInterval,
).Should(BeEquivalentTo(autoscalingListenerFinalizerName), "AutoScalingListener should have a finalizer")
// Check if config is created
config := new(corev1.Secret)
@@ -559,11 +619,100 @@ var _ = Describe("Test AutoScalingListener customization", func() {
Expect(pod.Spec.Containers[0].SecurityContext.RunAsUser).To(Equal(&runAsUser), "Pod should have the correct security context")
Expect(pod.Spec.Containers[0].ImagePullPolicy).To(Equal(corev1.PullAlways), "Pod should have the correct image pull policy")
Expect(pod.Spec.Containers[1].Name).To(Equal("sidecar"), "Pod should have the correct container name")
Expect(pod.Spec.Containers[1].Name).To(Equal(sidecarContainerName), "Pod should have the correct container name")
Expect(pod.Spec.Containers[1].Image).To(Equal("busybox"), "Pod should have the correct image")
Expect(pod.Spec.Containers[1].ImagePullPolicy).To(Equal(corev1.PullIfNotPresent), "Pod should have the correct image pull policy")
})
})
Context("When AutoscalingListener pod has interuptions", func() {
It("Should re-create pod when it is deleted", func() {
pod := new(corev1.Pod)
Eventually(
func() (string, error) {
err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingListener.Name, Namespace: autoscalingListener.Namespace}, pod)
if err != nil {
return "", err
}
return pod.Name, nil
},
autoscalingListenerTestTimeout,
autoscalingListenerTestInterval,
).Should(BeEquivalentTo(autoscalingListener.Name), "Pod should be created")
Expect(len(pod.Spec.Containers)).To(Equal(2), "Pod should have 2 containers")
oldPodUID := string(pod.UID)
err := k8sClient.Delete(ctx, pod)
Expect(err).NotTo(HaveOccurred(), "failed to delete pod")
pod = new(corev1.Pod)
Eventually(
func() (string, error) {
err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingListener.Name, Namespace: autoscalingListener.Namespace}, pod)
if err != nil {
return "", err
}
return string(pod.UID), nil
},
autoscalingListenerTestTimeout,
autoscalingListenerTestInterval,
).ShouldNot(BeEquivalentTo(oldPodUID), "Pod should be created")
})
It("Should re-create pod when the listener pod is terminated", func() {
pod := new(corev1.Pod)
Eventually(
func() (string, error) {
err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingListener.Name, Namespace: autoscalingListener.Namespace}, pod)
if err != nil {
return "", err
}
return pod.Name, nil
},
autoscalingListenerTestTimeout,
autoscalingListenerTestInterval,
).Should(BeEquivalentTo(autoscalingListener.Name), "Pod should be created")
updated := pod.DeepCopy()
oldPodUID := string(pod.UID)
updated.Status.ContainerStatuses = []corev1.ContainerStatus{
{
Name: autoscalingListenerContainerName,
State: corev1.ContainerState{
Terminated: &corev1.ContainerStateTerminated{
ExitCode: 1,
},
},
},
{
Name: sidecarContainerName,
State: corev1.ContainerState{
Running: &corev1.ContainerStateRunning{},
},
},
}
err := k8sClient.Status().Update(ctx, updated)
Expect(err).NotTo(HaveOccurred(), "failed to update pod status")
pod = new(corev1.Pod)
Eventually(
func() (string, error) {
err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingListener.Name, Namespace: autoscalingListener.Namespace}, pod)
if err != nil {
return "", err
}
return string(pod.UID), nil
},
autoscalingListenerTestTimeout,
autoscalingListenerTestInterval,
).ShouldNot(BeEquivalentTo(oldPodUID), "Pod should be created")
})
})
})
var _ = Describe("Test AutoScalingListener controller with proxy", func() {
@@ -887,7 +1036,6 @@ var _ = Describe("Test AutoScalingListener controller with template modification
g.Expect(pod.ObjectMeta.Annotations).To(HaveKeyWithValue("test-annotation-key", "test-annotation-value"), "pod annotations should be copied from runner set template")
g.Expect(pod.ObjectMeta.Labels).To(HaveKeyWithValue("test-label-key", "test-label-value"), "pod labels should be copied from runner set template")
},
autoscalingListenerTestTimeout,
autoscalingListenerTestInterval).Should(Succeed(), "failed to create listener pod with proxy details")

View File

@@ -79,8 +79,7 @@ type AutoscalingRunnerSetReconciler struct {
DefaultRunnerScaleSetListenerImagePullSecrets []string
UpdateStrategy UpdateStrategy
ActionsClient actions.MultiClient
resourceBuilder resourceBuilder
ResourceBuilder
}
// +kubebuilder:rbac:groups=actions.github.com,resources=autoscalingrunnersets,verbs=get;list;watch;create;update;patch;delete
@@ -134,17 +133,11 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl
return ctrl.Result{}, err
}
requeue, err := r.removeFinalizersFromDependentResources(ctx, autoscalingRunnerSet, log)
if err != nil {
if err := r.removeFinalizersFromDependentResources(ctx, autoscalingRunnerSet, log); err != nil {
log.Error(err, "Failed to remove finalizers on dependent resources")
return ctrl.Result{}, err
}
if requeue {
log.Info("Waiting for dependent resources to be deleted")
return ctrl.Result{Requeue: true}, nil
}
log.Info("Removing finalizer")
err = patch(ctx, r.Client, autoscalingRunnerSet, func(obj *v1alpha1.AutoscalingRunnerSet) {
controllerutil.RemoveFinalizer(obj, autoscalingRunnerSetFinalizerName)
@@ -389,7 +382,7 @@ func (r *AutoscalingRunnerSetReconciler) deleteEphemeralRunnerSets(ctx context.C
return nil
}
func (r *AutoscalingRunnerSetReconciler) removeFinalizersFromDependentResources(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) (requeue bool, err error) {
func (r *AutoscalingRunnerSetReconciler) removeFinalizersFromDependentResources(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) error {
c := autoscalingRunnerSetFinalizerDependencyCleaner{
client: r.Client,
autoscalingRunnerSet: autoscalingRunnerSet,
@@ -404,12 +397,7 @@ func (r *AutoscalingRunnerSetReconciler) removeFinalizersFromDependentResources(
c.removeManagerRoleBindingFinalizer(ctx)
c.removeManagerRoleFinalizer(ctx)
requeue, err = c.result()
if err != nil {
logger.Error(err, "Failed to cleanup finalizer from dependent resource")
return true, err
}
return requeue, nil
return c.Err()
}
func (r *AutoscalingRunnerSetReconciler) createRunnerScaleSet(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) (ctrl.Result, error) {
@@ -634,7 +622,7 @@ func (r *AutoscalingRunnerSetReconciler) deleteRunnerScaleSet(ctx context.Contex
}
func (r *AutoscalingRunnerSetReconciler) createEphemeralRunnerSet(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, log logr.Logger) (ctrl.Result, error) {
desiredRunnerSet, err := r.resourceBuilder.newEphemeralRunnerSet(autoscalingRunnerSet)
desiredRunnerSet, err := r.ResourceBuilder.newEphemeralRunnerSet(autoscalingRunnerSet)
if err != nil {
log.Error(err, "Could not create EphemeralRunnerSet")
return ctrl.Result{}, err
@@ -663,7 +651,7 @@ func (r *AutoscalingRunnerSetReconciler) createAutoScalingListenerForRunnerSet(c
})
}
autoscalingListener, err := r.resourceBuilder.newAutoScalingListener(autoscalingRunnerSet, ephemeralRunnerSet, r.ControllerNamespace, r.DefaultRunnerScaleSetListenerImage, imagePullSecrets)
autoscalingListener, err := r.ResourceBuilder.newAutoScalingListener(autoscalingRunnerSet, ephemeralRunnerSet, r.ControllerNamespace, r.DefaultRunnerScaleSetListenerImage, imagePullSecrets)
if err != nil {
log.Error(err, "Could not create AutoscalingListener spec")
return ctrl.Result{}, err
@@ -784,17 +772,16 @@ type autoscalingRunnerSetFinalizerDependencyCleaner struct {
autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet
logger logr.Logger
// fields to operate on
requeue bool
err error
err error
}
func (c *autoscalingRunnerSetFinalizerDependencyCleaner) result() (requeue bool, err error) {
return c.requeue, c.err
func (c *autoscalingRunnerSetFinalizerDependencyCleaner) Err() error {
return c.err
}
func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeKubernetesModeRoleBindingFinalizer(ctx context.Context) {
if c.requeue || c.err != nil {
if c.err != nil {
c.logger.Info("Skipping cleaning up kubernetes mode service account")
return
}
@@ -825,7 +812,6 @@ func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeKubernetesModeRol
c.err = fmt.Errorf("failed to patch kubernetes mode role binding without finalizer: %w", err)
return
}
c.requeue = true
c.logger.Info("Removed finalizer from container mode kubernetes role binding", "name", roleBindingName)
return
case err != nil && !kerrors.IsNotFound(err):
@@ -838,7 +824,7 @@ func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeKubernetesModeRol
}
func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeKubernetesModeRoleFinalizer(ctx context.Context) {
if c.requeue || c.err != nil {
if c.err != nil {
return
}
@@ -868,7 +854,6 @@ func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeKubernetesModeRol
c.err = fmt.Errorf("failed to patch kubernetes mode role without finalizer: %w", err)
return
}
c.requeue = true
c.logger.Info("Removed finalizer from container mode kubernetes role")
return
case err != nil && !kerrors.IsNotFound(err):
@@ -881,7 +866,7 @@ func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeKubernetesModeRol
}
func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeKubernetesModeServiceAccountFinalizer(ctx context.Context) {
if c.requeue || c.err != nil {
if c.err != nil {
return
}
@@ -912,7 +897,6 @@ func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeKubernetesModeSer
c.err = fmt.Errorf("failed to patch kubernetes mode service account without finalizer: %w", err)
return
}
c.requeue = true
c.logger.Info("Removed finalizer from container mode kubernetes service account")
return
case err != nil && !kerrors.IsNotFound(err):
@@ -925,7 +909,7 @@ func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeKubernetesModeSer
}
func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeNoPermissionServiceAccountFinalizer(ctx context.Context) {
if c.requeue || c.err != nil {
if c.err != nil {
return
}
@@ -956,7 +940,6 @@ func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeNoPermissionServi
c.err = fmt.Errorf("failed to patch service account without finalizer: %w", err)
return
}
c.requeue = true
c.logger.Info("Removed finalizer from no permission service account", "name", serviceAccountName)
return
case err != nil && !kerrors.IsNotFound(err):
@@ -969,7 +952,7 @@ func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeNoPermissionServi
}
func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeGitHubSecretFinalizer(ctx context.Context) {
if c.requeue || c.err != nil {
if c.err != nil {
return
}
@@ -1000,7 +983,6 @@ func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeGitHubSecretFinal
c.err = fmt.Errorf("failed to patch GitHub secret without finalizer: %w", err)
return
}
c.requeue = true
c.logger.Info("Removed finalizer from GitHub secret", "name", githubSecretName)
return
case err != nil && !kerrors.IsNotFound(err) && !kerrors.IsForbidden(err):
@@ -1013,7 +995,7 @@ func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeGitHubSecretFinal
}
func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeManagerRoleBindingFinalizer(ctx context.Context) {
if c.requeue || c.err != nil {
if c.err != nil {
return
}
@@ -1044,7 +1026,6 @@ func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeManagerRoleBindin
c.err = fmt.Errorf("failed to patch manager role binding without finalizer: %w", err)
return
}
c.requeue = true
c.logger.Info("Removed finalizer from manager role binding", "name", managerRoleBindingName)
return
case err != nil && !kerrors.IsNotFound(err):
@@ -1057,7 +1038,7 @@ func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeManagerRoleBindin
}
func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeManagerRoleFinalizer(ctx context.Context) {
if c.requeue || c.err != nil {
if c.err != nil {
return
}
@@ -1088,7 +1069,6 @@ func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeManagerRoleFinali
c.err = fmt.Errorf("failed to patch manager role without finalizer: %w", err)
return
}
c.requeue = true
c.logger.Info("Removed finalizer from manager role", "name", managerRoleName)
return
case err != nil && !kerrors.IsNotFound(err):

View File

@@ -34,7 +34,7 @@ import (
)
const (
autoscalingRunnerSetTestTimeout = time.Second * 5
autoscalingRunnerSetTestTimeout = time.Second * 20
autoscalingRunnerSetTestInterval = time.Millisecond * 250
autoscalingRunnerSetTestGitHubToken = "gh_token"
)

View File

@@ -49,10 +49,10 @@ const (
// EphemeralRunnerReconciler reconciles a EphemeralRunner object
type EphemeralRunnerReconciler struct {
client.Client
Log logr.Logger
Scheme *runtime.Scheme
ActionsClient actions.MultiClient
resourceBuilder resourceBuilder
Log logr.Logger
Scheme *runtime.Scheme
ActionsClient actions.MultiClient
ResourceBuilder
}
// +kubebuilder:rbac:groups=actions.github.com,resources=ephemeralrunners,verbs=get;list;watch;create;update;patch;delete
@@ -149,19 +149,6 @@ func (r *EphemeralRunnerReconciler) Reconcile(ctx context.Context, req ctrl.Requ
return ctrl.Result{}, nil
}
if !controllerutil.ContainsFinalizer(ephemeralRunner, ephemeralRunnerActionsFinalizerName) {
log.Info("Adding runner registration finalizer")
err := patch(ctx, r.Client, ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) {
controllerutil.AddFinalizer(obj, ephemeralRunnerActionsFinalizerName)
})
if err != nil {
log.Error(err, "Failed to update with runner registration finalizer set")
return ctrl.Result{}, err
}
log.Info("Successfully added runner registration finalizer")
}
if !controllerutil.ContainsFinalizer(ephemeralRunner, ephemeralRunnerFinalizerName) {
log.Info("Adding finalizer")
if err := patch(ctx, r.Client, ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) {
@@ -175,9 +162,25 @@ func (r *EphemeralRunnerReconciler) Reconcile(ctx context.Context, req ctrl.Requ
return ctrl.Result{}, nil
}
if !controllerutil.ContainsFinalizer(ephemeralRunner, ephemeralRunnerActionsFinalizerName) {
log.Info("Adding runner registration finalizer")
err := patch(ctx, r.Client, ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) {
controllerutil.AddFinalizer(obj, ephemeralRunnerActionsFinalizerName)
})
if err != nil {
log.Error(err, "Failed to update with runner registration finalizer set")
return ctrl.Result{}, err
}
log.Info("Successfully added runner registration finalizer")
return ctrl.Result{}, nil
}
if ephemeralRunner.Status.RunnerId == 0 {
log.Info("Creating new ephemeral runner registration and updating status with runner config")
return r.updateStatusWithRunnerConfig(ctx, ephemeralRunner, log)
if r, err := r.updateStatusWithRunnerConfig(ctx, ephemeralRunner, log); r != nil {
return *r, err
}
}
secret := new(corev1.Secret)
@@ -188,7 +191,17 @@ func (r *EphemeralRunnerReconciler) Reconcile(ctx context.Context, req ctrl.Requ
}
// create secret if not created
log.Info("Creating new ephemeral runner secret for jitconfig.")
return r.createSecret(ctx, ephemeralRunner, log)
if r, err := r.createSecret(ctx, ephemeralRunner, log); r != nil {
return *r, err
}
// Retry to get the secret that was just created.
// Otherwise, even though we want to continue to create the pod,
// it fails due to the missing secret resulting in an invalid pod spec.
if err := r.Get(ctx, req.NamespacedName, secret); err != nil {
log.Error(err, "Failed to fetch secret")
return ctrl.Result{}, err
}
}
pod := new(corev1.Pod)
@@ -510,27 +523,35 @@ func (r *EphemeralRunnerReconciler) deletePodAsFailed(ctx context.Context, ephem
// updateStatusWithRunnerConfig fetches runtime configuration needed by the runner
// This method should always set .status.runnerId and .status.runnerJITConfig
func (r *EphemeralRunnerReconciler) updateStatusWithRunnerConfig(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) (ctrl.Result, error) {
func (r *EphemeralRunnerReconciler) updateStatusWithRunnerConfig(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) (*ctrl.Result, error) {
// Runner is not registered with the service. We need to register it first
log.Info("Creating ephemeral runner JIT config")
actionsClient, err := r.actionsClientFor(ctx, ephemeralRunner)
if err != nil {
return ctrl.Result{}, fmt.Errorf("failed to get actions client for generating JIT config: %v", err)
return &ctrl.Result{}, fmt.Errorf("failed to get actions client for generating JIT config: %v", err)
}
jitSettings := &actions.RunnerScaleSetJitRunnerSetting{
Name: ephemeralRunner.Name,
}
for i := range ephemeralRunner.Spec.Spec.Containers {
if ephemeralRunner.Spec.Spec.Containers[i].Name == EphemeralRunnerContainerName &&
ephemeralRunner.Spec.Spec.Containers[i].WorkingDir != "" {
jitSettings.WorkFolder = ephemeralRunner.Spec.Spec.Containers[i].WorkingDir
}
}
jitConfig, err := actionsClient.GenerateJitRunnerConfig(ctx, jitSettings, ephemeralRunner.Spec.RunnerScaleSetId)
if err != nil {
actionsError := &actions.ActionsError{}
if !errors.As(err, &actionsError) {
return ctrl.Result{}, fmt.Errorf("failed to generate JIT config with generic error: %v", err)
return &ctrl.Result{}, fmt.Errorf("failed to generate JIT config with generic error: %v", err)
}
if actionsError.StatusCode != http.StatusConflict ||
!actionsError.IsException("AgentExistsException") {
return ctrl.Result{}, fmt.Errorf("failed to generate JIT config with Actions service error: %v", err)
return &ctrl.Result{}, fmt.Errorf("failed to generate JIT config with Actions service error: %v", err)
}
// If the runner with the name we want already exists it means:
@@ -543,12 +564,12 @@ func (r *EphemeralRunnerReconciler) updateStatusWithRunnerConfig(ctx context.Con
log.Info("Getting runner jit config failed with conflict error, trying to get the runner by name", "runnerName", ephemeralRunner.Name)
existingRunner, err := actionsClient.GetRunnerByName(ctx, ephemeralRunner.Name)
if err != nil {
return ctrl.Result{}, fmt.Errorf("failed to get runner by name: %v", err)
return &ctrl.Result{}, fmt.Errorf("failed to get runner by name: %v", err)
}
if existingRunner == nil {
log.Info("Runner with the same name does not exist, re-queuing the reconciliation")
return ctrl.Result{Requeue: true}, nil
return &ctrl.Result{Requeue: true}, nil
}
log.Info("Found the runner with the same name", "runnerId", existingRunner.Id, "runnerScaleSetId", existingRunner.RunnerScaleSetId)
@@ -556,16 +577,16 @@ func (r *EphemeralRunnerReconciler) updateStatusWithRunnerConfig(ctx context.Con
log.Info("Removing the runner with the same name")
err := actionsClient.RemoveRunner(ctx, int64(existingRunner.Id))
if err != nil {
return ctrl.Result{}, fmt.Errorf("failed to remove runner from the service: %v", err)
return &ctrl.Result{}, fmt.Errorf("failed to remove runner from the service: %v", err)
}
log.Info("Removed the runner with the same name, re-queuing the reconciliation")
return ctrl.Result{Requeue: true}, nil
return &ctrl.Result{Requeue: true}, nil
}
// TODO: Do we want to mark the ephemeral runner as failed, and let EphemeralRunnerSet to clean it up, so we can recover from this situation?
// The situation is that the EphemeralRunner's name is already used by something else to register a runner, and we can't take the control back.
return ctrl.Result{}, fmt.Errorf("a runner with the same name exists but doesn't belong to this RunnerScaleSet: %v", err)
return &ctrl.Result{}, fmt.Errorf("a runner with the same name exists but doesn't belong to this RunnerScaleSet: %v", err)
}
log.Info("Created ephemeral runner JIT config", "runnerId", jitConfig.Runner.Id)
@@ -576,11 +597,20 @@ func (r *EphemeralRunnerReconciler) updateStatusWithRunnerConfig(ctx context.Con
obj.Status.RunnerJITConfig = jitConfig.EncodedJITConfig
})
if err != nil {
return ctrl.Result{}, fmt.Errorf("failed to update runner status for RunnerId/RunnerName/RunnerJITConfig: %v", err)
return &ctrl.Result{}, fmt.Errorf("failed to update runner status for RunnerId/RunnerName/RunnerJITConfig: %v", err)
}
// We want to continue without a requeue for faster pod creation.
//
// To do so, we update the status in-place, so that both continuing the loop
// and requeuing (which skips updateStatusWithRunnerConfig in the next pass)
// have the same effect.
ephemeralRunner.Status.RunnerId = jitConfig.Runner.Id
ephemeralRunner.Status.RunnerName = jitConfig.Runner.Name
ephemeralRunner.Status.RunnerJITConfig = jitConfig.EncodedJITConfig
log.Info("Updated ephemeral runner status with runnerId and runnerJITConfig")
return ctrl.Result{}, nil
return nil, nil
}
func (r *EphemeralRunnerReconciler) createPod(ctx context.Context, runner *v1alpha1.EphemeralRunner, secret *corev1.Secret, log logr.Logger) (ctrl.Result, error) {
@@ -633,7 +663,7 @@ func (r *EphemeralRunnerReconciler) createPod(ctx context.Context, runner *v1alp
}
log.Info("Creating new pod for ephemeral runner")
newPod := r.resourceBuilder.newEphemeralRunnerPod(ctx, runner, secret, envs...)
newPod := r.ResourceBuilder.newEphemeralRunnerPod(ctx, runner, secret, envs...)
if err := ctrl.SetControllerReference(runner, newPod, r.Scheme); err != nil {
log.Error(err, "Failed to set controller reference to a new pod")
@@ -656,21 +686,21 @@ func (r *EphemeralRunnerReconciler) createPod(ctx context.Context, runner *v1alp
return ctrl.Result{}, nil
}
func (r *EphemeralRunnerReconciler) createSecret(ctx context.Context, runner *v1alpha1.EphemeralRunner, log logr.Logger) (ctrl.Result, error) {
func (r *EphemeralRunnerReconciler) createSecret(ctx context.Context, runner *v1alpha1.EphemeralRunner, log logr.Logger) (*ctrl.Result, error) {
log.Info("Creating new secret for ephemeral runner")
jitSecret := r.resourceBuilder.newEphemeralRunnerJitSecret(runner)
jitSecret := r.ResourceBuilder.newEphemeralRunnerJitSecret(runner)
if err := ctrl.SetControllerReference(runner, jitSecret, r.Scheme); err != nil {
return ctrl.Result{}, fmt.Errorf("failed to set controller reference: %v", err)
return &ctrl.Result{}, fmt.Errorf("failed to set controller reference: %v", err)
}
log.Info("Created new secret spec for ephemeral runner")
if err := r.Create(ctx, jitSecret); err != nil {
return ctrl.Result{}, fmt.Errorf("failed to create jit secret: %v", err)
return &ctrl.Result{}, fmt.Errorf("failed to create jit secret: %v", err)
}
log.Info("Created ephemeral runner secret", "secretName", jitSecret.Name)
return ctrl.Result{Requeue: true}, nil
return nil, nil
}
// updateRunStatusFromPod is responsible for updating non-exiting statuses.
@@ -814,13 +844,14 @@ func (r *EphemeralRunnerReconciler) deleteRunnerFromService(ctx context.Context,
}
// SetupWithManager sets up the controller with the Manager.
func (r *EphemeralRunnerReconciler) SetupWithManager(mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr).
For(&v1alpha1.EphemeralRunner{}).
Owns(&corev1.Pod{}).
WithEventFilter(predicate.ResourceVersionChangedPredicate{}).
Named("ephemeral-runner-controller").
Complete(r)
func (r *EphemeralRunnerReconciler) SetupWithManager(mgr ctrl.Manager, opts ...Option) error {
return builderWithOptions(
ctrl.NewControllerManagedBy(mgr).
For(&v1alpha1.EphemeralRunner{}).
Owns(&corev1.Pod{}).
WithEventFilter(predicate.ResourceVersionChangedPredicate{}),
opts,
).Complete(r)
}
func runnerContainerStatus(pod *corev1.Pod) *corev1.ContainerStatus {


@@ -29,9 +29,9 @@ import (
)
const (
timeout = time.Second * 10
interval = time.Millisecond * 250
runnerImage = "ghcr.io/actions/actions-runner:latest"
ephemeralRunnerTimeout = time.Second * 20
ephemeralRunnerInterval = time.Millisecond * 250
runnerImage = "ghcr.io/actions/actions-runner:latest"
)
func newExampleRunner(name, namespace, configSecretName string) *v1alpha1.EphemeralRunner {
@@ -133,9 +133,9 @@ var _ = Describe("EphemeralRunner", func() {
n := len(created.Finalizers) // avoid capacity mismatch
return created.Finalizers[:n:n], nil
},
timeout,
interval,
).Should(BeEquivalentTo([]string{ephemeralRunnerActionsFinalizerName, ephemeralRunnerFinalizerName}))
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
).Should(BeEquivalentTo([]string{ephemeralRunnerFinalizerName, ephemeralRunnerActionsFinalizerName}))
Eventually(
func() (bool, error) {
@@ -147,8 +147,8 @@ var _ = Describe("EphemeralRunner", func() {
_, ok := secret.Data[jitTokenKey]
return ok, nil
},
timeout,
interval,
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
).Should(BeEquivalentTo(true))
Eventually(
@@ -160,8 +160,8 @@ var _ = Describe("EphemeralRunner", func() {
return pod.Name, nil
},
timeout,
interval,
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
).Should(BeEquivalentTo(ephemeralRunner.Name))
})
@@ -184,8 +184,8 @@ var _ = Describe("EphemeralRunner", func() {
}
return true, nil
},
timeout,
interval,
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
).Should(BeEquivalentTo(true))
})
@@ -203,7 +203,7 @@ var _ = Describe("EphemeralRunner", func() {
return "", nil
}
return updated.Status.Phase, nil
}, timeout, interval).Should(BeEquivalentTo(corev1.PodFailed))
}, ephemeralRunnerTimeout, ephemeralRunnerInterval).Should(BeEquivalentTo(corev1.PodFailed))
Expect(updated.Status.Reason).Should(Equal("InvalidPod"))
Expect(updated.Status.Message).Should(Equal("Failed to create the pod: pods \"invalid-ephemeral-runner\" is forbidden: no PriorityClass with name notexist was found"))
})
@@ -247,8 +247,8 @@ var _ = Describe("EphemeralRunner", func() {
}
return true, nil
},
timeout,
interval,
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
).Should(BeEquivalentTo(true))
// create runner linked secret
@@ -273,8 +273,8 @@ var _ = Describe("EphemeralRunner", func() {
}
return true, nil
},
timeout,
interval,
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
).Should(BeEquivalentTo(true))
err = k8sClient.Delete(ctx, ephemeralRunner)
@@ -289,8 +289,8 @@ var _ = Describe("EphemeralRunner", func() {
}
return kerrors.IsNotFound(err), nil
},
timeout,
interval,
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
).Should(BeEquivalentTo(true))
Eventually(
@@ -302,8 +302,8 @@ var _ = Describe("EphemeralRunner", func() {
}
return kerrors.IsNotFound(err), nil
},
timeout,
interval,
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
).Should(BeEquivalentTo(true))
Eventually(
@@ -315,8 +315,8 @@ var _ = Describe("EphemeralRunner", func() {
}
return kerrors.IsNotFound(err), nil
},
timeout,
interval,
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
).Should(BeEquivalentTo(true))
Eventually(
@@ -328,8 +328,8 @@ var _ = Describe("EphemeralRunner", func() {
}
return kerrors.IsNotFound(err), nil
},
timeout,
interval,
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
).Should(BeEquivalentTo(true))
Eventually(
@@ -341,8 +341,8 @@ var _ = Describe("EphemeralRunner", func() {
}
return kerrors.IsNotFound(err), nil
},
timeout,
interval,
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
).Should(BeEquivalentTo(true))
})
@@ -356,8 +356,8 @@ var _ = Describe("EphemeralRunner", func() {
}
return updatedEphemeralRunner.Status.RunnerId, nil
},
timeout,
interval,
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
).Should(BeNumerically(">", 0))
})
@@ -371,8 +371,8 @@ var _ = Describe("EphemeralRunner", func() {
}
return true, nil
},
timeout,
interval,
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
).Should(BeEquivalentTo(true))
for _, phase := range []corev1.PodPhase{corev1.PodRunning, corev1.PodPending} {
@@ -395,8 +395,8 @@ var _ = Describe("EphemeralRunner", func() {
}
return updated.Status.Phase, nil
},
timeout,
interval,
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
).Should(BeEquivalentTo(phase))
}
})
@@ -411,8 +411,8 @@ var _ = Describe("EphemeralRunner", func() {
}
return true, nil
},
timeout,
interval,
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
).Should(BeEquivalentTo(true))
pod.Status.Phase = corev1.PodRunning
@@ -427,7 +427,7 @@ var _ = Describe("EphemeralRunner", func() {
}
return updated.Status.Phase, nil
},
timeout,
ephemeralRunnerTimeout,
).Should(BeEquivalentTo(""))
})
@@ -462,8 +462,8 @@ var _ = Describe("EphemeralRunner", func() {
Expect(err).To(BeNil(), "Failed to update pod status")
return false, fmt.Errorf("pod hasn't failed 5 times yet")
},
timeout,
interval,
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
).Should(BeEquivalentTo(true), "we should stop creating pod after 5 failures")
// In case we still have pod created due to controller-runtime cache delay, mark the container as exited
@@ -488,7 +488,7 @@ var _ = Describe("EphemeralRunner", func() {
return "", err
}
return updated.Status.Reason, nil
}, timeout, interval).Should(BeEquivalentTo("TooManyPodFailures"), "Reason should be TooManyPodFailures")
}, ephemeralRunnerTimeout, ephemeralRunnerInterval).Should(BeEquivalentTo("TooManyPodFailures"), "Reason should be TooManyPodFailures")
// EphemeralRunner should not have any pod
Eventually(func() (bool, error) {
@@ -497,7 +497,7 @@ var _ = Describe("EphemeralRunner", func() {
return false, nil
}
return kerrors.IsNotFound(err), nil
}, timeout, interval).Should(BeEquivalentTo(true))
}, ephemeralRunnerTimeout, ephemeralRunnerInterval).Should(BeEquivalentTo(true))
})
It("It should re-create pod on eviction", func() {
@@ -510,8 +510,8 @@ var _ = Describe("EphemeralRunner", func() {
}
return true, nil
},
timeout,
interval,
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
).Should(BeEquivalentTo(true))
pod.Status.Phase = corev1.PodFailed
@@ -530,7 +530,7 @@ var _ = Describe("EphemeralRunner", func() {
return false, err
}
return len(updated.Status.Failures) == 1, nil
}, timeout, interval).Should(BeEquivalentTo(true))
}, ephemeralRunnerTimeout, ephemeralRunnerInterval).Should(BeEquivalentTo(true))
// should re-create after failure
Eventually(
@@ -541,8 +541,8 @@ var _ = Describe("EphemeralRunner", func() {
}
return true, nil
},
timeout,
interval,
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
).Should(BeEquivalentTo(true))
})
@@ -555,8 +555,8 @@ var _ = Describe("EphemeralRunner", func() {
}
return true, nil
},
timeout,
interval,
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
).Should(BeEquivalentTo(true))
pod.Status.ContainerStatuses = append(pod.Status.ContainerStatuses, corev1.ContainerStatus{
@@ -577,7 +577,7 @@ var _ = Describe("EphemeralRunner", func() {
return false, err
}
return len(updated.Status.Failures) == 1, nil
}, timeout, interval).Should(BeEquivalentTo(true))
}, ephemeralRunnerTimeout, ephemeralRunnerInterval).Should(BeEquivalentTo(true))
// should re-create after failure
Eventually(
@@ -588,8 +588,8 @@ var _ = Describe("EphemeralRunner", func() {
}
return true, nil
},
timeout,
interval,
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
).Should(BeEquivalentTo(true))
})
@@ -602,8 +602,8 @@ var _ = Describe("EphemeralRunner", func() {
}
return true, nil
},
timeout,
interval,
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
).Should(BeEquivalentTo(true))
// first set phase to running
@@ -627,8 +627,8 @@ var _ = Describe("EphemeralRunner", func() {
}
return updated.Status.Phase, nil
},
timeout,
interval,
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
).Should(BeEquivalentTo(corev1.PodRunning))
// set phase to succeeded
@@ -644,7 +644,7 @@ var _ = Describe("EphemeralRunner", func() {
}
return updated.Status.Phase, nil
},
timeout,
ephemeralRunnerTimeout,
).Should(BeEquivalentTo(corev1.PodRunning))
})
})
@@ -700,7 +700,7 @@ var _ = Describe("EphemeralRunner", func() {
return false, err
}
return true, nil
}, timeout, interval).Should(BeEquivalentTo(true))
}, ephemeralRunnerTimeout, ephemeralRunnerInterval).Should(BeEquivalentTo(true))
pod.Status.ContainerStatuses = append(pod.Status.ContainerStatuses, corev1.ContainerStatus{
Name: EphemeralRunnerContainerName,
@@ -720,7 +720,7 @@ var _ = Describe("EphemeralRunner", func() {
return "", nil
}
return updated.Status.Phase, nil
}, timeout, interval).Should(BeEquivalentTo(corev1.PodSucceeded))
}, ephemeralRunnerTimeout, ephemeralRunnerInterval).Should(BeEquivalentTo(corev1.PodSucceeded))
})
})
@@ -800,7 +800,7 @@ var _ = Describe("EphemeralRunner", func() {
return proxySuccessfulllyCalled
},
2*time.Second,
interval,
ephemeralRunnerInterval,
).Should(BeEquivalentTo(true))
})
@@ -825,8 +825,8 @@ var _ = Describe("EphemeralRunner", func() {
err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, pod)
g.Expect(err).To(BeNil(), "failed to get ephemeral runner pod")
},
timeout,
interval,
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
).Should(Succeed(), "failed to get ephemeral runner pod")
Expect(pod.Spec.Containers[0].Env).To(ContainElement(corev1.EnvVar{
@@ -958,7 +958,7 @@ var _ = Describe("EphemeralRunner", func() {
return serverSuccessfullyCalled
},
2*time.Second,
interval,
ephemeralRunnerInterval,
).Should(BeTrue(), "failed to contact server")
})
})


@@ -53,7 +53,7 @@ type EphemeralRunnerSetReconciler struct {
PublishMetrics bool
resourceBuilder resourceBuilder
ResourceBuilder
}
//+kubebuilder:rbac:groups=actions.github.com,resources=ephemeralrunnersets,verbs=get;list;watch;create;update;patch;delete
@@ -360,7 +360,7 @@ func (r *EphemeralRunnerSetReconciler) createEphemeralRunners(ctx context.Contex
// Track multiple errors at once and return the bundle.
errs := make([]error, 0)
for i := 0; i < count; i++ {
ephemeralRunner := r.resourceBuilder.newEphemeralRunner(runnerSet)
ephemeralRunner := r.ResourceBuilder.newEphemeralRunner(runnerSet)
if runnerSet.Spec.EphemeralRunnerSpec.Proxy != nil {
ephemeralRunner.Spec.ProxySecretRef = proxyEphemeralRunnerSetSecretName(runnerSet)
}


@@ -30,7 +30,7 @@ import (
)
const (
ephemeralRunnerSetTestTimeout = time.Second * 10
ephemeralRunnerSetTestTimeout = time.Second * 20
ephemeralRunnerSetTestInterval = time.Millisecond * 250
ephemeralRunnerSetTestGitHubToken = "gh_token"
)
@@ -794,7 +794,6 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
}
return len(runnerList.Items), nil
},
ephemeralRunnerSetTestTimeout,
ephemeralRunnerSetTestInterval,
@@ -1359,7 +1358,7 @@ var _ = Describe("Test EphemeralRunnerSet controller with proxy settings", func(
return proxySuccessfulllyCalled
},
2*time.Second,
interval,
ephemeralRunnerInterval,
).Should(BeEquivalentTo(true))
})
})


@@ -0,0 +1,56 @@
package actionsgithubcom
import (
"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/controller"
)
// Options is the optional configuration for the controllers, which can be
// set via command-line flags or environment variables.
type Options struct {
// RunnerMaxConcurrentReconciles is the maximum number of concurrent Reconciles which can be run
// by the EphemeralRunnerController.
RunnerMaxConcurrentReconciles int
}
// OptionsWithDefault returns the default options.
// This is here to maintain the options and their default values in one place,
// rather than having to correlate those in multiple places.
func OptionsWithDefault() Options {
return Options{
RunnerMaxConcurrentReconciles: 2,
}
}
type Option func(*controller.Options)
// WithMaxConcurrentReconciles sets the maximum number of concurrent Reconciles which can be run.
//
// This is useful to improve the throughput of the controller, but it may also increase the load on the API server and
// the external service (e.g. GitHub API). The default value is 1, as defined by controller-runtime.
//
// See https://github.com/actions/actions-runner-controller/issues/3021 for more information
// on real-world use cases and the potential impact of this option.
func WithMaxConcurrentReconciles(n int) Option {
return func(b *controller.Options) {
b.MaxConcurrentReconciles = n
}
}
// builderWithOptions applies the given options to the provided builder, if any.
// This is a helper function to avoid the need to import the controller-runtime package in every reconciler source file
// and the command package that creates the controller.
// This is also useful for reducing code duplication around setting controller options in
// multiple reconcilers.
func builderWithOptions(b *builder.Builder, opts []Option) *builder.Builder {
if len(opts) == 0 {
return b
}
var controllerOpts controller.Options
for _, opt := range opts {
opt(&controllerOpts)
}
return b.WithOptions(controllerOpts)
}
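As a rough usage sketch (the names mgr, runnerReconciler, and setupLog are placeholders, not taken from this diff), the command package would thread a configured value through SetupWithManager along these lines:
// Hypothetical wiring; assumes an existing ctrl.Manager mgr and an
// *EphemeralRunnerReconciler runnerReconciler constructed elsewhere.
opts := OptionsWithDefault() // RunnerMaxConcurrentReconciles defaults to 2
if err := runnerReconciler.SetupWithManager(mgr,
	WithMaxConcurrentReconciles(opts.RunnerMaxConcurrentReconciles),
); err != nil {
	setupLog.Error(err, "unable to set up EphemeralRunner controller")
	os.Exit(1)
}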


@@ -8,6 +8,7 @@ import (
"math"
"net"
"strconv"
"strings"
"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
"github.com/actions/actions-runner-controller/build"
@@ -68,9 +69,11 @@ func SetListenerEntrypoint(entrypoint string) {
}
}
type resourceBuilder struct{}
type ResourceBuilder struct {
ExcludeLabelPropagationPrefixes []string
}
func (b *resourceBuilder) newAutoScalingListener(autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet, namespace, image string, imagePullSecrets []corev1.LocalObjectReference) (*v1alpha1.AutoscalingListener, error) {
func (b *ResourceBuilder) newAutoScalingListener(autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet, namespace, image string, imagePullSecrets []corev1.LocalObjectReference) (*v1alpha1.AutoscalingListener, error) {
runnerScaleSetId, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdAnnotationKey])
if err != nil {
return nil, err
@@ -85,7 +88,7 @@ func (b *resourceBuilder) newAutoScalingListener(autoscalingRunnerSet *v1alpha1.
effectiveMinRunners = *autoscalingRunnerSet.Spec.MinRunners
}
labels := mergeLabels(autoscalingRunnerSet.Labels, map[string]string{
labels := b.mergeLabels(autoscalingRunnerSet.Labels, map[string]string{
LabelKeyGitHubScaleSetNamespace: autoscalingRunnerSet.Namespace,
LabelKeyGitHubScaleSetName: autoscalingRunnerSet.Name,
LabelKeyKubernetesPartOf: labelValueKubernetesPartOf,
@@ -150,7 +153,7 @@ func (lm *listenerMetricsServerConfig) containerPort() (corev1.ContainerPort, er
}, nil
}
func (b *resourceBuilder) newScaleSetListenerConfig(autoscalingListener *v1alpha1.AutoscalingListener, secret *corev1.Secret, metricsConfig *listenerMetricsServerConfig, cert string) (*corev1.Secret, error) {
func (b *ResourceBuilder) newScaleSetListenerConfig(autoscalingListener *v1alpha1.AutoscalingListener, secret *corev1.Secret, metricsConfig *listenerMetricsServerConfig, cert string) (*corev1.Secret, error) {
var (
metricsAddr = ""
metricsEndpoint = ""
@@ -213,7 +216,7 @@ func (b *resourceBuilder) newScaleSetListenerConfig(autoscalingListener *v1alpha
}, nil
}
func (b *resourceBuilder) newScaleSetListenerPod(autoscalingListener *v1alpha1.AutoscalingListener, podConfig *corev1.Secret, serviceAccount *corev1.ServiceAccount, secret *corev1.Secret, metricsConfig *listenerMetricsServerConfig, envs ...corev1.EnvVar) (*corev1.Pod, error) {
func (b *ResourceBuilder) newScaleSetListenerPod(autoscalingListener *v1alpha1.AutoscalingListener, podConfig *corev1.Secret, serviceAccount *corev1.ServiceAccount, secret *corev1.Secret, metricsConfig *listenerMetricsServerConfig, envs ...corev1.EnvVar) (*corev1.Pod, error) {
listenerEnv := []corev1.EnvVar{
{
Name: "LISTENER_CONFIG_PATH",
@@ -406,12 +409,12 @@ func mergeListenerContainer(base, from *corev1.Container) {
base.TTY = from.TTY
}
func (b *resourceBuilder) newScaleSetListenerServiceAccount(autoscalingListener *v1alpha1.AutoscalingListener) *corev1.ServiceAccount {
func (b *ResourceBuilder) newScaleSetListenerServiceAccount(autoscalingListener *v1alpha1.AutoscalingListener) *corev1.ServiceAccount {
return &corev1.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{
Name: scaleSetListenerServiceAccountName(autoscalingListener),
Namespace: autoscalingListener.Namespace,
Labels: mergeLabels(autoscalingListener.Labels, map[string]string{
Labels: b.mergeLabels(autoscalingListener.Labels, map[string]string{
LabelKeyGitHubScaleSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
LabelKeyGitHubScaleSetName: autoscalingListener.Spec.AutoscalingRunnerSetName,
}),
@@ -419,14 +422,14 @@ func (b *resourceBuilder) newScaleSetListenerServiceAccount(autoscalingListener
}
}
func (b *resourceBuilder) newScaleSetListenerRole(autoscalingListener *v1alpha1.AutoscalingListener) *rbacv1.Role {
func (b *ResourceBuilder) newScaleSetListenerRole(autoscalingListener *v1alpha1.AutoscalingListener) *rbacv1.Role {
rules := rulesForListenerRole([]string{autoscalingListener.Spec.EphemeralRunnerSetName})
rulesHash := hash.ComputeTemplateHash(&rules)
newRole := &rbacv1.Role{
ObjectMeta: metav1.ObjectMeta{
Name: scaleSetListenerRoleName(autoscalingListener),
Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
Labels: mergeLabels(autoscalingListener.Labels, map[string]string{
Labels: b.mergeLabels(autoscalingListener.Labels, map[string]string{
LabelKeyGitHubScaleSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
LabelKeyGitHubScaleSetName: autoscalingListener.Spec.AutoscalingRunnerSetName,
labelKeyListenerNamespace: autoscalingListener.Namespace,
@@ -440,7 +443,7 @@ func (b *resourceBuilder) newScaleSetListenerRole(autoscalingListener *v1alpha1.
return newRole
}
func (b *resourceBuilder) newScaleSetListenerRoleBinding(autoscalingListener *v1alpha1.AutoscalingListener, listenerRole *rbacv1.Role, serviceAccount *corev1.ServiceAccount) *rbacv1.RoleBinding {
func (b *ResourceBuilder) newScaleSetListenerRoleBinding(autoscalingListener *v1alpha1.AutoscalingListener, listenerRole *rbacv1.Role, serviceAccount *corev1.ServiceAccount) *rbacv1.RoleBinding {
roleRef := rbacv1.RoleRef{
Kind: "Role",
Name: listenerRole.Name,
@@ -460,7 +463,7 @@ func (b *resourceBuilder) newScaleSetListenerRoleBinding(autoscalingListener *v1
ObjectMeta: metav1.ObjectMeta{
Name: scaleSetListenerRoleName(autoscalingListener),
Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
Labels: mergeLabels(autoscalingListener.Labels, map[string]string{
Labels: b.mergeLabels(autoscalingListener.Labels, map[string]string{
LabelKeyGitHubScaleSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
LabelKeyGitHubScaleSetName: autoscalingListener.Spec.AutoscalingRunnerSetName,
labelKeyListenerNamespace: autoscalingListener.Namespace,
@@ -476,14 +479,14 @@ func (b *resourceBuilder) newScaleSetListenerRoleBinding(autoscalingListener *v1
return newRoleBinding
}
func (b *resourceBuilder) newScaleSetListenerSecretMirror(autoscalingListener *v1alpha1.AutoscalingListener, secret *corev1.Secret) *corev1.Secret {
func (b *ResourceBuilder) newScaleSetListenerSecretMirror(autoscalingListener *v1alpha1.AutoscalingListener, secret *corev1.Secret) *corev1.Secret {
dataHash := hash.ComputeTemplateHash(&secret.Data)
newListenerSecret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: scaleSetListenerSecretMirrorName(autoscalingListener),
Namespace: autoscalingListener.Namespace,
Labels: mergeLabels(autoscalingListener.Labels, map[string]string{
Labels: b.mergeLabels(autoscalingListener.Labels, map[string]string{
LabelKeyGitHubScaleSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
LabelKeyGitHubScaleSetName: autoscalingListener.Spec.AutoscalingRunnerSetName,
"secret-data-hash": dataHash,
@@ -495,14 +498,14 @@ func (b *resourceBuilder) newScaleSetListenerSecretMirror(autoscalingListener *v
return newListenerSecret
}
func (b *resourceBuilder) newEphemeralRunnerSet(autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet) (*v1alpha1.EphemeralRunnerSet, error) {
func (b *ResourceBuilder) newEphemeralRunnerSet(autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet) (*v1alpha1.EphemeralRunnerSet, error) {
runnerScaleSetId, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdAnnotationKey])
if err != nil {
return nil, err
}
runnerSpecHash := autoscalingRunnerSet.RunnerSetSpecHash()
labels := mergeLabels(autoscalingRunnerSet.Labels, map[string]string{
labels := b.mergeLabels(autoscalingRunnerSet.Labels, map[string]string{
LabelKeyKubernetesPartOf: labelValueKubernetesPartOf,
LabelKeyKubernetesComponent: "runner-set",
LabelKeyKubernetesVersion: autoscalingRunnerSet.Labels[LabelKeyKubernetesVersion],
@@ -515,7 +518,6 @@ func (b *resourceBuilder) newEphemeralRunnerSet(autoscalingRunnerSet *v1alpha1.A
}
newAnnotations := map[string]string{
AnnotationKeyGitHubRunnerGroupName: autoscalingRunnerSet.Annotations[AnnotationKeyGitHubRunnerGroupName],
AnnotationKeyGitHubRunnerScaleSetName: autoscalingRunnerSet.Annotations[AnnotationKeyGitHubRunnerScaleSetName],
annotationKeyRunnerSpecHash: runnerSpecHash,
@@ -545,7 +547,7 @@ func (b *resourceBuilder) newEphemeralRunnerSet(autoscalingRunnerSet *v1alpha1.A
return newEphemeralRunnerSet, nil
}
func (b *resourceBuilder) newEphemeralRunner(ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet) *v1alpha1.EphemeralRunner {
func (b *ResourceBuilder) newEphemeralRunner(ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet) *v1alpha1.EphemeralRunner {
labels := make(map[string]string)
for k, v := range ephemeralRunnerSet.Labels {
if k == LabelKeyKubernetesComponent {
@@ -572,7 +574,7 @@ func (b *resourceBuilder) newEphemeralRunner(ephemeralRunnerSet *v1alpha1.Epheme
}
}
func (b *resourceBuilder) newEphemeralRunnerPod(ctx context.Context, runner *v1alpha1.EphemeralRunner, secret *corev1.Secret, envs ...corev1.EnvVar) *corev1.Pod {
func (b *ResourceBuilder) newEphemeralRunnerPod(ctx context.Context, runner *v1alpha1.EphemeralRunner, secret *corev1.Secret, envs ...corev1.EnvVar) *corev1.Pod {
var newPod corev1.Pod
labels := map[string]string{}
@@ -640,7 +642,7 @@ func (b *resourceBuilder) newEphemeralRunnerPod(ctx context.Context, runner *v1a
return &newPod
}
func (b *resourceBuilder) newEphemeralRunnerJitSecret(ephemeralRunner *v1alpha1.EphemeralRunner) *corev1.Secret {
func (b *ResourceBuilder) newEphemeralRunnerJitSecret(ephemeralRunner *v1alpha1.EphemeralRunner) *corev1.Secret {
return &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: ephemeralRunner.Name,
@@ -748,14 +750,26 @@ func trimLabelValue(val string) string {
return val
}
func mergeLabels(base, overwrite map[string]string) map[string]string {
mergedLabels := map[string]string{}
func (b *ResourceBuilder) mergeLabels(base, overwrite map[string]string) map[string]string {
mergedLabels := make(map[string]string, len(base))
base:
for k, v := range base {
for _, prefix := range b.ExcludeLabelPropagationPrefixes {
if strings.HasPrefix(k, prefix) {
continue base
}
}
mergedLabels[k] = v
}
overwrite:
for k, v := range overwrite {
for _, prefix := range b.ExcludeLabelPropagationPrefixes {
if strings.HasPrefix(k, prefix) {
continue overwrite
}
}
mergedLabels[k] = v
}
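A quick illustration with hypothetical values (mergeLabels is unexported, so this runs only inside the package):
b := ResourceBuilder{ExcludeLabelPropagationPrefixes: []string{"example.com/"}}
merged := b.mergeLabels(
	map[string]string{"example.com/team": "a", "team": "a"}, // base
	map[string]string{"app": "runner"},                      // overwrite
)
// merged == map[string]string{"team": "a", "app": "runner"}:
// the exclusion is a plain strings.HasPrefix check, so "example.com/team"
// is filtered while unprefixed keys propagate; an entry without a trailing
// slash excludes the exact key and any longer key it prefixes.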


@@ -19,9 +19,13 @@ func TestLabelPropagation(t *testing.T) {
Name: "test-scale-set",
Namespace: "test-ns",
Labels: map[string]string{
LabelKeyKubernetesPartOf: labelValueKubernetesPartOf,
LabelKeyKubernetesVersion: "0.2.0",
"arbitrary-label": "random-value",
LabelKeyKubernetesPartOf: labelValueKubernetesPartOf,
LabelKeyKubernetesVersion: "0.2.0",
"arbitrary-label": "random-value",
"example.com/label": "example-value",
"example.com/example": "example-value",
"directly.excluded.org/label": "excluded-value",
"directly.excluded.org/arbitrary": "not-excluded-value",
},
Annotations: map[string]string{
runnerScaleSetIdAnnotationKey: "1",
@@ -34,7 +38,12 @@ func TestLabelPropagation(t *testing.T) {
},
}
var b resourceBuilder
b := ResourceBuilder{
ExcludeLabelPropagationPrefixes: []string{
"example.com/",
"directly.excluded.org/label",
},
}
ephemeralRunnerSet, err := b.newEphemeralRunnerSet(&autoscalingRunnerSet)
require.NoError(t, err)
assert.Equal(t, labelValueKubernetesPartOf, ephemeralRunnerSet.Labels[LabelKeyKubernetesPartOf])
@@ -63,6 +72,11 @@ func TestLabelPropagation(t *testing.T) {
assert.Equal(t, "repo", listener.Labels[LabelKeyGitHubRepository])
assert.Equal(t, autoscalingRunnerSet.Labels["arbitrary-label"], listener.Labels["arbitrary-label"])
assert.NotContains(t, listener.Labels, "example.com/label")
assert.NotContains(t, listener.Labels, "example.com/example")
assert.NotContains(t, listener.Labels, "directly.excluded.org/label")
assert.Equal(t, "not-excluded-value", listener.Labels["directly.excluded.org/arbitrary"])
listenerServiceAccount := &corev1.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{
Name: "test",
@@ -128,7 +142,7 @@ func TestGitHubURLTrimLabelValues(t *testing.T) {
GitHubConfigUrl: fmt.Sprintf("https://github.com/%s/%s", organization, repository),
}
var b resourceBuilder
var b ResourceBuilder
ephemeralRunnerSet, err := b.newEphemeralRunnerSet(autoscalingRunnerSet)
require.NoError(t, err)
assert.Len(t, ephemeralRunnerSet.Labels[LabelKeyGitHubEnterprise], 0)
@@ -152,7 +166,7 @@ func TestGitHubURLTrimLabelValues(t *testing.T) {
GitHubConfigUrl: fmt.Sprintf("https://github.com/enterprises/%s", enterprise),
}
var b resourceBuilder
var b ResourceBuilder
ephemeralRunnerSet, err := b.newEphemeralRunnerSet(autoscalingRunnerSet)
require.NoError(t, err)
assert.Len(t, ephemeralRunnerSet.Labels[LabelKeyGitHubEnterprise], 63)


@@ -42,7 +42,7 @@ eliminate some duplication:
`-coverprofile` flags: while `-short` is used to skip [old ARC E2E
tests](https://github.com/actions/actions-runner-controller/blob/master/test/e2e/e2e_test.go#L85-L87),
`-coverprofile` is adding to the test time without really giving us any value
in return. We should also start using `actions/setup-go@v4` to take advantage
in return. We should also start using `actions/setup-go@v5` to take advantage
of caching (it would speed up our tests by a lot) or enable it on `v3` if we
have a strong reason not to upgrade. We should keep ignoring our E2E tests too
as those will be run elsewhere (either use `Short` there too or ignoring the


@@ -43,6 +43,35 @@ You can follow [this troubleshooting guide](https://docs.github.com/en/actions/h
## Changelog
### v0.10.0
This release includes major improvements to the runner provisioning duration. In short, you should see less latency between queueing a workflow run and having a runner available to execute the job.
Make sure to check [#3832](https://github.com/actions/actions-runner-controller/pull/3832) and [#3848](https://github.com/actions/actions-runner-controller/pull/3848) for details on how to fine-tune that behavior.
### Major changes
1. Add exponential backoff when generating runner reg tokens [#3724](https://github.com/actions/actions-runner-controller/pull/3724)
1. Make EphemeralRunnerController MaxConcurrentReconciles configurable [#3832](https://github.com/actions/actions-runner-controller/pull/3832)
1. Make EphemeralRunnerReconciler create runner pods earlier [#3831](https://github.com/actions/actions-runner-controller/pull/3831)
1. Make k8s client rate limiter parameters configurable [#3848](https://github.com/actions/actions-runner-controller/pull/3848)
### Minor changes
1. Bump github.com/bradleyfalzon/ghinstallation/v2 from `2.8.0` to `2.12.0` [#3837](https://github.com/actions/actions-runner-controller/pull/3837)
1. Bump golang.org/x/crypto from `0.22.0` to `0.31.0` [#3844](https://github.com/actions/actions-runner-controller/pull/3844)
1. Update docs with details for the dashboard visualizations [#3696](https://github.com/actions/actions-runner-controller/pull/3696)
### v0.9.3
1. AutoscalingListener controller: Inspect listener container state instead of pod phase [#3548](https://github.com/actions/actions-runner-controller/pull/3548)
1. Exclude label prefix propagation [#3607](https://github.com/actions/actions-runner-controller/pull/3607)
1. Check status code of fetch access token for github app [#3568](https://github.com/actions/actions-runner-controller/pull/3568)
1. Remove .Named() from the ephemeral runner controller [#3596](https://github.com/actions/actions-runner-controller/pull/3596)
1. Customize work directory [#3477](https://github.com/actions/actions-runner-controller/pull/3477)
1. Fix problem with ephemeralRunner Succeeded state before build executed [#3528](https://github.com/actions/actions-runner-controller/pull/3528)
1. Remove finalizers in one pass to speed up cleanups AutoscalingRunnerSet [#3536](https://github.com/actions/actions-runner-controller/pull/3536)
### v0.9.2
1. Refresh session if token expires during delete message [#3529](https://github.com/actions/actions-runner-controller/pull/3529)
@@ -81,21 +110,24 @@ Please evaluate these changes carefully before upgrading.
1. Fix overscaling when the controller is much faster then the listener [#3371](https://github.com/actions/actions-runner-controller/pull/3371)
1. Add retry on 401 and 403 for runner-registration [#3377](https://github.com/actions/actions-runner-controller/pull/3377)
### v0.8.3
1. Expose volumeMounts and volumes in gha-runner-scale-set-controller [#3260](https://github.com/actions/actions-runner-controller/pull/3260)
1. Refer to the correct variable in discovery error message [#3296](https://github.com/actions/actions-runner-controller/pull/3296)
1. Fix acquire jobs after session refresh ghalistener [#3307](https://github.com/actions/actions-runner-controller/pull/3307)
### v0.8.2
1. Add listener graceful termination period and background context after the message is received [#3187](https://github.com/actions/actions-runner-controller/pull/3187)
1. Publish metrics in the new ghalistener [#3193](https://github.com/actions/actions-runner-controller/pull/3193)
1. Delete message session when listener.Listen returns [#3240](https://github.com/actions/actions-runner-controller/pull/3240)
### v0.8.1
1. Fix proxy issue in new listener client [#3181](https://github.com/actions/actions-runner-controller/pull/3181)
### v0.8.0
1. Change listener container name [#3167](https://github.com/actions/actions-runner-controller/pull/3167)
1. Fix empty env and volumeMounts object on default setup [#3166](https://github.com/actions/actions-runner-controller/pull/3166)
1. Fix override listener pod spec [#3161](https://github.com/actions/actions-runner-controller/pull/3161)
@@ -117,6 +149,7 @@ Please evaluate these changes carefully before upgrading.
1. ADR: Changing semantics of min runners to be min idle runners [#3040](https://github.com/actions/actions-runner-controller/pull/3040)
### v0.7.0
1. Add ResizePolicy and RestartPolicy on mergeListenerContainer [#3075](https://github.com/actions/actions-runner-controller/pull/3075)
1. feat: GHA controller Helm Chart quoted labels [#3061](https://github.com/actions/actions-runner-controller/pull/3061)
1. Update authorization for PAT to be Bearer as documented [#3039](https://github.com/actions/actions-runner-controller/pull/3039)
@@ -131,12 +164,14 @@ Please evaluate these changes carefully before upgrading.
1. chore: Service accounts in Kubernetes mode can now be annotated. [#2566](https://github.com/actions/actions-runner-controller/pull/2566)
### v0.6.1
1. Replace TLS dockerd connection with unix socket [#2833](https://github.com/actions/actions-runner-controller/pull/2833)
1. Fix name override labels when runnerScaleSetName value is set [#2915](https://github.com/actions/actions-runner-controller/pull/2915)
1. Fix nil map when annotations are applied [#2916](https://github.com/actions/actions-runner-controller/pull/2916)
1. Updates: container-hooks to v0.4.0 [#2928](https://github.com/actions/actions-runner-controller/pull/2928)
### v0.6.0
1. Fix parsing AcquireJob MessageQueueTokenExpiredError [#2837](https://github.com/actions/actions-runner-controller/pull/2837)
1. Set restart policy on the runner pod to Never if restartPolicy is not set in template [#2787](https://github.com/actions/actions-runner-controller/pull/2787)
1. Set the AutoscalingRunnerSet name to runnerScaleSetName [#2803](https://github.com/actions/actions-runner-controller/pull/2803)


@@ -13,3 +13,25 @@ We do not intend to provide a supported ARC dashboard. This is simply a referenc
1. Make sure to have [Grafana](https://grafana.com/docs/grafana/latest/installation/) and [Prometheus](https://prometheus.io/docs/prometheus/latest/installation/) running in your cluster.
2. Make sure that Prometheus is properly scraping the metrics endpoints of the controller-manager and listeners.
3. Import the [dashboard](ARC-Autoscaling-Runner-Set-Monitoring_1692627561838.json) into Grafana.
## Details
This dashboard demonstrates some of the metrics provided by ARC and the underlying Kubernetes runtime. It provides a sample visualization of the behavior of the runner scale set, the ARC controllers, and the listeners. This should not be considered a comprehensive dashboard; it is a starting point that can be used with other metrics and logs to understand the health of the cluster. Review the [GitHub documentation detailing the Actions Runner Controller metrics and how to enable them](https://docs.github.com/en/enterprise-server@3.10/actions/hosting-your-own-runners/managing-self-hosted-runners-with-actions-runner-controller/deploying-runner-scale-sets-with-actions-runner-controller#enabling-metrics).
The dashboard includes the following metrics:
| Label | Description |
| -------------------------------- | ----------------------------------------------------|
| Active listeners | The number of listeners currently running and attempting to manage jobs for the scale set. This should match the number of scale sets deployed. |
| Runner States | Displays the number of runners in a given state. The finished and deleted states are not included in this panel. |
| Failed (total) | The total number of ephemeral runners that have failed to properly start. This may require reviewing the custom resource and logs to identify and resolve the root causes. Common causes include resource issues and failure to pull the required image. |
| Pending (total) | The total number of ephemeral runners that ARC has requested and is waiting for Kubernetes to provide in a running state. If the Kubernetes API server is responsive, this will typically match the number of runner pods that are in a pending state. This number includes requests for runner pods that have not yet been scheduled. When this number is higher than the number of runner pods in a pending state, it can indicate performance issues with the API server and resource contention. |
| Idle (total) | The total number of ephemeral runners that are available to accept jobs across all scale sets. Keeping a pool of idle runners can enable a faster start time under load, but excessive idle runners will consume resources and can prevent nodes from scaling down. |
| Total assigned jobs per listener | The number of workflow jobs acquired and assigned to the listener. The listener must provide supporting runners to complete these jobs. Once jobs are assigned, they cannot be delegated to other listeners and must be processed by the scale set or cancelled. |
| Assigned vs running jobs | Compares the number of jobs assigned against the number of runners that are currently processing jobs. When the number of running jobs is lower than the number of assigned jobs, it can indicate that ARC is waiting on Kubernetes to provide and start additional runners. |
| Average startup duration | The average time in seconds between when jobs are assigned and when a runner accepts the job and begins processing. An increasing duration can indicate that the cluster has resource contention or a lack of available nodes for scheduling jobs. |
| Average execution duration | The average time in seconds that runners are taking to complete a job. Changes in this value reflect the efficiency of workflow jobs and the pod configuration. If the value is decreasing without changes to the job, it can indicate resource contention or CPU throttling. |
| Reconciliation errors | Reconciliation is the process of a controller ensuring the desired state and actual state of the resources match. Each time an event occurs on a resource watched by the controller, the controller is required to indicate if the new state matches the desired state. Kubernetes adds a task to the work queue for the controller to perform this reconciliation. Errors indicate that the controller has not achieved a desired state and is requesting Kubernetes to queue another request for reconciliation. Ideally, this number remains close to zero. An increasing number can indicate resource contention or delays processing API server requests. This reflects Kubernetes resources that ARC is waiting to be provided or to reach the necessary state. As a concrete example, ARC will request the creation of a secret prior to creating the pod. If the response indicates the secret is not immediately ready, ARC will requeue the reconciliation task with the error details, incrementing this count. |
| Reconciliation time | A histogram reflecting the time in seconds to perform a single reconciliation task from the controller's work queue. A histogram counts the number of requests that are processed within a given bucket of time. This metric reflects the time it takes for ARC to complete each step in the processing of creating, managing, and cleaning up runners. As this increases, it can indicate resource contention or processing delays within Kubernetes or the API server. This display shows an average, which may hide larger or smaller times occurring in the processing. |
| Workqueue depth | The number of tasks that Kubernetes queued for the ARC controllers to process. This includes reconciliation requests and tasks from ARC. ARC processes the work queue sequentially, one small task at a time, to avoid concurrency issues. Managing a runner requires multiple steps to prepare, create, update, and delete the runner, its resources, and the ARC custom resources. As each step is completed (or triggers reconciliation), new tasks are queued for processing. As the depth increases, it indicates more tasks awaiting time from the controller. Growth reflects an increasing backlog and may indicate Kubernetes resource contention or processing latencies. Each request for a new runner will result in multiple tasks being added to the work queue to prepare and create the runner and the related ARC custom resources. |
| Scrape Duration (seconds) | The amount of time required for Prometheus to read the configured metrics from components in the cluster. An increasing number may indicate a lack of resources for Prometheus and a risk of the process exceeding the configured timeout, leading to lost metrics data. |


@@ -10,6 +10,7 @@ import (
"errors"
"fmt"
"io"
"math/rand"
"net/http"
"net/url"
"strconv"
@@ -1054,6 +1055,14 @@ func (c *Client) fetchAccessToken(ctx context.Context, gitHubConfigURL string, c
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusCreated {
return nil, &GitHubAPIError{
StatusCode: resp.StatusCode,
RequestID: resp.Header.Get(HeaderGitHubRequestID),
Err: fmt.Errorf("failed to get access token for GitHub App auth: %v", resp.Status),
}
}
// Format: https://docs.github.com/en/rest/apps/apps#create-an-installation-access-token-for-an-app
var accessToken *accessToken
if err = json.NewDecoder(resp.Body).Decode(&accessToken); err != nil {
@@ -1131,15 +1140,30 @@ func (c *Client) getActionsServiceAdminConnection(ctx context.Context, rt *regis
}
retry++
if retry > 3 {
if retry > 5 {
return nil, fmt.Errorf("unable to register runner after 5 retries: %w", &GitHubAPIError{
StatusCode: resp.StatusCode,
RequestID: resp.Header.Get(HeaderGitHubRequestID),
Err: innerErr,
})
}
time.Sleep(time.Duration(500 * int(time.Millisecond) * (retry + 1)))
// Add exponential backoff + jitter to avoid thundering herd
// This will generate a backoff schedule:
// 1: 1s
// 2: 3s
// 3: 4s
// 4: 8s
// 5: 17s
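// i.e. delay(n) = 500ms * 2^n + jitter in [0s, 1s), capped at maxDelay (20s).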
baseDelay := 500 * time.Millisecond
jitter := time.Duration(rand.Intn(1000)) * time.Millisecond
maxDelay := 20 * time.Second
delay := baseDelay*time.Duration(1<<retry) + jitter
if delay > maxDelay {
delay = maxDelay
}
time.Sleep(delay)
}
var actionsServiceAdminConnection *ActionsServiceAdminConnection


@@ -170,7 +170,7 @@ func TestNewActionsServiceRequest(t *testing.T) {
}
failures := 0
unauthorizedHandler := func(w http.ResponseWriter, r *http.Request) {
if failures < 2 {
if failures < 5 {
failures++
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusUnauthorized)

go.mod

@@ -1,20 +1,20 @@
module github.com/actions/actions-runner-controller
go 1.22.1
go 1.22.4
require (
github.com/bradleyfalzon/ghinstallation/v2 v2.8.0
github.com/bradleyfalzon/ghinstallation/v2 v2.12.0
github.com/davecgh/go-spew v1.1.1
github.com/evanphx/json-patch v5.9.0+incompatible
github.com/go-logr/logr v1.4.1
github.com/golang-jwt/jwt/v4 v4.5.0
github.com/golang-jwt/jwt/v4 v4.5.1
github.com/google/go-cmp v0.6.0
github.com/google/go-github/v52 v52.0.0
github.com/google/uuid v1.6.0
github.com/gorilla/mux v1.8.1
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79
github.com/gruntwork-io/terratest v0.46.7
github.com/hashicorp/go-retryablehttp v0.7.5
github.com/hashicorp/go-retryablehttp v0.7.7
github.com/kelseyhightower/envconfig v1.4.0
github.com/onsi/ginkgo v1.16.5
github.com/onsi/ginkgo/v2 v2.17.1
@@ -25,9 +25,9 @@ require (
github.com/teambition/rrule-go v1.8.2
go.uber.org/multierr v1.11.0
go.uber.org/zap v1.27.0
golang.org/x/net v0.24.0
golang.org/x/net v0.25.0
golang.org/x/oauth2 v0.19.0
golang.org/x/sync v0.7.0
golang.org/x/sync v0.10.0
gomodules.xyz/jsonpatch/v2 v2.4.0
gopkg.in/yaml.v2 v2.4.0
k8s.io/api v0.28.4
@@ -60,7 +60,7 @@ require (
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.3 // indirect
github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 // indirect
github.com/google/go-github/v56 v56.0.0 // indirect
github.com/google/go-github/v66 v66.0.0 // indirect
github.com/google/go-querystring v1.1.0 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/google/pprof v0.0.0-20231101202521-4ca4178f5c7a // indirect
@@ -89,13 +89,13 @@ require (
github.com/spf13/pflag v1.0.5 // indirect
github.com/stretchr/objx v0.5.2 // indirect
github.com/urfave/cli v1.22.2 // indirect
golang.org/x/crypto v0.22.0 // indirect
golang.org/x/crypto v0.31.0 // indirect
golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa // indirect
golang.org/x/sys v0.19.0 // indirect
golang.org/x/term v0.19.0 // indirect
golang.org/x/text v0.14.0 // indirect
golang.org/x/sys v0.28.0 // indirect
golang.org/x/term v0.27.0 // indirect
golang.org/x/text v0.21.0 // indirect
golang.org/x/time v0.4.0 // indirect
golang.org/x/tools v0.17.0 // indirect
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect
google.golang.org/appengine v1.6.8 // indirect
google.golang.org/protobuf v1.33.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect

go.sum

@@ -13,8 +13,8 @@ github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kB
github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
github.com/boombuler/barcode v1.0.1 h1:NDBbPmhS+EqABEs5Kg3n/5ZNjy73Pz7SIV+KCeqyXcs=
github.com/boombuler/barcode v1.0.1/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
github.com/bradleyfalzon/ghinstallation/v2 v2.8.0 h1:yUmoVv70H3J4UOqxqsee39+KlXxNEDfTbAp8c/qULKk=
github.com/bradleyfalzon/ghinstallation/v2 v2.8.0/go.mod h1:fmPmvCiBWhJla3zDv9ZTQSZc8AbwyRnGW1yg5ep1Pcs=
github.com/bradleyfalzon/ghinstallation/v2 v2.12.0 h1:k8oVjGhZel2qmCUsYwSE34jPNT9DL2wCBOtugsHv26g=
github.com/bradleyfalzon/ghinstallation/v2 v2.12.0/go.mod h1:V4gJcNyAftH0rXpRp1SUVUuh+ACxOH1xOk/ZzkRHltg=
github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
@@ -35,6 +35,8 @@ github.com/evanphx/json-patch v5.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLi
github.com/evanphx/json-patch/v5 v5.7.0 h1:nJqP7uwL84RJInrohHfW0Fx3awjbm8qZeFv0nW9SYGc=
github.com/evanphx/json-patch/v5 v5.7.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ=
github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
@@ -64,8 +66,8 @@ github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEe
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang-jwt/jwt/v4 v4.5.1 h1:JdqV9zKUdtaa9gdPlywC3aeoEsR681PlKC+4F5gQgeo=
github.com/golang-jwt/jwt/v4 v4.5.1/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
@@ -91,8 +93,8 @@ github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-github/v52 v52.0.0 h1:uyGWOY+jMQ8GVGSX8dkSwCzlehU3WfdxQ7GweO/JP7M=
github.com/google/go-github/v52 v52.0.0/go.mod h1:WJV6VEEUPuMo5pXqqa2ZCZEdbQqua4zAk2MZTIo+m+4=
github.com/google/go-github/v56 v56.0.0 h1:TysL7dMa/r7wsQi44BjqlwaHvwlFlqkK8CtBWCX3gb4=
github.com/google/go-github/v56 v56.0.0/go.mod h1:D8cdcX98YWJvi7TLo7zM4/h8ZTx6u6fwGEkCdisopo0=
github.com/google/go-github/v66 v66.0.0 h1:ADJsaXj9UotwdgK8/iFZtv7MLc8E8WBl62WLd/D/9+M=
github.com/google/go-github/v66 v66.0.0/go.mod h1:+4SO9Zkuyf8ytMj0csN1NR/5OTR+MfqPp8P8dVlcvY4=
github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8=
github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@@ -113,12 +115,12 @@ github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/U
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
github.com/hashicorp/go-hclog v0.9.2 h1:CG6TE5H9/JXsFWJCfoIVpKFIkFe6ysEuHirp4DxCsHI=
github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k=
github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
github.com/hashicorp/go-multierror v1.1.0 h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g4TbElacI=
github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA=
github.com/hashicorp/go-retryablehttp v0.7.5 h1:bJj+Pj19UZMIweq/iie+1u5YCdGrnxCT9yvm0e+Nd5M=
github.com/hashicorp/go-retryablehttp v0.7.5/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8=
github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU=
github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
@@ -145,8 +147,12 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-zglob v0.0.1/go.mod h1:9fxibJccNxU2cnpIKLRRFA7zX7qhkJIQWBb449FYHOo=
github.com/mattn/go-zglob v0.0.2-0.20190814121620-e3c945676326 h1:ofNAzWCcyTALn2Zv40+8XitdzCgXY6e9qvXwN9W0YXg=
github.com/mattn/go-zglob v0.0.2-0.20190814121620-e3c945676326/go.mod h1:9fxibJccNxU2cnpIKLRRFA7zX7qhkJIQWBb449FYHOo=
@@ -233,8 +239,8 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30=
golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M=
golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U=
golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa h1:FRnLl4eNAQl8hwxVVC17teOw8kdjVDVAiFMtgUdTSRQ=
golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa/go.mod h1:zk2irFbV9DP96SEBUUAy67IdHUaZuSnrz1n472HUCLE=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
@@ -253,8 +259,8 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug
golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w=
golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8=
golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac=
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
golang.org/x/oauth2 v0.19.0 h1:9+E/EZBCbTLNrbN35fHv/a/d/mOBatymz1zbtQrXpIg=
golang.org/x/oauth2 v0.19.0/go.mod h1:vYi7skDa1x015PmRRYZ7+s1cWyPgrPiSYRe4rnsexc8=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -263,8 +269,8 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -286,15 +292,15 @@ golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o=
golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q=
golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk=
golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q=
golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
@@ -302,8 +308,8 @@ golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
golang.org/x/time v0.4.0 h1:Z81tqI5ddIoXDPvVQ7/7CC9TnLM7ubaFG2qXYd5BbYY=
golang.org/x/time v0.4.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -313,8 +319,8 @@ golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4f
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc=
golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps=
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg=
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=

main.go

@@ -94,14 +94,20 @@ func main() {
runnerImagePullSecrets stringSlice
runnerPodDefaults actionssummerwindnet.RunnerPodDefaults
namespace string
logLevel string
logFormat string
watchSingleNamespace string
namespace string
logLevel string
logFormat string
watchSingleNamespace string
excludeLabelPropagationPrefixes stringSlice
autoScalerImagePullSecrets stringSlice
opts = actionsgithubcom.OptionsWithDefault()
commonRunnerLabels commaSeparatedStringSlice
k8sClientRateLimiterQPS int
k8sClientRateLimiterBurst int
)
var c github.Config
err = envconfig.Process("github", &c)
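
The var block ends by loading the GitHub client settings from the environment. Assuming the widely used kelseyhightower/envconfig package that ARC imports here, Process fills exported struct fields from variables sharing the given prefix; a minimal sketch with a hypothetical two-field config (the real github.Config carries many more fields):

package main

import (
	"fmt"
	"log"

	"github.com/kelseyhightower/envconfig"
)

// Hypothetical config for illustration only; ARC's github.Config is richer.
type Config struct {
	Token         string `envconfig:"TOKEN"`          // read from GITHUB_TOKEN
	EnterpriseURL string `envconfig:"ENTERPRISE_URL"` // read from GITHUB_ENTERPRISE_URL
}

func main() {
	var c Config
	// The "github" prefix turns each field lookup into GITHUB_<NAME>.
	if err := envconfig.Process("github", &c); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", c)
}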
@@ -135,14 +141,18 @@ func main() {
flag.DurationVar(&defaultScaleDownDelay, "default-scale-down-delay", actionssummerwindnet.DefaultScaleDownDelay, "The approximate delay for a scale down followed by a scale up, used to prevent flapping (down->up->down->... loop)")
flag.IntVar(&port, "port", 9443, "The port to which the admission webhook endpoint should bind")
flag.DurationVar(&syncPeriod, "sync-period", 1*time.Minute, "Determines the minimum frequency at which K8s resources managed by this controller are reconciled.")
flag.IntVar(&opts.RunnerMaxConcurrentReconciles, "runner-max-concurrent-reconciles", opts.RunnerMaxConcurrentReconciles, "The maximum number of concurrent reconciles which can be run by the EphemeralRunner controller. Increase this value to improve the throughput of the controller, but it may also increase the load on the API server and the external service (e.g. GitHub API).")
flag.Var(&commonRunnerLabels, "common-runner-labels", "Runner labels in the K1=V1,K2=V2,... format that are inherited by all the runners created by the controller. See https://github.com/actions/actions-runner-controller/issues/321 for more information.")
flag.StringVar(&namespace, "watch-namespace", "", "The namespace to watch for custom resources. Leave empty to watch all namespaces.")
flag.StringVar(&watchSingleNamespace, "watch-single-namespace", "", "Restrict watching of custom resources to a single namespace.")
flag.Var(&excludeLabelPropagationPrefixes, "exclude-label-propagation-prefix", "The list of prefixes that should be excluded from label propagation")
flag.StringVar(&logLevel, "log-level", logging.LogLevelDebug, `The verbosity of the logging. Valid values are "debug", "info", "warn", "error". Defaults to "debug".`)
flag.StringVar(&logFormat, "log-format", "text", `The log format. Valid options are "text" and "json". Defaults to "text".`)
flag.BoolVar(&autoScalingRunnerSetOnly, "auto-scaling-runner-set-only", false, "Make the controller reconcile only AutoscalingRunnerSet objects.")
flag.StringVar(&updateStrategy, "update-strategy", "immediate", `Resources reconciliation strategy on upgrade with running/pending jobs. Valid values are: "immediate", "eventual". Defaults to "immediate".`)
flag.Var(&autoScalerImagePullSecrets, "auto-scaler-image-pull-secrets", "The default image-pull secret name for the auto-scaler listener container.")
flag.IntVar(&k8sClientRateLimiterQPS, "k8s-client-rate-limiter-qps", 20, "The QPS value of the K8s client rate limiter.")
flag.IntVar(&k8sClientRateLimiterBurst, "k8s-client-rate-limiter-burst", 30, "The burst value of the K8s client rate limiter.")
flag.Parse()
runnerPodDefaults.RunnerImagePullSecrets = runnerImagePullSecrets
@@ -154,6 +164,8 @@ func main() {
}
c.Log = &log
log.Info("Using options", "runner-max-concurrent-reconciles", opts.RunnerMaxConcurrentReconciles)
if !autoScalingRunnerSetOnly {
ghClient, err = c.NewClient()
if err != nil {
@@ -212,7 +224,11 @@ func main() {
})
}
mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
cfg := ctrl.GetConfigOrDie()
cfg.QPS = float32(k8sClientRateLimiterQPS)
cfg.Burst = k8sClientRateLimiterBurst
mgr, err := ctrl.NewManager(cfg, ctrl.Options{
Scheme: scheme,
Metrics: metricsserver.Options{
BindAddress: metricsAddr,
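
This hunk is where the new rate-limiter flags bite: client-go derives a token-bucket limiter from rest.Config's QPS and Burst fields, so mutating them before ctrl.NewManager throttles every API request issued through the manager's clients. A minimal sketch of the same wiring, with only the flag names and defaults taken from the diff (the helper name is hypothetical):

package main

import (
	"flag"

	"k8s.io/client-go/rest"
	ctrl "sigs.k8s.io/controller-runtime"
)

// restConfigWithRateLimits mirrors the diff's logic in isolation.
func restConfigWithRateLimits() *rest.Config {
	qps := flag.Int("k8s-client-rate-limiter-qps", 20, "QPS of the K8s client rate limiter")
	burst := flag.Int("k8s-client-rate-limiter-burst", 30, "burst of the K8s client rate limiter")
	flag.Parse()

	cfg := ctrl.GetConfigOrDie()
	cfg.QPS = float32(*qps) // sustained requests per second
	cfg.Burst = *burst      // short spikes tolerated above QPS
	return cfg
}

Raising both values trades API-server load for reconcile throughput, which is why they pair naturally with runner-max-concurrent-reconciles.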
@@ -258,6 +274,10 @@ func main() {
log.WithName("actions-clients"),
)
rb := actionsgithubcom.ResourceBuilder{
ExcludeLabelPropagationPrefixes: excludeLabelPropagationPrefixes,
}
if err = (&actionsgithubcom.AutoscalingRunnerSetReconciler{
Client: mgr.GetClient(),
Log: log.WithName("AutoscalingRunnerSet").WithValues("version", build.Version),
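
The ExcludeLabelPropagationPrefixes field suggests the shared ResourceBuilder drops any inherited label whose key starts with a configured prefix before stamping labels onto child resources. A sketch of that kind of prefix filter, illustrative rather than ARC's actual implementation:

package resourcebuilder

import "strings"

// filterLabels is a hypothetical helper: it copies labels, skipping any key
// that matches one of the excluded prefixes (e.g. tooling-owned labels such
// as "argocd.argoproj.io/").
func filterLabels(labels map[string]string, excludedPrefixes []string) map[string]string {
	out := make(map[string]string, len(labels))
	for key, value := range labels {
		excluded := false
		for _, prefix := range excludedPrefixes {
			if strings.HasPrefix(key, prefix) {
				excluded = true
				break
			}
		}
		if !excluded {
			out[key] = value
		}
	}
	return out
}

With a flag like -exclude-label-propagation-prefix=argocd.argoproj.io/ on the command line, a label such as argocd.argoproj.io/instance would then stay on the parent resource only.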
@@ -267,27 +287,30 @@ func main() {
ActionsClient: actionsMultiClient,
UpdateStrategy: actionsgithubcom.UpdateStrategy(updateStrategy),
DefaultRunnerScaleSetListenerImagePullSecrets: autoScalerImagePullSecrets,
ResourceBuilder: rb,
}).SetupWithManager(mgr); err != nil {
log.Error(err, "unable to create controller", "controller", "AutoscalingRunnerSet")
os.Exit(1)
}
if err = (&actionsgithubcom.EphemeralRunnerReconciler{
Client: mgr.GetClient(),
Log: log.WithName("EphemeralRunner").WithValues("version", build.Version),
Scheme: mgr.GetScheme(),
ActionsClient: actionsMultiClient,
}).SetupWithManager(mgr); err != nil {
Client: mgr.GetClient(),
Log: log.WithName("EphemeralRunner").WithValues("version", build.Version),
Scheme: mgr.GetScheme(),
ActionsClient: actionsMultiClient,
ResourceBuilder: rb,
}).SetupWithManager(mgr, actionsgithubcom.WithMaxConcurrentReconciles(opts.RunnerMaxConcurrentReconciles)); err != nil {
log.Error(err, "unable to create controller", "controller", "EphemeralRunner")
os.Exit(1)
}
if err = (&actionsgithubcom.EphemeralRunnerSetReconciler{
Client: mgr.GetClient(),
Log: log.WithName("EphemeralRunnerSet").WithValues("version", build.Version),
Scheme: mgr.GetScheme(),
ActionsClient: actionsMultiClient,
PublishMetrics: metricsAddr != "0",
Client: mgr.GetClient(),
Log: log.WithName("EphemeralRunnerSet").WithValues("version", build.Version),
Scheme: mgr.GetScheme(),
ActionsClient: actionsMultiClient,
PublishMetrics: metricsAddr != "0",
ResourceBuilder: rb,
}).SetupWithManager(mgr); err != nil {
log.Error(err, "unable to create controller", "controller", "EphemeralRunnerSet")
os.Exit(1)
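
The EphemeralRunner controller is the only one set up with WithMaxConcurrentReconciles, which reads like a functional option that ultimately lands in controller-runtime's controller.Options. A sketch of how such an option can be threaded through SetupWithManager; everything beyond the names visible in the diff is an assumption:

package controllers

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/controller"
)

// Option is a hypothetical functional option over controller.Options.
type Option func(*controller.Options)

// WithMaxConcurrentReconciles caps how many Reconcile calls run in parallel.
func WithMaxConcurrentReconciles(n int) Option {
	return func(o *controller.Options) { o.MaxConcurrentReconciles = n }
}

// PodReconciler stands in for ARC's EphemeralRunnerReconciler.
type PodReconciler struct{}

func (r *PodReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	return ctrl.Result{}, nil
}

// SetupWithManager folds the options in before registering the controller.
func (r *PodReconciler) SetupWithManager(mgr ctrl.Manager, opts ...Option) error {
	var co controller.Options
	for _, o := range opts {
		o(&co)
	}
	return ctrl.NewControllerManagedBy(mgr).
		For(&corev1.Pod{}). // ARC watches EphemeralRunner; Pod keeps the sketch self-contained
		WithOptions(co).
		Complete(r)
}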
@@ -299,6 +322,7 @@ func main() {
Scheme: mgr.GetScheme(),
ListenerMetricsAddr: listenerMetricsAddr,
ListenerMetricsEndpoint: listenerMetricsEndpoint,
ResourceBuilder: rb,
}).SetupWithManager(mgr); err != nil {
log.Error(err, "unable to create controller", "controller", "AutoscalingListener")
os.Exit(1)


@@ -6,8 +6,8 @@ DIND_ROOTLESS_RUNNER_NAME ?= ${DOCKER_USER}/actions-runner-dind-rootless
OS_IMAGE ?= ubuntu-22.04
TARGETPLATFORM ?= $(shell arch)
RUNNER_VERSION ?= 2.316.1
RUNNER_CONTAINER_HOOKS_VERSION ?= 0.6.0
RUNNER_VERSION ?= 2.321.0
RUNNER_CONTAINER_HOOKS_VERSION ?= 0.6.2
DOCKER_VERSION ?= 24.0.7
# default list of platforms for which multiarch image is built


@@ -1,2 +1,2 @@
RUNNER_VERSION=2.316.1
RUNNER_CONTAINER_HOOKS_VERSION=0.6.0
RUNNER_VERSION=2.321.0
RUNNER_CONTAINER_HOOKS_VERSION=0.6.2


@@ -36,8 +36,8 @@ var (
testResultCMNamePrefix = "test-result-"
RunnerVersion = "2.316.1"
RunnerContainerHooksVersion = "0.6.0"
RunnerVersion = "2.321.0"
RunnerContainerHooksVersion = "0.6.2"
)
// If you're willing to run this test via VS Code "run test" or "debug test",
@@ -1106,7 +1106,7 @@ func installActionsWorkflow(t *testing.T, testName, runnerLabel, testResultCMNam
testing.Step{
Uses: "actions/setup-go@v3",
With: &testing.With{
GoVersion: "1.22.1",
GoVersion: "1.22.4",
},
},
)
@@ -1236,7 +1236,7 @@ func installActionsWorkflow(t *testing.T, testName, runnerLabel, testResultCMNam
testing.Step{
Uses: "azure/setup-kubectl@v1",
With: &testing.With{
Version: "v1.22.1",
Version: "v1.22.4",
},
},
testing.Step{


@@ -355,7 +355,7 @@ nodes:
image: %s
`, k.Name, image, image))
if err := os.WriteFile(f.Name(), kindConfig, 0644); err != nil {
if err := os.WriteFile(f.Name(), kindConfig, 0o644); err != nil {
return err
}
@@ -385,7 +385,7 @@ func (k *Kind) LoadImages(ctx context.Context, images []ContainerImage) error {
}
tmpDir := filepath.Join(wd, ".testing", k.Name)
if err := os.MkdirAll(tmpDir, 0755); err != nil {
if err := os.MkdirAll(tmpDir, 0o755); err != nil {
return err
}
defer func() {
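
The 0644 → 0o644 and 0755 → 0o755 changes in this file are purely notational: Go has accepted the 0o prefix for octal literals since 1.13, and formatters favor it because a bare leading zero is easy to misread as decimal. A quick standard-library-only check:

package main

import "fmt"

func main() {
	fmt.Println(0644 == 0o644) // true: same octal value, clearer prefix
	fmt.Println(0o644)         // 420 decimal, i.e. rw-r--r--
	fmt.Println(0o755)         // 493 decimal, i.e. rwxr-xr-x
}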


@@ -1,7 +1,7 @@
package testing
const (
ActionsCheckout = "actions/checkout@v3"
ActionsCheckout = "actions/checkout@v4"
)
type Workflow struct {