Mirror of https://github.com/actions/actions-runner-controller.git (synced 2025-12-10 11:41:27 +00:00)

Compare commits: update-run ... responsive (14 commits)
| Author | SHA1 | Date |
|---|---|---|
| | a4876c5d03 | |
| | f58dd76763 | |
| | 90b68fec1a | |
| | 1be410ba80 | |
| | 930c9db6e7 | |
| | a152741a1a | |
| | 80d848339e | |
| | 8535a24135 | |
| | b349ded2be | |
| | 6276c84493 | |
| | 4a8420ce96 | |
| | a62ca3d853 | |
| | 4eb038eaa1 | |
| | b2c6992e84 | |
@@ -47,7 +47,7 @@ runs:
         -d '{"ref": "main", "inputs": { "arc_name": "${{inputs.arc-name}}" } }'

     - name: Fetch workflow run & job ids
-      uses: actions/github-script@v6
+      uses: actions/github-script@v7
       id: query_workflow
       with:
         script: |
@@ -128,7 +128,7 @@ runs:

     - name: Wait for workflow to start running
       if: inputs.wait-to-running == 'true' && inputs.wait-to-finish == 'false'
-      uses: actions/github-script@v6
+      uses: actions/github-script@v7
       with:
         script: |
           function sleep(ms) {
@@ -156,7 +156,7 @@ runs:

     - name: Wait for workflow to finish successfully
       if: inputs.wait-to-finish == 'true'
-      uses: actions/github-script@v6
+      uses: actions/github-script@v7
       with:
         script: |
           // Wait 5 minutes and make sure the workflow run we triggered completed with result 'success'
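For context, actions/github-script@v7 keeps the same `script:` interface as v6 (the bump mainly moves the action to a newer Node runtime), so steps like the ones above typically only need the version change. Below is a minimal, illustrative sketch of such a step; the workflow file name and the output handling are assumptions for illustration, not code from this repository.

```yaml
- name: Fetch latest workflow run id (illustrative sketch)
  uses: actions/github-script@v7
  id: query_workflow
  with:
    script: |
      // List recent runs of an assumed workflow file and expose the newest run id.
      const runs = await github.rest.actions.listWorkflowRuns({
        owner: context.repo.owner,
        repo: context.repo.repo,
        workflow_id: 'arc-test-workflow.yaml', // assumed file name for illustration
        per_page: 1,
      });
      core.setOutput('run-id', runs.data.workflow_runs[0]?.id ?? '');
```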
.github/actions/setup-arc-e2e/action.yaml (vendored, 6 changed lines)

@@ -27,7 +27,7 @@ runs:
   using: "composite"
   steps:
     - name: Set up Docker Buildx
-      uses: docker/setup-buildx-action@v2
+      uses: docker/setup-buildx-action@v3
       with:
         # Pinning v0.9.1 for Buildx and BuildKit v0.10.6
         # BuildKit v0.11 which has a bug causing intermittent
@@ -36,7 +36,7 @@ runs:
         driver-opts: image=moby/buildkit:v0.10.6

     - name: Build controller image
-      uses: docker/build-push-action@v3
+      uses: docker/build-push-action@v5
       with:
         file: Dockerfile
         platforms: linux/amd64
@@ -56,7 +56,7 @@ runs:

     - name: Get configure token
       id: config-token
-      uses: peter-murray/workflow-application-token-action@8e1ba3bf1619726336414f1014e37f17fbadf1db
+      uses: peter-murray/workflow-application-token-action@dc0413987a085fa17d19df9e47d4677cf81ffef3
       with:
         application_id: ${{ inputs.app-id }}
         application_private_key: ${{ inputs.app-pk }}
@@ -24,23 +24,23 @@ runs:
       shell: bash

     - name: Set up QEMU
-      uses: docker/setup-qemu-action@v2
+      uses: docker/setup-qemu-action@v3

     - name: Set up Docker Buildx
-      uses: docker/setup-buildx-action@v2
+      uses: docker/setup-buildx-action@v3
       with:
         version: latest

     - name: Login to DockerHub
       if: ${{ github.event_name == 'release' || github.event_name == 'push' && github.ref == 'refs/heads/master' && inputs.password != '' }}
-      uses: docker/login-action@v2
+      uses: docker/login-action@v3
       with:
         username: ${{ inputs.username }}
         password: ${{ inputs.password }}

     - name: Login to GitHub Container Registry
       if: ${{ github.event_name == 'release' || github.event_name == 'push' && github.ref == 'refs/heads/master' && inputs.ghcr_password != '' }}
-      uses: docker/login-action@v2
+      uses: docker/login-action@v3
       with:
         registry: ghcr.io
         username: ${{ inputs.ghcr_username }}
.github/workflows/arc-publish-chart.yaml (vendored, 12 changed lines)

@@ -40,12 +40,12 @@ jobs:
       publish-chart: ${{ steps.publish-chart-step.outputs.publish }}
     steps:
       - name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
        with:
          fetch-depth: 0

       - name: Set up Helm
-        uses: azure/setup-helm@v3.4
+        uses: azure/setup-helm@fe7b79cd5ee1e45176fcad797de68ecaf3ca4814
        with:
          version: ${{ env.HELM_VERSION }}

@@ -58,7 +58,7 @@ jobs:
         run: helm template --values charts/.ci/values-kube-score.yaml charts/* | ./kube-score score - --ignore-test pod-networkpolicy --ignore-test deployment-has-poddisruptionbudget --ignore-test deployment-has-host-podantiaffinity --ignore-test container-security-context --ignore-test pod-probes --ignore-test container-image-tag --enable-optional-test container-security-context-privileged --enable-optional-test container-security-context-readonlyrootfilesystem

       # python is a requirement for the chart-testing action below (supports yamllint among other tests)
-      - uses: actions/setup-python@v4
+      - uses: actions/setup-python@v5
        with:
          python-version: '3.11'

@@ -134,7 +134,7 @@ jobs:

     steps:
       - name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
        with:
          fetch-depth: 0

@@ -145,7 +145,7 @@ jobs:

       - name: Get Token
         id: get_workflow_token
-        uses: peter-murray/workflow-application-token-action@8e1ba3bf1619726336414f1014e37f17fbadf1db
+        uses: peter-murray/workflow-application-token-action@dc0413987a085fa17d19df9e47d4677cf81ffef3
        with:
          application_id: ${{ secrets.ACTIONS_ACCESS_APP_ID }}
          application_private_key: ${{ secrets.ACTIONS_ACCESS_PK }}
@@ -184,7 +184,7 @@ jobs:
       # this workaround is intended to move the index.yaml to the target repo
       # where the github pages are hosted
       - name: Checkout target repository
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
        with:
          repository: ${{ env.CHART_TARGET_ORG }}/${{ env.CHART_TARGET_REPO }}
          path: ${{ env.CHART_TARGET_REPO }}
.github/workflows/arc-publish.yaml (vendored, 6 changed lines)

@@ -39,9 +39,9 @@ jobs:
     if: ${{ !startsWith(github.event.inputs.release_tag_name, 'gha-runner-scale-set-') }}
     steps:
       - name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4

-      - uses: actions/setup-go@v4
+      - uses: actions/setup-go@v5
        with:
          go-version-file: 'go.mod'

@@ -73,7 +73,7 @@ jobs:

       - name: Get Token
         id: get_workflow_token
-        uses: peter-murray/workflow-application-token-action@8e1ba3bf1619726336414f1014e37f17fbadf1db
+        uses: peter-murray/workflow-application-token-action@dc0413987a085fa17d19df9e47d4677cf81ffef3
        with:
          application_id: ${{ secrets.ACTIONS_ACCESS_APP_ID }}
          application_private_key: ${{ secrets.ACTIONS_ACCESS_PK }}
.github/workflows/arc-release-runners.yaml (vendored, 4 changed lines)

@@ -28,7 +28,7 @@ jobs:
     name: Trigger Build and Push of Runner Images
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - name: Get runner version
         id: versions
         run: |
@@ -39,7 +39,7 @@ jobs:

       - name: Get Token
         id: get_workflow_token
-        uses: peter-murray/workflow-application-token-action@8e1ba3bf1619726336414f1014e37f17fbadf1db
+        uses: peter-murray/workflow-application-token-action@dc0413987a085fa17d19df9e47d4677cf81ffef3
        with:
          application_id: ${{ secrets.ACTIONS_ACCESS_APP_ID }}
          application_private_key: ${{ secrets.ACTIONS_ACCESS_PK }}
@@ -21,7 +21,7 @@ jobs:
       container_hooks_current_version: ${{ steps.container_hooks_versions.outputs.container_hooks_current_version }}
       container_hooks_latest_version: ${{ steps.container_hooks_versions.outputs.container_hooks_latest_version }}
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4

       - name: Get runner current and latest versions
         id: runner_versions
@@ -64,7 +64,7 @@ jobs:
           echo "CONTAINER_HOOKS_CURRENT_VERSION=${{ needs.check_versions.outputs.container_hooks_current_version }}"
           echo "CONTAINER_HOOKS_LATEST_VERSION=${{ needs.check_versions.outputs.container_hooks_latest_version }}"

-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4

       - name: PR Name
         id: pr_name
@@ -119,7 +119,7 @@ jobs:
       PR_NAME: ${{ needs.check_pr.outputs.pr_name }}

     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4

       - name: New branch
         run: git checkout -b update-runner-"$(date +%Y-%m-%d)"
.github/workflows/arc-validate-chart.yaml (vendored, 8 changed lines)

@@ -40,13 +40,13 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
        with:
          fetch-depth: 0

       - name: Set up Helm
-        # Using https://github.com/Azure/setup-helm/releases/tag/v3.5
-        uses: azure/setup-helm@5119fcb9089d432beecbf79bb2c7915207344b78
+        # Using https://github.com/Azure/setup-helm/releases/tag/v4.2
+        uses: azure/setup-helm@fe7b79cd5ee1e45176fcad797de68ecaf3ca4814
        with:
          version: ${{ env.HELM_VERSION }}

@@ -67,7 +67,7 @@ jobs:
           --enable-optional-test container-security-context-readonlyrootfilesystem

       # python is a requirement for the chart-testing action below (supports yamllint among other tests)
-      - uses: actions/setup-python@v4
+      - uses: actions/setup-python@v5
        with:
          python-version: '3.11'
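The Helm setup bump above also switches from a tag reference to a pinned commit. The general pattern, shown here as a sketch that reuses the pin from this diff, is to reference the full commit SHA and keep the corresponding release tag in a comment so version bumps stay auditable:

```yaml
- name: Set up Helm
  # Using https://github.com/Azure/setup-helm/releases/tag/v4.2
  uses: azure/setup-helm@fe7b79cd5ee1e45176fcad797de68ecaf3ca4814
  with:
    version: ${{ env.HELM_VERSION }}  # HELM_VERSION is defined at the workflow level
```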
.github/workflows/arc-validate-runners.yaml (vendored, 4 changed lines)

@@ -24,7 +24,7 @@ jobs:
     name: runner / shellcheck
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - name: shellcheck
         uses: reviewdog/action-shellcheck@v1
         with:
@@ -45,7 +45,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4

       - name: Run tests
         run: |
.github/workflows/gha-e2e-tests.yaml (vendored, 20 changed lines)

@@ -16,7 +16,7 @@ env:
   TARGET_ORG: actions-runner-controller
   TARGET_REPO: arc_e2e_test_dummy
   IMAGE_NAME: "arc-test-image"
-  IMAGE_VERSION: "0.9.2"
+  IMAGE_VERSION: "0.9.3"

 concurrency:
   # This will make sure we only apply the concurrency limits on pull requests
@@ -33,7 +33,7 @@ jobs:
     env:
       WORKFLOW_FILE: "arc-test-workflow.yaml"
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
        with:
          ref: ${{github.head_ref}}

@@ -122,7 +122,7 @@ jobs:
     env:
       WORKFLOW_FILE: "arc-test-workflow.yaml"
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
        with:
          ref: ${{github.head_ref}}

@@ -213,7 +213,7 @@ jobs:
     env:
       WORKFLOW_FILE: arc-test-dind-workflow.yaml
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
        with:
          ref: ${{github.head_ref}}

@@ -303,7 +303,7 @@ jobs:
     env:
       WORKFLOW_FILE: "arc-test-kubernetes-workflow.yaml"
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
        with:
          ref: ${{github.head_ref}}

@@ -402,7 +402,7 @@ jobs:
     env:
       WORKFLOW_FILE: "arc-test-workflow.yaml"
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
        with:
          ref: ${{github.head_ref}}

@@ -503,7 +503,7 @@ jobs:
     env:
       WORKFLOW_FILE: "arc-test-workflow.yaml"
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
        with:
          ref: ${{github.head_ref}}

@@ -598,7 +598,7 @@ jobs:
     env:
       WORKFLOW_FILE: "arc-test-workflow.yaml"
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
        with:
          ref: ${{github.head_ref}}

@@ -718,7 +718,7 @@ jobs:
     env:
       WORKFLOW_FILE: "arc-test-sleepy-matrix.yaml"
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
        with:
          ref: ${{github.head_ref}}

@@ -888,7 +888,7 @@ jobs:
     env:
       WORKFLOW_FILE: arc-test-workflow.yaml
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
        with:
          ref: ${{ github.head_ref }}
.github/workflows/gha-publish-chart.yaml (vendored, 22 changed lines)

@@ -45,7 +45,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
        with:
          # If inputs.ref is empty, it'll resolve to the default branch
          ref: ${{ inputs.ref }}
@@ -72,10 +72,10 @@ jobs:
           echo "repository_owner=$(echo ${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT

       - name: Set up QEMU
-        uses: docker/setup-qemu-action@v2
+        uses: docker/setup-qemu-action@v3

       - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v2
+        uses: docker/setup-buildx-action@v3
        with:
          # Pinning v0.9.1 for Buildx and BuildKit v0.10.6
          # BuildKit v0.11 which has a bug causing intermittent
@@ -84,14 +84,14 @@ jobs:
           driver-opts: image=moby/buildkit:v0.10.6

       - name: Login to GitHub Container Registry
-        uses: docker/login-action@v2
+        uses: docker/login-action@v3
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

       - name: Build & push controller image
-        uses: docker/build-push-action@v3
+        uses: docker/build-push-action@v5
        with:
          file: Dockerfile
          platforms: linux/amd64,linux/arm64
@@ -121,7 +121,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
        with:
          # If inputs.ref is empty, it'll resolve to the default branch
          ref: ${{ inputs.ref }}
@@ -140,8 +140,8 @@ jobs:
           echo "repository_owner=$(echo ${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT

       - name: Set up Helm
-        # Using https://github.com/Azure/setup-helm/releases/tag/v3.5
-        uses: azure/setup-helm@5119fcb9089d432beecbf79bb2c7915207344b78
+        # Using https://github.com/Azure/setup-helm/releases/tag/v4.2
+        uses: azure/setup-helm@fe7b79cd5ee1e45176fcad797de68ecaf3ca4814
        with:
          version: ${{ env.HELM_VERSION }}

@@ -169,7 +169,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
        with:
          # If inputs.ref is empty, it'll resolve to the default branch
          ref: ${{ inputs.ref }}
@@ -188,8 +188,8 @@ jobs:
           echo "repository_owner=$(echo ${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT

       - name: Set up Helm
-        # Using https://github.com/Azure/setup-helm/releases/tag/v3.5
-        uses: azure/setup-helm@5119fcb9089d432beecbf79bb2c7915207344b78
+        # Using https://github.com/Azure/setup-helm/releases/tag/v4.2
+        uses: azure/setup-helm@fe7b79cd5ee1e45176fcad797de68ecaf3ca4814
        with:
          version: ${{ env.HELM_VERSION }}
.github/workflows/gha-validate-chart.yaml (vendored, 12 changed lines)

@@ -36,13 +36,13 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
        with:
          fetch-depth: 0

       - name: Set up Helm
-        # Using https://github.com/Azure/setup-helm/releases/tag/v3.5
-        uses: azure/setup-helm@5119fcb9089d432beecbf79bb2c7915207344b78
+        # Using https://github.com/Azure/setup-helm/releases/tag/v4.2
+        uses: azure/setup-helm@fe7b79cd5ee1e45176fcad797de68ecaf3ca4814
        with:
          version: ${{ env.HELM_VERSION }}

@@ -63,7 +63,7 @@ jobs:
           --enable-optional-test container-security-context-readonlyrootfilesystem

       # python is a requirement for the chart-testing action below (supports yamllint among other tests)
-      - uses: actions/setup-python@v4
+      - uses: actions/setup-python@v5
        with:
          python-version: '3.11'

@@ -84,13 +84,13 @@ jobs:
           ct lint --config charts/.ci/ct-config-gha.yaml

       - name: Set up docker buildx
-        uses: docker/setup-buildx-action@v2
+        uses: docker/setup-buildx-action@v3
        if: steps.list-changed.outputs.changed == 'true'
        with:
          version: latest

       - name: Build controller image
-        uses: docker/build-push-action@v3
+        uses: docker/build-push-action@v5
        if: steps.list-changed.outputs.changed == 'true'
        with:
          file: Dockerfile
.github/workflows/global-publish-canary.yaml (vendored, 14 changed lines)

@@ -55,11 +55,11 @@ jobs:
       TARGET_REPO: actions-runner-controller
     steps:
       - name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4

       - name: Get Token
         id: get_workflow_token
-        uses: peter-murray/workflow-application-token-action@8e1ba3bf1619726336414f1014e37f17fbadf1db
+        uses: peter-murray/workflow-application-token-action@dc0413987a085fa17d19df9e47d4677cf81ffef3
        with:
          application_id: ${{ secrets.ACTIONS_ACCESS_APP_ID }}
          application_private_key: ${{ secrets.ACTIONS_ACCESS_PK }}
@@ -90,10 +90,10 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4

       - name: Login to GitHub Container Registry
-        uses: docker/login-action@v2
+        uses: docker/login-action@v3
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
@@ -110,16 +110,16 @@ jobs:
           echo "repository_owner=$(echo ${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT

       - name: Set up QEMU
-        uses: docker/setup-qemu-action@v2
+        uses: docker/setup-qemu-action@v3

       - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v2
+        uses: docker/setup-buildx-action@v3
        with:
          version: latest

       # Unstable builds - run at your own risk
       - name: Build and Push
-        uses: docker/build-push-action@v3
+        uses: docker/build-push-action@v5
        with:
          context: .
          file: ./Dockerfile
.github/workflows/global-run-codeql.yaml (vendored, 4 changed lines)

@@ -25,10 +25,10 @@ jobs:
       security-events: write
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4

       - name: Install Go
-        uses: actions/setup-go@v4
+        uses: actions/setup-go@v5
        with:
          go-version-file: go.mod
@@ -11,7 +11,7 @@ jobs:
   check_for_first_interaction:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - uses: actions/first-interaction@main
        with:
          repo-token: ${{ secrets.GITHUB_TOKEN }}
.github/workflows/go.yaml (vendored, 18 changed lines)

@@ -29,8 +29,8 @@ jobs:
   fmt:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v3
-      - uses: actions/setup-go@v4
+      - uses: actions/checkout@v4
+      - uses: actions/setup-go@v5
        with:
          go-version-file: 'go.mod'
          cache: false
@@ -42,13 +42,13 @@ jobs:
   lint:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v3
-      - uses: actions/setup-go@v4
+      - uses: actions/checkout@v4
+      - uses: actions/setup-go@v5
        with:
          go-version-file: 'go.mod'
          cache: false
       - name: golangci-lint
-        uses: golangci/golangci-lint-action@v3
+        uses: golangci/golangci-lint-action@v6
        with:
          only-new-issues: true
          version: v1.55.2
@@ -56,8 +56,8 @@ jobs:
   generate:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v3
-      - uses: actions/setup-go@v4
+      - uses: actions/checkout@v4
+      - uses: actions/setup-go@v5
        with:
          go-version-file: 'go.mod'
          cache: false
@@ -69,8 +69,8 @@ jobs:
   test:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v3
-      - uses: actions/setup-go@v4
+      - uses: actions/checkout@v4
+      - uses: actions/setup-go@v5
        with:
          go-version-file: 'go.mod'
       - run: make manifests
Makefile (2 changed lines)

@@ -6,7 +6,7 @@ endif
 DOCKER_USER ?= $(shell echo ${DOCKER_IMAGE_NAME} | cut -d / -f1)
 VERSION ?= dev
 COMMIT_SHA = $(shell git rev-parse HEAD)
-RUNNER_VERSION ?= 2.317.0
+RUNNER_VERSION ?= 2.319.1
 TARGETPLATFORM ?= $(shell arch)
 RUNNER_NAME ?= ${DOCKER_USER}/actions-runner
 RUNNER_TAG ?= ${VERSION}
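Because RUNNER_VERSION is assigned with `?=`, the new default of 2.319.1 only applies when nothing else sets the variable; an environment value or a command-line override still wins. A small standalone sketch of that behavior (not this repository's Makefile):

```make
# sketch.mk: ?= assigns only if the variable is not already defined.
RUNNER_VERSION ?= 2.319.1

print-runner-version:
	@echo $(RUNNER_VERSION)

# make -f sketch.mk print-runner-version                         -> 2.319.1
# make -f sketch.mk print-runner-version RUNNER_VERSION=2.320.0  -> 2.320.0
```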
@@ -15,13 +15,13 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.9.2
+version: 0.9.3

 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to
 # follow Semantic Versioning. They should reflect the version the application is using.
 # It is recommended to use it with quotes.
-appVersion: "0.9.2"
+appVersion: "0.9.3"

 home: https://github.com/actions/actions-runner-controller
@@ -79,6 +79,9 @@ spec:
         - "--listener-metrics-endpoint="
         - "--metrics-addr=0"
         {{- end }}
+        {{- range .Values.flags.excludeLabelPropagationPrefixes }}
+        - "--exclude-label-propagation-prefix={{ . }}"
+        {{- end }}
         command:
         - "/manager"
         {{- with .Values.metrics }}
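The three added template lines render one repeated controller flag per configured prefix. As a sketch of the rendering, a values file like the following (the prefix values are illustrative, not defaults) would produce the container args shown underneath:

```yaml
# values.yaml (illustrative)
flags:
  excludeLabelPropagationPrefixes:
    - "argocd.argoproj.io/instance"
    - "example.com/"

# rendered container args (sketch of the template output)
args:
  - "--exclude-label-propagation-prefix=argocd.argoproj.io/instance"
  - "--exclude-label-propagation-prefix=example.com/"
```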
@@ -1035,3 +1035,41 @@ func TestControllerDeployment_MetricsPorts(t *testing.T) {
 		assert.Equal(t, value.frequency, 1, fmt.Sprintf("frequency of %q is not 1", key))
 	}
 }
+
+func TestDeployment_excludeLabelPropagationPrefixes(t *testing.T) {
+	t.Parallel()
+
+	// Path to the helm chart we will test
+	helmChartPath, err := filepath.Abs("../../gha-runner-scale-set-controller")
+	require.NoError(t, err)
+
+	chartContent, err := os.ReadFile(filepath.Join(helmChartPath, "Chart.yaml"))
+	require.NoError(t, err)
+
+	chart := new(Chart)
+	err = yaml.Unmarshal(chartContent, chart)
+	require.NoError(t, err)
+
+	releaseName := "test-arc"
+	namespaceName := "test-" + strings.ToLower(random.UniqueId())
+
+	options := &helm.Options{
+		Logger: logger.Discard,
+		SetValues: map[string]string{
+			"flags.excludeLabelPropagationPrefixes[0]": "prefix.com/",
+			"flags.excludeLabelPropagationPrefixes[1]": "complete.io/label",
+		},
+		KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
+	}
+
+	output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/deployment.yaml"})
+
+	var deployment appsv1.Deployment
+	helm.UnmarshalK8SYaml(t, output, &deployment)
+
+	require.Len(t, deployment.Spec.Template.Spec.Containers, 1, "Expected one container")
+	container := deployment.Spec.Template.Spec.Containers[0]
+
+	assert.Contains(t, container.Args, "--exclude-label-propagation-prefix=prefix.com/")
+	assert.Contains(t, container.Args, "--exclude-label-propagation-prefix=complete.io/label")
+}
@@ -121,3 +121,12 @@ flags:
   ## This can lead to a longer time to apply the change but it will ensure
   ## that you don't have any overprovisioning of runners.
   updateStrategy: "immediate"
+
+  ## Defines a list of prefixes that should not be propagated to internal resources.
+  ## This is useful when you have labels that are used for internal purposes and should not be propagated to internal resources.
+  ## See https://github.com/actions/actions-runner-controller/issues/3533 for more information.
+  ##
+  ## By default, all labels are propagated to internal resources
+  ## Labels that match prefix specified in the list are excluded from propagation.
+  # excludeLabelPropagationPrefixes:
+  #   - "argocd.argoproj.io/instance"
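To opt in, a user uncomments the block and lists the prefixes to withhold from propagation; both a key prefix and a full label key work, as the chart test above exercises with "prefix.com/" and "complete.io/label". A minimal sketch of an override values file (the entries are examples, not defaults):

```yaml
# my-values.yaml (sketch)
flags:
  excludeLabelPropagationPrefixes:
    - "argocd.argoproj.io/instance"   # matches this exact label key
    - "prefix.com/"                   # matches any label key starting with this prefix
```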
@@ -15,13 +15,13 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.9.2
+version: 0.9.3

 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to
 # follow Semantic Versioning. They should reflect the version the application is using.
 # It is recommended to use it with quotes.
-appVersion: "0.9.2"
+appVersion: "0.9.3"

 home: https://github.com/actions/actions-runner-controller
@@ -55,7 +55,7 @@ type AutoscalingListenerReconciler struct {
 	ListenerMetricsAddr     string
 	ListenerMetricsEndpoint string

-	resourceBuilder resourceBuilder
+	ResourceBuilder
 }

 // +kubebuilder:rbac:groups=core,resources=pods,verbs=get;list;watch;create;update;patch;delete
@@ -242,17 +242,27 @@ func (r *AutoscalingListenerReconciler) Reconcile(ctx context.Context, req ctrl.
 		return r.createListenerPod(ctx, &autoscalingRunnerSet, autoscalingListener, serviceAccount, mirrorSecret, log)
 	}

-	// The listener pod failed might mean the mirror secret is out of date
-	// Delete the listener pod and re-create it to make sure the mirror secret is up to date
-	if listenerPod.Status.Phase == corev1.PodFailed && listenerPod.DeletionTimestamp.IsZero() {
-		log.Info("Listener pod failed, deleting it and re-creating it", "namespace", listenerPod.Namespace, "name", listenerPod.Name, "reason", listenerPod.Status.Reason, "message", listenerPod.Status.Message)
-		if err := r.Delete(ctx, listenerPod); err != nil && !kerrors.IsNotFound(err) {
-			log.Error(err, "Unable to delete the listener pod", "namespace", listenerPod.Namespace, "name", listenerPod.Name)
-			return ctrl.Result{}, err
-		}
-	}
+	cs := listenerContainerStatus(listenerPod)
+	switch {
+	case cs == nil:
+		log.Info("Listener pod is not ready", "namespace", listenerPod.Namespace, "name", listenerPod.Name)
+		return ctrl.Result{}, nil
+	case cs.State.Terminated != nil:
+		log.Info("Listener pod is terminated", "namespace", listenerPod.Namespace, "name", listenerPod.Name, "reason", cs.State.Terminated.Reason, "message", cs.State.Terminated.Message)
+
+		if listenerPod.Status.Phase == corev1.PodRunning {
+			if err := r.publishRunningListener(autoscalingListener, false); err != nil {
+				log.Error(err, "Unable to publish runner listener down metric", "namespace", listenerPod.Namespace, "name", listenerPod.Name)
+			}
+		}
+
+		if listenerPod.DeletionTimestamp.IsZero() {
+			log.Info("Deleting the listener pod", "namespace", listenerPod.Namespace, "name", listenerPod.Name)
+			if err := r.Delete(ctx, listenerPod); err != nil && !kerrors.IsNotFound(err) {
+				log.Error(err, "Unable to delete the listener pod", "namespace", listenerPod.Namespace, "name", listenerPod.Name)
+				return ctrl.Result{}, err
+			}
+		}
+		return ctrl.Result{}, nil
+	case cs.State.Running != nil:
 		if err := r.publishRunningListener(autoscalingListener, true); err != nil {
 			log.Error(err, "Unable to publish running listener", "namespace", listenerPod.Namespace, "name", listenerPod.Name)
 			// stop reconciling. We should never get to this point but if we do,
@@ -260,8 +270,8 @@ func (r *AutoscalingListenerReconciler) Reconcile(ctx context.Context, req ctrl.
 			// notify the reconciler again.
 			return ctrl.Result{}, nil
 		}
 		return ctrl.Result{}, nil
 	}

 	return ctrl.Result{}, nil
 }
@@ -373,7 +383,7 @@ func (r *AutoscalingListenerReconciler) cleanupResources(ctx context.Context, au
 }

 func (r *AutoscalingListenerReconciler) createServiceAccountForListener(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, logger logr.Logger) (ctrl.Result, error) {
-	newServiceAccount := r.resourceBuilder.newScaleSetListenerServiceAccount(autoscalingListener)
+	newServiceAccount := r.ResourceBuilder.newScaleSetListenerServiceAccount(autoscalingListener)

 	if err := ctrl.SetControllerReference(autoscalingListener, newServiceAccount, r.Scheme); err != nil {
 		return ctrl.Result{}, err
@@ -458,7 +468,7 @@ func (r *AutoscalingListenerReconciler) createListenerPod(ctx context.Context, a

 	logger.Info("Creating listener config secret")

-	podConfig, err := r.resourceBuilder.newScaleSetListenerConfig(autoscalingListener, secret, metricsConfig, cert)
+	podConfig, err := r.ResourceBuilder.newScaleSetListenerConfig(autoscalingListener, secret, metricsConfig, cert)
 	if err != nil {
 		logger.Error(err, "Failed to build listener config secret")
 		return ctrl.Result{}, err
@@ -477,7 +487,7 @@ func (r *AutoscalingListenerReconciler) createListenerPod(ctx context.Context, a
 		return ctrl.Result{Requeue: true}, nil
 	}

-	newPod, err := r.resourceBuilder.newScaleSetListenerPod(autoscalingListener, &podConfig, serviceAccount, secret, metricsConfig, envs...)
+	newPod, err := r.ResourceBuilder.newScaleSetListenerPod(autoscalingListener, &podConfig, serviceAccount, secret, metricsConfig, envs...)
 	if err != nil {
 		logger.Error(err, "Failed to build listener pod")
 		return ctrl.Result{}, err
@@ -537,7 +547,7 @@ func (r *AutoscalingListenerReconciler) certificate(ctx context.Context, autosca
 }

 func (r *AutoscalingListenerReconciler) createSecretsForListener(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, secret *corev1.Secret, logger logr.Logger) (ctrl.Result, error) {
-	newListenerSecret := r.resourceBuilder.newScaleSetListenerSecretMirror(autoscalingListener, secret)
+	newListenerSecret := r.ResourceBuilder.newScaleSetListenerSecretMirror(autoscalingListener, secret)

 	if err := ctrl.SetControllerReference(autoscalingListener, newListenerSecret, r.Scheme); err != nil {
 		return ctrl.Result{}, err
@@ -609,7 +619,7 @@ func (r *AutoscalingListenerReconciler) updateSecretsForListener(ctx context.Con
 }

 func (r *AutoscalingListenerReconciler) createRoleForListener(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, logger logr.Logger) (ctrl.Result, error) {
-	newRole := r.resourceBuilder.newScaleSetListenerRole(autoscalingListener)
+	newRole := r.ResourceBuilder.newScaleSetListenerRole(autoscalingListener)

 	logger.Info("Creating listener role", "namespace", newRole.Namespace, "name", newRole.Name, "rules", newRole.Rules)
 	if err := r.Create(ctx, newRole); err != nil {
@@ -637,7 +647,7 @@ func (r *AutoscalingListenerReconciler) updateRoleForListener(ctx context.Contex
 }

 func (r *AutoscalingListenerReconciler) createRoleBindingForListener(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, listenerRole *rbacv1.Role, serviceAccount *corev1.ServiceAccount, logger logr.Logger) (ctrl.Result, error) {
-	newRoleBinding := r.resourceBuilder.newScaleSetListenerRoleBinding(autoscalingListener, listenerRole, serviceAccount)
+	newRoleBinding := r.ResourceBuilder.newScaleSetListenerRoleBinding(autoscalingListener, listenerRole, serviceAccount)

 	logger.Info("Creating listener role binding",
 		"namespace", newRoleBinding.Namespace,
@@ -722,3 +732,13 @@ func (r *AutoscalingListenerReconciler) SetupWithManager(mgr ctrl.Manager) error
 		WithEventFilter(predicate.ResourceVersionChangedPredicate{}).
 		Complete(r)
 }
+
+func listenerContainerStatus(pod *corev1.Pod) *corev1.ContainerStatus {
+	for i := range pod.Status.ContainerStatuses {
+		cs := &pod.Status.ContainerStatuses[i]
+		if cs.Name == autoscalingListenerContainerName {
+			return cs
+		}
+	}
+	return nil
+}
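Several of the hunks above only change call sites from r.resourceBuilder.newX(...) to r.ResourceBuilder.newX(...), which follows from replacing the unexported struct field with an embedded field. Below is a compact, self-contained sketch of that refactor with simplified names (it is not the real controller code); the diff keeps the explicit r.ResourceBuilder.method(...) spelling, which embedding also permits.

```go
package sketch

// Before: the builder is an unexported field, so callers must spell out r.rb.build(),
// and the field cannot be set from outside the package.
type builder struct{}

func (builder) build() string { return "listener-pod" }

type reconcilerBefore struct {
	rb builder
}

func (r *reconcilerBefore) reconcile() string { return r.rb.build() }

// After: the builder is embedded; its methods are promoted (r.Build() works),
// the explicit r.Builder.Build() form still works, and the exported field can
// be injected or swapped for a fake by callers in other packages.
type Builder struct{}

func (Builder) Build() string { return "listener-pod" }

type reconcilerAfter struct {
	Builder
}

func (r *reconcilerAfter) reconcile() string { return r.Builder.Build() }
```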
@@ -25,7 +25,7 @@ import (
 )

 const (
-	autoscalingListenerTestTimeout     = time.Second * 5
+	autoscalingListenerTestTimeout     = time.Second * 20
 	autoscalingListenerTestInterval    = time.Millisecond * 250
 	autoscalingListenerTestGitHubToken = "gh_token"
 )
@@ -351,6 +351,53 @@ var _ = Describe("Test AutoScalingListener controller", func() {
 				autoscalingListenerTestInterval).Should(BeEquivalentTo(rulesForListenerRole([]string{updated.Spec.EphemeralRunnerSetName})), "Role should be updated")
 		})

+		It("It should re-create pod whenever listener container is terminated", func() {
+			// Waiting for the pod is created
+			pod := new(corev1.Pod)
+			Eventually(
+				func() (string, error) {
+					err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingListener.Name, Namespace: autoscalingListener.Namespace}, pod)
+					if err != nil {
+						return "", err
+					}
+
+					return pod.Name, nil
+				},
+				autoscalingListenerTestTimeout,
+				autoscalingListenerTestInterval,
+			).Should(BeEquivalentTo(autoscalingListener.Name), "Pod should be created")
+
+			oldPodUID := string(pod.UID)
+			updated := pod.DeepCopy()
+			updated.Status.ContainerStatuses = []corev1.ContainerStatus{
+				{
+					Name: autoscalingListenerContainerName,
+					State: corev1.ContainerState{
+						Terminated: &corev1.ContainerStateTerminated{
+							ExitCode: 0,
+						},
+					},
+				},
+			}
+			err := k8sClient.Status().Update(ctx, updated)
+			Expect(err).NotTo(HaveOccurred(), "failed to update test pod")
+
+			// Waiting for the new pod is created
+			Eventually(
+				func() (string, error) {
+					pod := new(corev1.Pod)
+					err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingListener.Name, Namespace: autoscalingListener.Namespace}, pod)
+					if err != nil {
+						return "", err
+					}
+
+					return string(pod.UID), nil
+				},
+				autoscalingListenerTestTimeout,
+				autoscalingListenerTestInterval,
+			).ShouldNot(BeEquivalentTo(oldPodUID), "Pod should be re-created")
+		})
+
 		It("It should update mirror secrets to match secret used by AutoScalingRunnerSet", func() {
 			// Waiting for the pod is created
 			pod := new(corev1.Pod)
@@ -373,7 +420,18 @@ var _ = Describe("Test AutoScalingListener controller", func() {
 			Expect(err).NotTo(HaveOccurred(), "failed to update test secret")

 			updatedPod := pod.DeepCopy()
-			updatedPod.Status.Phase = corev1.PodFailed
+			// Ignore status running and consult the container state
+			updatedPod.Status.Phase = corev1.PodRunning
+			updatedPod.Status.ContainerStatuses = []corev1.ContainerStatus{
+				{
+					Name: autoscalingListenerContainerName,
+					State: corev1.ContainerState{
+						Terminated: &corev1.ContainerStateTerminated{
+							ExitCode: 1,
+						},
+					},
+				},
+			}
 			err = k8sClient.Status().Update(ctx, updatedPod)
 			Expect(err).NotTo(HaveOccurred(), "failed to update test pod to failed")

@@ -420,6 +478,7 @@ var _ = Describe("Test AutoScalingListener customization", func() {
 	var autoscalingListener *v1alpha1.AutoscalingListener

 	var runAsUser int64 = 1001
+	const sidecarContainerName = "sidecar"

 	listenerPodTemplate := corev1.PodTemplateSpec{
 		Spec: corev1.PodSpec{
@@ -432,7 +491,7 @@ var _ = Describe("Test AutoScalingListener customization", func() {
 					},
 				},
 				{
-					Name:            "sidecar",
+					Name:            sidecarContainerName,
 					ImagePullPolicy: corev1.PullIfNotPresent,
 					Image:           "busybox",
 				},
@@ -525,7 +584,8 @@ var _ = Describe("Test AutoScalingListener customization", func() {
 				return created.Finalizers[0], nil
 			},
 			autoscalingListenerTestTimeout,
-			autoscalingListenerTestInterval).Should(BeEquivalentTo(autoscalingListenerFinalizerName), "AutoScalingListener should have a finalizer")
+			autoscalingListenerTestInterval,
+		).Should(BeEquivalentTo(autoscalingListenerFinalizerName), "AutoScalingListener should have a finalizer")

 		// Check if config is created
 		config := new(corev1.Secret)
@@ -559,11 +619,100 @@ var _ = Describe("Test AutoScalingListener customization", func() {
 			Expect(pod.Spec.Containers[0].SecurityContext.RunAsUser).To(Equal(&runAsUser), "Pod should have the correct security context")
 			Expect(pod.Spec.Containers[0].ImagePullPolicy).To(Equal(corev1.PullAlways), "Pod should have the correct image pull policy")

-			Expect(pod.Spec.Containers[1].Name).To(Equal("sidecar"), "Pod should have the correct container name")
+			Expect(pod.Spec.Containers[1].Name).To(Equal(sidecarContainerName), "Pod should have the correct container name")
 			Expect(pod.Spec.Containers[1].Image).To(Equal("busybox"), "Pod should have the correct image")
 			Expect(pod.Spec.Containers[1].ImagePullPolicy).To(Equal(corev1.PullIfNotPresent), "Pod should have the correct image pull policy")
 		})
 	})
+
+	Context("When AutoscalingListener pod has interuptions", func() {
+		It("Should re-create pod when it is deleted", func() {
+			pod := new(corev1.Pod)
+			Eventually(
+				func() (string, error) {
+					err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingListener.Name, Namespace: autoscalingListener.Namespace}, pod)
+					if err != nil {
+						return "", err
+					}
+
+					return pod.Name, nil
+				},
+				autoscalingListenerTestTimeout,
+				autoscalingListenerTestInterval,
+			).Should(BeEquivalentTo(autoscalingListener.Name), "Pod should be created")
+
+			Expect(len(pod.Spec.Containers)).To(Equal(2), "Pod should have 2 containers")
+			oldPodUID := string(pod.UID)
+
+			err := k8sClient.Delete(ctx, pod)
+			Expect(err).NotTo(HaveOccurred(), "failed to delete pod")
+
+			pod = new(corev1.Pod)
+			Eventually(
+				func() (string, error) {
+					err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingListener.Name, Namespace: autoscalingListener.Namespace}, pod)
+					if err != nil {
+						return "", err
+					}
+
+					return string(pod.UID), nil
+				},
+				autoscalingListenerTestTimeout,
+				autoscalingListenerTestInterval,
+			).ShouldNot(BeEquivalentTo(oldPodUID), "Pod should be created")
+		})
+
+		It("Should re-create pod when the listener pod is terminated", func() {
+			pod := new(corev1.Pod)
+			Eventually(
+				func() (string, error) {
+					err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingListener.Name, Namespace: autoscalingListener.Namespace}, pod)
+					if err != nil {
+						return "", err
+					}
+
+					return pod.Name, nil
+				},
+				autoscalingListenerTestTimeout,
+				autoscalingListenerTestInterval,
+			).Should(BeEquivalentTo(autoscalingListener.Name), "Pod should be created")
+
+			updated := pod.DeepCopy()
+			oldPodUID := string(pod.UID)
+			updated.Status.ContainerStatuses = []corev1.ContainerStatus{
+				{
+					Name: autoscalingListenerContainerName,
+					State: corev1.ContainerState{
+						Terminated: &corev1.ContainerStateTerminated{
+							ExitCode: 1,
+						},
+					},
+				},
+				{
+					Name: sidecarContainerName,
+					State: corev1.ContainerState{
+						Running: &corev1.ContainerStateRunning{},
+					},
+				},
+			}
+			err := k8sClient.Status().Update(ctx, updated)
+			Expect(err).NotTo(HaveOccurred(), "failed to update pod status")
+
+			pod = new(corev1.Pod)
+			Eventually(
+				func() (string, error) {
+					err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingListener.Name, Namespace: autoscalingListener.Namespace}, pod)
+					if err != nil {
+						return "", err
+					}
+
+					return string(pod.UID), nil
+				},
+				autoscalingListenerTestTimeout,
+				autoscalingListenerTestInterval,
+			).ShouldNot(BeEquivalentTo(oldPodUID), "Pod should be created")
+		})
+	})
 })

 var _ = Describe("Test AutoScalingListener controller with proxy", func() {
@@ -887,7 +1036,6 @@ var _ = Describe("Test AutoScalingListener controller with template modification

 				g.Expect(pod.ObjectMeta.Annotations).To(HaveKeyWithValue("test-annotation-key", "test-annotation-value"), "pod annotations should be copied from runner set template")
 				g.Expect(pod.ObjectMeta.Labels).To(HaveKeyWithValue("test-label-key", "test-label-value"), "pod labels should be copied from runner set template")
-
 			},
 			autoscalingListenerTestTimeout,
 			autoscalingListenerTestInterval).Should(Succeed(), "failed to create listener pod with proxy details")
@@ -79,8 +79,7 @@ type AutoscalingRunnerSetReconciler struct {
 	DefaultRunnerScaleSetListenerImagePullSecrets []string
 	UpdateStrategy                                UpdateStrategy
 	ActionsClient                                 actions.MultiClient
-
-	resourceBuilder resourceBuilder
+	ResourceBuilder
 }

 // +kubebuilder:rbac:groups=actions.github.com,resources=autoscalingrunnersets,verbs=get;list;watch;create;update;patch;delete
@@ -623,7 +622,7 @@ func (r *AutoscalingRunnerSetReconciler) deleteRunnerScaleSet(ctx context.Contex
 }

 func (r *AutoscalingRunnerSetReconciler) createEphemeralRunnerSet(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, log logr.Logger) (ctrl.Result, error) {
-	desiredRunnerSet, err := r.resourceBuilder.newEphemeralRunnerSet(autoscalingRunnerSet)
+	desiredRunnerSet, err := r.ResourceBuilder.newEphemeralRunnerSet(autoscalingRunnerSet)
 	if err != nil {
 		log.Error(err, "Could not create EphemeralRunnerSet")
 		return ctrl.Result{}, err
@@ -652,7 +651,7 @@ func (r *AutoscalingRunnerSetReconciler) createAutoScalingListenerForRunnerSet(c
 		})
 	}

-	autoscalingListener, err := r.resourceBuilder.newAutoScalingListener(autoscalingRunnerSet, ephemeralRunnerSet, r.ControllerNamespace, r.DefaultRunnerScaleSetListenerImage, imagePullSecrets)
+	autoscalingListener, err := r.ResourceBuilder.newAutoScalingListener(autoscalingRunnerSet, ephemeralRunnerSet, r.ControllerNamespace, r.DefaultRunnerScaleSetListenerImage, imagePullSecrets)
 	if err != nil {
 		log.Error(err, "Could not create AutoscalingListener spec")
 		return ctrl.Result{}, err
@@ -34,7 +34,7 @@ import (
 )

 const (
-	autoscalingRunnerSetTestTimeout     = time.Second * 5
+	autoscalingRunnerSetTestTimeout     = time.Second * 20
 	autoscalingRunnerSetTestInterval    = time.Millisecond * 250
 	autoscalingRunnerSetTestGitHubToken = "gh_token"
 )
@@ -49,10 +49,10 @@ const (
 // EphemeralRunnerReconciler reconciles a EphemeralRunner object
 type EphemeralRunnerReconciler struct {
 	client.Client
-	Log             logr.Logger
-	Scheme          *runtime.Scheme
-	ActionsClient   actions.MultiClient
-	resourceBuilder resourceBuilder
+	Log           logr.Logger
+	Scheme        *runtime.Scheme
+	ActionsClient actions.MultiClient
+	ResourceBuilder
 }

 // +kubebuilder:rbac:groups=actions.github.com,resources=ephemeralrunners,verbs=get;list;watch;create;update;patch;delete
@@ -178,7 +178,9 @@ func (r *EphemeralRunnerReconciler) Reconcile(ctx context.Context, req ctrl.Requ

 	if ephemeralRunner.Status.RunnerId == 0 {
 		log.Info("Creating new ephemeral runner registration and updating status with runner config")
-		return r.updateStatusWithRunnerConfig(ctx, ephemeralRunner, log)
+		if r, err := r.updateStatusWithRunnerConfig(ctx, ephemeralRunner, log); r != nil {
+			return *r, err
+		}
 	}

 	secret := new(corev1.Secret)
@@ -189,7 +191,17 @@ func (r *EphemeralRunnerReconciler) Reconcile(ctx context.Context, req ctrl.Requ
 		}
 		// create secret if not created
 		log.Info("Creating new ephemeral runner secret for jitconfig.")
-		return r.createSecret(ctx, ephemeralRunner, log)
+		if r, err := r.createSecret(ctx, ephemeralRunner, log); r != nil {
+			return *r, err
+		}
+
+		// Retry to get the secret that was just created.
+		// Otherwise, even though we want to continue to create the pod,
+		// it fails due to the missing secret resulting in an invalid pod spec.
+		if err := r.Get(ctx, req.NamespacedName, secret); err != nil {
+			log.Error(err, "Failed to fetch secret")
+			return ctrl.Result{}, err
+		}
 	}

 	pod := new(corev1.Pod)
@@ -511,12 +523,12 @@ func (r *EphemeralRunnerReconciler) deletePodAsFailed(ctx context.Context, ephem

 // updateStatusWithRunnerConfig fetches runtime configuration needed by the runner
 // This method should always set .status.runnerId and .status.runnerJITConfig
-func (r *EphemeralRunnerReconciler) updateStatusWithRunnerConfig(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) (ctrl.Result, error) {
+func (r *EphemeralRunnerReconciler) updateStatusWithRunnerConfig(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) (*ctrl.Result, error) {
 	// Runner is not registered with the service. We need to register it first
 	log.Info("Creating ephemeral runner JIT config")
 	actionsClient, err := r.actionsClientFor(ctx, ephemeralRunner)
 	if err != nil {
-		return ctrl.Result{}, fmt.Errorf("failed to get actions client for generating JIT config: %v", err)
+		return &ctrl.Result{}, fmt.Errorf("failed to get actions client for generating JIT config: %v", err)
 	}

 	jitSettings := &actions.RunnerScaleSetJitRunnerSetting{
@@ -534,12 +546,12 @@ func (r *EphemeralRunnerReconciler) updateStatusWithRunnerConfig(ctx context.Con
 	if err != nil {
 		actionsError := &actions.ActionsError{}
 		if !errors.As(err, &actionsError) {
-			return ctrl.Result{}, fmt.Errorf("failed to generate JIT config with generic error: %v", err)
+			return &ctrl.Result{}, fmt.Errorf("failed to generate JIT config with generic error: %v", err)
 		}

 		if actionsError.StatusCode != http.StatusConflict ||
 			!actionsError.IsException("AgentExistsException") {
-			return ctrl.Result{}, fmt.Errorf("failed to generate JIT config with Actions service error: %v", err)
+			return &ctrl.Result{}, fmt.Errorf("failed to generate JIT config with Actions service error: %v", err)
 		}

 		// If the runner with the name we want already exists it means:
@@ -552,12 +564,12 @@ func (r *EphemeralRunnerReconciler) updateStatusWithRunnerConfig(ctx context.Con
 		log.Info("Getting runner jit config failed with conflict error, trying to get the runner by name", "runnerName", ephemeralRunner.Name)
 		existingRunner, err := actionsClient.GetRunnerByName(ctx, ephemeralRunner.Name)
 		if err != nil {
-			return ctrl.Result{}, fmt.Errorf("failed to get runner by name: %v", err)
+			return &ctrl.Result{}, fmt.Errorf("failed to get runner by name: %v", err)
 		}

 		if existingRunner == nil {
 			log.Info("Runner with the same name does not exist, re-queuing the reconciliation")
-			return ctrl.Result{Requeue: true}, nil
+			return &ctrl.Result{Requeue: true}, nil
 		}

 		log.Info("Found the runner with the same name", "runnerId", existingRunner.Id, "runnerScaleSetId", existingRunner.RunnerScaleSetId)
@@ -565,16 +577,16 @@ func (r *EphemeralRunnerReconciler) updateStatusWithRunnerConfig(ctx context.Con
 			log.Info("Removing the runner with the same name")
 			err := actionsClient.RemoveRunner(ctx, int64(existingRunner.Id))
 			if err != nil {
-				return ctrl.Result{}, fmt.Errorf("failed to remove runner from the service: %v", err)
+				return &ctrl.Result{}, fmt.Errorf("failed to remove runner from the service: %v", err)
 			}

 			log.Info("Removed the runner with the same name, re-queuing the reconciliation")
-			return ctrl.Result{Requeue: true}, nil
+			return &ctrl.Result{Requeue: true}, nil
 		}

 		// TODO: Do we want to mark the ephemeral runner as failed, and let EphemeralRunnerSet to clean it up, so we can recover from this situation?
 		// The situation is that the EphemeralRunner's name is already used by something else to register a runner, and we can't take the control back.
-		return ctrl.Result{}, fmt.Errorf("runner with the same name but doesn't belong to this RunnerScaleSet: %v", err)
+		return &ctrl.Result{}, fmt.Errorf("runner with the same name but doesn't belong to this RunnerScaleSet: %v", err)
 	}
 	log.Info("Created ephemeral runner JIT config", "runnerId", jitConfig.Runner.Id)

@@ -585,11 +597,20 @@ func (r *EphemeralRunnerReconciler) updateStatusWithRunnerConfig(ctx context.Con
 		obj.Status.RunnerJITConfig = jitConfig.EncodedJITConfig
 	})
 	if err != nil {
-		return ctrl.Result{}, fmt.Errorf("failed to update runner status for RunnerId/RunnerName/RunnerJITConfig: %v", err)
+		return &ctrl.Result{}, fmt.Errorf("failed to update runner status for RunnerId/RunnerName/RunnerJITConfig: %v", err)
 	}

+	// We want to continue without a requeue for faster pod creation.
+	//
+	// To do so, we update the status in-place, so that both continuing the loop and
+	// and requeuing and skipping updateStatusWithRunnerConfig in the next loop, will
+	// have the same effect.
+	ephemeralRunner.Status.RunnerId = jitConfig.Runner.Id
+	ephemeralRunner.Status.RunnerName = jitConfig.Runner.Name
+	ephemeralRunner.Status.RunnerJITConfig = jitConfig.EncodedJITConfig
+
 	log.Info("Updated ephemeral runner status with runnerId and runnerJITConfig")
-	return ctrl.Result{}, nil
+	return nil, nil
 }

 func (r *EphemeralRunnerReconciler) createPod(ctx context.Context, runner *v1alpha1.EphemeralRunner, secret *corev1.Secret, log logr.Logger) (ctrl.Result, error) {
@@ -642,7 +663,7 @@ func (r *EphemeralRunnerReconciler) createPod(ctx context.Context, runner *v1alp
 	}

 	log.Info("Creating new pod for ephemeral runner")
-	newPod := r.resourceBuilder.newEphemeralRunnerPod(ctx, runner, secret, envs...)
+	newPod := r.ResourceBuilder.newEphemeralRunnerPod(ctx, runner, secret, envs...)

 	if err := ctrl.SetControllerReference(runner, newPod, r.Scheme); err != nil {
 		log.Error(err, "Failed to set controller reference to a new pod")
@@ -665,21 +686,21 @@ func (r *EphemeralRunnerReconciler) createPod(ctx context.Context, runner *v1alp
 	return ctrl.Result{}, nil
 }

-func (r *EphemeralRunnerReconciler) createSecret(ctx context.Context, runner *v1alpha1.EphemeralRunner, log logr.Logger) (ctrl.Result, error) {
+func (r *EphemeralRunnerReconciler) createSecret(ctx context.Context, runner *v1alpha1.EphemeralRunner, log logr.Logger) (*ctrl.Result, error) {
 	log.Info("Creating new secret for ephemeral runner")
-	jitSecret := r.resourceBuilder.newEphemeralRunnerJitSecret(runner)
+	jitSecret := r.ResourceBuilder.newEphemeralRunnerJitSecret(runner)

 	if err := ctrl.SetControllerReference(runner, jitSecret, r.Scheme); err != nil {
-		return ctrl.Result{}, fmt.Errorf("failed to set controller reference: %v", err)
+		return &ctrl.Result{}, fmt.Errorf("failed to set controller reference: %v", err)
 	}

 	log.Info("Created new secret spec for ephemeral runner")
 	if err := r.Create(ctx, jitSecret); err != nil {
-		return ctrl.Result{}, fmt.Errorf("failed to create jit secret: %v", err)
+		return &ctrl.Result{}, fmt.Errorf("failed to create jit secret: %v", err)
 	}

 	log.Info("Created ephemeral runner secret", "secretName", jitSecret.Name)
-	return ctrl.Result{Requeue: true}, nil
+	return nil, nil
 }

 // updateRunStatusFromPod is responsible for updating non-exiting statuses.
@@ -823,12 +844,14 @@ func (r *EphemeralRunnerReconciler) deleteRunnerFromService(ctx context.Context,
 }

 // SetupWithManager sets up the controller with the Manager.
-func (r *EphemeralRunnerReconciler) SetupWithManager(mgr ctrl.Manager) error {
-	return ctrl.NewControllerManagedBy(mgr).
-		For(&v1alpha1.EphemeralRunner{}).
-		Owns(&corev1.Pod{}).
-		WithEventFilter(predicate.ResourceVersionChangedPredicate{}).
-		Complete(r)
+func (r *EphemeralRunnerReconciler) SetupWithManager(mgr ctrl.Manager, opts ...Option) error {
+	return builderWithOptions(
+		ctrl.NewControllerManagedBy(mgr).
+			For(&v1alpha1.EphemeralRunner{}).
+			Owns(&corev1.Pod{}).
+			WithEventFilter(predicate.ResourceVersionChangedPredicate{}),
+		opts,
+	).Complete(r)
 }

 func runnerContainerStatus(pod *corev1.Pod) *corev1.ContainerStatus {
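In this file, updateStatusWithRunnerConfig and createSecret now return *ctrl.Result instead of ctrl.Result: a nil result tells the caller to keep going within the same reconcile pass (which is how the runner pod can be created right after the status update, without a requeue, as the added comment explains), while a non-nil result is propagated as-is. A minimal sketch of that convention with simplified signatures, assuming the controller-runtime module is available (this is not the actual reconciler):

```go
package sketch

import (
	ctrl "sigs.k8s.io/controller-runtime"
)

// ensureSecret mimics the new helper shape: (nil, nil) means "continue in the
// same pass"; a non-nil *ctrl.Result (with or without an error) means "return it".
func ensureSecret() (*ctrl.Result, error) {
	// ... create the JIT secret here ...
	return nil, nil
}

func reconcileStep() (ctrl.Result, error) {
	if res, err := ensureSecret(); res != nil {
		return *res, err
	}
	// nil result: fall through and create the runner pod without a requeue.
	return ctrl.Result{}, nil
}
```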
@@ -29,9 +29,9 @@ import (
 )

 const (
-	timeout     = time.Second * 10
-	interval    = time.Millisecond * 250
-	runnerImage = "ghcr.io/actions/actions-runner:latest"
+	ephemeralRunnerTimeout  = time.Second * 20
+	ephemeralRunnerInterval = time.Millisecond * 250
+	runnerImage             = "ghcr.io/actions/actions-runner:latest"
 )

 func newExampleRunner(name, namespace, configSecretName string) *v1alpha1.EphemeralRunner {
@@ -133,8 +133,8 @@ var _ = Describe("EphemeralRunner", func() {
 				n := len(created.Finalizers) // avoid capacity mismatch
 				return created.Finalizers[:n:n], nil
 			},
-			timeout,
-			interval,
+			ephemeralRunnerTimeout,
+			ephemeralRunnerInterval,
 		).Should(BeEquivalentTo([]string{ephemeralRunnerFinalizerName, ephemeralRunnerActionsFinalizerName}))

 		Eventually(
@@ -147,8 +147,8 @@ var _ = Describe("EphemeralRunner", func() {
 				_, ok := secret.Data[jitTokenKey]
 				return ok, nil
 			},
-			timeout,
-			interval,
+			ephemeralRunnerTimeout,
+			ephemeralRunnerInterval,
 		).Should(BeEquivalentTo(true))

 		Eventually(
@@ -160,8 +160,8 @@ var _ = Describe("EphemeralRunner", func() {

 				return pod.Name, nil
 			},
-			timeout,
-			interval,
+			ephemeralRunnerTimeout,
+			ephemeralRunnerInterval,
 		).Should(BeEquivalentTo(ephemeralRunner.Name))
 	})

@@ -184,8 +184,8 @@ var _ = Describe("EphemeralRunner", func() {
 				}
 				return true, nil
 			},
-			timeout,
-			interval,
+			ephemeralRunnerTimeout,
+			ephemeralRunnerInterval,
 		).Should(BeEquivalentTo(true))
 	})

@@ -203,7 +203,7 @@ var _ = Describe("EphemeralRunner", func() {
 				return "", nil
 			}
 			return updated.Status.Phase, nil
-		}, timeout, interval).Should(BeEquivalentTo(corev1.PodFailed))
+		}, ephemeralRunnerTimeout, ephemeralRunnerInterval).Should(BeEquivalentTo(corev1.PodFailed))
 		Expect(updated.Status.Reason).Should(Equal("InvalidPod"))
 		Expect(updated.Status.Message).Should(Equal("Failed to create the pod: pods \"invalid-ephemeral-runner\" is forbidden: no PriorityClass with name notexist was found"))
 	})
@@ -247,8 +247,8 @@ var _ = Describe("EphemeralRunner", func() {
 				}
 				return true, nil
 			},
-			timeout,
-			interval,
+			ephemeralRunnerTimeout,
+			ephemeralRunnerInterval,
 		).Should(BeEquivalentTo(true))

 		// create runner linked secret
@@ -273,8 +273,8 @@ var _ = Describe("EphemeralRunner", func() {
 				}
 				return true, nil
 			},
-			timeout,
-			interval,
+			ephemeralRunnerTimeout,
+			ephemeralRunnerInterval,
 		).Should(BeEquivalentTo(true))

 		err = k8sClient.Delete(ctx, ephemeralRunner)
@@ -289,8 +289,8 @@ var _ = Describe("EphemeralRunner", func() {
 				}
 				return kerrors.IsNotFound(err), nil
 			},
-			timeout,
-			interval,
+			ephemeralRunnerTimeout,
+			ephemeralRunnerInterval,
 		).Should(BeEquivalentTo(true))

 		Eventually(
@@ -302,8 +302,8 @@ var _ = Describe("EphemeralRunner", func() {
 				}
 				return kerrors.IsNotFound(err), nil
 			},
-			timeout,
-			interval,
+			ephemeralRunnerTimeout,
+			ephemeralRunnerInterval,
 		).Should(BeEquivalentTo(true))

 		Eventually(
@@ -315,8 +315,8 @@ var _ = Describe("EphemeralRunner", func() {
 				}
 				return kerrors.IsNotFound(err), nil
 			},
-			timeout,
-			interval,
+			ephemeralRunnerTimeout,
+			ephemeralRunnerInterval,
 		).Should(BeEquivalentTo(true))

 		Eventually(
@@ -328,8 +328,8 @@ var _ = Describe("EphemeralRunner", func() {
 				}
 				return kerrors.IsNotFound(err), nil
 			},
-			timeout,
-			interval,
+			ephemeralRunnerTimeout,
+			ephemeralRunnerInterval,
 		).Should(BeEquivalentTo(true))

 		Eventually(
@@ -341,8 +341,8 @@ var _ = Describe("EphemeralRunner", func() {
 				}
 				return kerrors.IsNotFound(err), nil
 			},
-			timeout,
-			interval,
+			ephemeralRunnerTimeout,
+			ephemeralRunnerInterval,
 		).Should(BeEquivalentTo(true))
 	})

@@ -356,8 +356,8 @@ var _ = Describe("EphemeralRunner", func() {
 				}
 				return updatedEphemeralRunner.Status.RunnerId, nil
 			},
-			timeout,
-			interval,
+			ephemeralRunnerTimeout,
+			ephemeralRunnerInterval,
 		).Should(BeNumerically(">", 0))
 	})

@@ -371,8 +371,8 @@ var _ = Describe("EphemeralRunner", func() {
 				}
 				return true, nil
 			},
-			timeout,
-			interval,
+			ephemeralRunnerTimeout,
+			ephemeralRunnerInterval,
 		).Should(BeEquivalentTo(true))

 		for _, phase := range []corev1.PodPhase{corev1.PodRunning, corev1.PodPending} {
@@ -395,8 +395,8 @@ var _ = Describe("EphemeralRunner", func() {
 				}
 				return updated.Status.Phase, nil
 			},
-			timeout,
-			interval,
+			ephemeralRunnerTimeout,
+			ephemeralRunnerInterval,
 		).Should(BeEquivalentTo(phase))
||||
}
|
||||
})
|
||||
@@ -411,8 +411,8 @@ var _ = Describe("EphemeralRunner", func() {
|
||||
}
|
||||
return true, nil
|
||||
},
|
||||
timeout,
|
||||
interval,
|
||||
ephemeralRunnerTimeout,
|
||||
ephemeralRunnerInterval,
|
||||
).Should(BeEquivalentTo(true))
|
||||
|
||||
pod.Status.Phase = corev1.PodRunning
|
||||
@@ -427,7 +427,7 @@ var _ = Describe("EphemeralRunner", func() {
|
||||
}
|
||||
return updated.Status.Phase, nil
|
||||
},
|
||||
timeout,
|
||||
ephemeralRunnerTimeout,
|
||||
).Should(BeEquivalentTo(""))
|
||||
})
|
||||
|
||||
@@ -462,8 +462,8 @@ var _ = Describe("EphemeralRunner", func() {
|
||||
Expect(err).To(BeNil(), "Failed to update pod status")
|
||||
return false, fmt.Errorf("pod haven't failed for 5 times.")
|
||||
},
|
||||
timeout,
|
||||
interval,
|
||||
ephemeralRunnerTimeout,
|
||||
ephemeralRunnerInterval,
|
||||
).Should(BeEquivalentTo(true), "we should stop creating pod after 5 failures")
|
||||
|
||||
// In case we still have pod created due to controller-runtime cache delay, mark the container as exited
|
||||
@@ -488,7 +488,7 @@ var _ = Describe("EphemeralRunner", func() {
|
||||
return "", err
|
||||
}
|
||||
return updated.Status.Reason, nil
|
||||
}, timeout, interval).Should(BeEquivalentTo("TooManyPodFailures"), "Reason should be TooManyPodFailures")
|
||||
}, ephemeralRunnerTimeout, ephemeralRunnerInterval).Should(BeEquivalentTo("TooManyPodFailures"), "Reason should be TooManyPodFailures")
|
||||
|
||||
// EphemeralRunner should not have any pod
|
||||
Eventually(func() (bool, error) {
|
||||
@@ -497,7 +497,7 @@ var _ = Describe("EphemeralRunner", func() {
|
||||
return false, nil
|
||||
}
|
||||
return kerrors.IsNotFound(err), nil
|
||||
}, timeout, interval).Should(BeEquivalentTo(true))
|
||||
}, ephemeralRunnerTimeout, ephemeralRunnerInterval).Should(BeEquivalentTo(true))
|
||||
})
|
||||
|
||||
It("It should re-create pod on eviction", func() {
|
||||
@@ -510,8 +510,8 @@ var _ = Describe("EphemeralRunner", func() {
|
||||
}
|
||||
return true, nil
|
||||
},
|
||||
timeout,
|
||||
interval,
|
||||
ephemeralRunnerTimeout,
|
||||
ephemeralRunnerInterval,
|
||||
).Should(BeEquivalentTo(true))
|
||||
|
||||
pod.Status.Phase = corev1.PodFailed
|
||||
@@ -530,7 +530,7 @@ var _ = Describe("EphemeralRunner", func() {
|
||||
return false, err
|
||||
}
|
||||
return len(updated.Status.Failures) == 1, nil
|
||||
}, timeout, interval).Should(BeEquivalentTo(true))
|
||||
}, ephemeralRunnerTimeout, ephemeralRunnerInterval).Should(BeEquivalentTo(true))
|
||||
|
||||
// should re-create after failure
|
||||
Eventually(
|
||||
@@ -541,8 +541,8 @@ var _ = Describe("EphemeralRunner", func() {
|
||||
}
|
||||
return true, nil
|
||||
},
|
||||
timeout,
|
||||
interval,
|
||||
ephemeralRunnerTimeout,
|
||||
ephemeralRunnerInterval,
|
||||
).Should(BeEquivalentTo(true))
|
||||
})
|
||||
|
||||
@@ -555,8 +555,8 @@ var _ = Describe("EphemeralRunner", func() {
|
||||
}
|
||||
return true, nil
|
||||
},
|
||||
timeout,
|
||||
interval,
|
||||
ephemeralRunnerTimeout,
|
||||
ephemeralRunnerInterval,
|
||||
).Should(BeEquivalentTo(true))
|
||||
|
||||
pod.Status.ContainerStatuses = append(pod.Status.ContainerStatuses, corev1.ContainerStatus{
|
||||
@@ -577,7 +577,7 @@ var _ = Describe("EphemeralRunner", func() {
|
||||
return false, err
|
||||
}
|
||||
return len(updated.Status.Failures) == 1, nil
|
||||
}, timeout, interval).Should(BeEquivalentTo(true))
|
||||
}, ephemeralRunnerTimeout, ephemeralRunnerInterval).Should(BeEquivalentTo(true))
|
||||
|
||||
// should re-create after failure
|
||||
Eventually(
|
||||
@@ -588,8 +588,8 @@ var _ = Describe("EphemeralRunner", func() {
|
||||
}
|
||||
return true, nil
|
||||
},
|
||||
timeout,
|
||||
interval,
|
||||
ephemeralRunnerTimeout,
|
||||
ephemeralRunnerInterval,
|
||||
).Should(BeEquivalentTo(true))
|
||||
})
|
||||
|
||||
@@ -602,8 +602,8 @@ var _ = Describe("EphemeralRunner", func() {
|
||||
}
|
||||
return true, nil
|
||||
},
|
||||
timeout,
|
||||
interval,
|
||||
ephemeralRunnerTimeout,
|
||||
ephemeralRunnerInterval,
|
||||
).Should(BeEquivalentTo(true))
|
||||
|
||||
// first set phase to running
|
||||
@@ -627,8 +627,8 @@ var _ = Describe("EphemeralRunner", func() {
|
||||
}
|
||||
return updated.Status.Phase, nil
|
||||
},
|
||||
timeout,
|
||||
interval,
|
||||
ephemeralRunnerTimeout,
|
||||
ephemeralRunnerInterval,
|
||||
).Should(BeEquivalentTo(corev1.PodRunning))
|
||||
|
||||
// set phase to succeeded
|
||||
@@ -644,7 +644,7 @@ var _ = Describe("EphemeralRunner", func() {
|
||||
}
|
||||
return updated.Status.Phase, nil
|
||||
},
|
||||
timeout,
|
||||
ephemeralRunnerTimeout,
|
||||
).Should(BeEquivalentTo(corev1.PodRunning))
|
||||
})
|
||||
})
|
||||
@@ -700,7 +700,7 @@ var _ = Describe("EphemeralRunner", func() {
|
||||
return false, err
|
||||
}
|
||||
return true, nil
|
||||
}, timeout, interval).Should(BeEquivalentTo(true))
|
||||
}, ephemeralRunnerTimeout, ephemeralRunnerInterval).Should(BeEquivalentTo(true))
|
||||
|
||||
pod.Status.ContainerStatuses = append(pod.Status.ContainerStatuses, corev1.ContainerStatus{
|
||||
Name: EphemeralRunnerContainerName,
|
||||
@@ -720,7 +720,7 @@ var _ = Describe("EphemeralRunner", func() {
|
||||
return "", nil
|
||||
}
|
||||
return updated.Status.Phase, nil
|
||||
}, timeout, interval).Should(BeEquivalentTo(corev1.PodSucceeded))
|
||||
}, ephemeralRunnerTimeout, ephemeralRunnerInterval).Should(BeEquivalentTo(corev1.PodSucceeded))
|
||||
})
|
||||
})
|
||||
|
||||
@@ -800,7 +800,7 @@ var _ = Describe("EphemeralRunner", func() {
|
||||
return proxySuccessfulllyCalled
|
||||
},
|
||||
2*time.Second,
|
||||
interval,
|
||||
ephemeralRunnerInterval,
|
||||
).Should(BeEquivalentTo(true))
|
||||
})
|
||||
|
||||
@@ -825,8 +825,8 @@ var _ = Describe("EphemeralRunner", func() {
|
||||
err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, pod)
|
||||
g.Expect(err).To(BeNil(), "failed to get ephemeral runner pod")
|
||||
},
|
||||
timeout,
|
||||
interval,
|
||||
ephemeralRunnerTimeout,
|
||||
ephemeralRunnerInterval,
|
||||
).Should(Succeed(), "failed to get ephemeral runner pod")
|
||||
|
||||
Expect(pod.Spec.Containers[0].Env).To(ContainElement(corev1.EnvVar{
|
||||
@@ -958,7 +958,7 @@ var _ = Describe("EphemeralRunner", func() {
|
||||
return serverSuccessfullyCalled
|
||||
},
|
||||
2*time.Second,
|
||||
interval,
|
||||
ephemeralRunnerInterval,
|
||||
).Should(BeTrue(), "failed to contact server")
|
||||
})
|
||||
})
|
||||
|
||||
@@ -53,7 +53,7 @@ type EphemeralRunnerSetReconciler struct {
|
||||
|
||||
PublishMetrics bool
|
||||
|
||||
resourceBuilder resourceBuilder
|
||||
ResourceBuilder
|
||||
}
|
||||
|
||||
//+kubebuilder:rbac:groups=actions.github.com,resources=ephemeralrunnersets,verbs=get;list;watch;create;update;patch;delete
|
||||
@@ -360,7 +360,7 @@ func (r *EphemeralRunnerSetReconciler) createEphemeralRunners(ctx context.Contex
|
||||
// Track multiple errors at once and return the bundle.
|
||||
errs := make([]error, 0)
|
||||
for i := 0; i < count; i++ {
|
||||
ephemeralRunner := r.resourceBuilder.newEphemeralRunner(runnerSet)
|
||||
ephemeralRunner := r.ResourceBuilder.newEphemeralRunner(runnerSet)
|
||||
if runnerSet.Spec.EphemeralRunnerSpec.Proxy != nil {
|
||||
ephemeralRunner.Spec.ProxySecretRef = proxyEphemeralRunnerSetSecretName(runnerSet)
|
||||
}
|
||||
|
||||
@@ -30,7 +30,7 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
ephemeralRunnerSetTestTimeout = time.Second * 10
|
||||
ephemeralRunnerSetTestTimeout = time.Second * 20
|
||||
ephemeralRunnerSetTestInterval = time.Millisecond * 250
|
||||
ephemeralRunnerSetTestGitHubToken = "gh_token"
|
||||
)
|
||||
@@ -794,7 +794,6 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
|
||||
}
|
||||
|
||||
return len(runnerList.Items), nil
|
||||
|
||||
},
|
||||
ephemeralRunnerSetTestTimeout,
|
||||
ephemeralRunnerSetTestInterval,
|
||||
@@ -1359,7 +1358,7 @@ var _ = Describe("Test EphemeralRunnerSet controller with proxy settings", func(
|
||||
return proxySuccessfulllyCalled
|
||||
},
|
||||
2*time.Second,
|
||||
interval,
|
||||
ephemeralRunnerInterval,
|
||||
).Should(BeEquivalentTo(true))
|
||||
})
|
||||
})
|
||||
|
||||
90  controllers/actions.github.com/options.go  Normal file
@@ -0,0 +1,90 @@
package actionsgithubcom

import (
"fmt"
"os"
"strconv"

"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/controller"
)

// Options is the optional configuration for the controllers, which can be
// set via command-line flags or environment variables.
type Options struct {
// RunnerMaxConcuncurrentReconciles is the maximum number of concurrent Reconciles which can be run
// by the EphemeralRunnerController.
RunnerMaxConcuncurrentReconciles int
}

// OptionsWithDefault returns the default options.
// This is here to maintain the options and their default values in one place,
// rather than having to correlate those in multiple places.
func OptionsWithDefault() Options {
return Options{
RunnerMaxConcuncurrentReconciles: 1,
}
}

// LoadEnv loads the options from the environment variables.
// This updates the option value only if the environment variable is set.
// If the option is already set (via a command-line flag), the value from the environment variable takes precedence.
func (o *Options) LoadEnv() error {
v, err := o.getEnvInt("RUNNER_MAX_CONCURRENT_RECONCILES")
if err != nil {
return err
}

if v != nil {
o.RunnerMaxConcuncurrentReconciles = *v
}

return nil
}

func (o *Options) getEnvInt(name string) (*int, error) {
s := os.Getenv(name)
if s == "" {
return nil, nil
}

v, err := strconv.Atoi(s)
if err != nil {
return nil, fmt.Errorf("failed to convert %s=%s to int: %w", name, s, err)
}

return &v, nil
}

type Option func(*controller.Options)

// WithMaxConcurrentReconciles sets the maximum number of concurrent Reconciles which can be run.
//
// This is useful to improve the throughput of the controller, but it may also increase the load on the API server and
// the external service (e.g. GitHub API). The default value is 1, as defined by the controller-runtime.
//
// See https://github.com/actions/actions-runner-controller/issues/3021 for more information
// on real-world use cases and the potential impact of this option.
func WithMaxConcurrentReconciles(n int) Option {
return func(b *controller.Options) {
b.MaxConcurrentReconciles = n
}
}

// builderWithOptions applies the given options to the provided builder, if any.
// This is a helper function to avoid the need to import the controller-runtime package in every reconciler source file
// and the command package that creates the controller.
// This is also useful for reducing code duplication around setting controller options in
// multiple reconcilers.
func builderWithOptions(b *builder.Builder, opts []Option) *builder.Builder {
if len(opts) == 0 {
return b
}

var controllerOpts controller.Options
for _, opt := range opts {
opt(&controllerOpts)
}

return b.WithOptions(controllerOpts)
}
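Since options.go is the heart of this change set, here is a minimal, self-contained sketch (not repository code) of how Options behaves: defaults come from OptionsWithDefault, a value parsed from the --runner-max-concurrent-reconciles flag can be written into the struct, and LoadEnv then lets RUNNER_MAX_CONCURRENT_RECONCILES override it, matching the precedence documented in the LoadEnv comment above. The import path is inferred from the file location in this diff.

```go
package main

import (
	"fmt"
	"os"

	actionsgithubcom "github.com/actions/actions-runner-controller/controllers/actions.github.com"
)

func main() {
	opts := actionsgithubcom.OptionsWithDefault() // RunnerMaxConcuncurrentReconciles starts at 1

	// Pretend the --runner-max-concurrent-reconciles flag was parsed as 2.
	opts.RunnerMaxConcuncurrentReconciles = 2

	// The environment variable, when set, takes precedence over the flag value.
	_ = os.Setenv("RUNNER_MAX_CONCURRENT_RECONCILES", "4")
	if err := opts.LoadEnv(); err != nil {
		fmt.Fprintf(os.Stderr, "Error: loading environment variables: %v\n", err)
		os.Exit(1)
	}

	fmt.Println(opts.RunnerMaxConcuncurrentReconciles) // prints 4
}
```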
@@ -8,6 +8,7 @@ import (
|
||||
"math"
|
||||
"net"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
|
||||
"github.com/actions/actions-runner-controller/build"
|
||||
@@ -68,9 +69,11 @@ func SetListenerEntrypoint(entrypoint string) {
|
||||
}
|
||||
}
|
||||
|
||||
type resourceBuilder struct{}
|
||||
type ResourceBuilder struct {
|
||||
ExcludeLabelPropagationPrefixes []string
|
||||
}
|
||||
|
||||
func (b *resourceBuilder) newAutoScalingListener(autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet, namespace, image string, imagePullSecrets []corev1.LocalObjectReference) (*v1alpha1.AutoscalingListener, error) {
|
||||
func (b *ResourceBuilder) newAutoScalingListener(autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet, namespace, image string, imagePullSecrets []corev1.LocalObjectReference) (*v1alpha1.AutoscalingListener, error) {
|
||||
runnerScaleSetId, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdAnnotationKey])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -85,7 +88,7 @@ func (b *resourceBuilder) newAutoScalingListener(autoscalingRunnerSet *v1alpha1.
|
||||
effectiveMinRunners = *autoscalingRunnerSet.Spec.MinRunners
|
||||
}
|
||||
|
||||
labels := mergeLabels(autoscalingRunnerSet.Labels, map[string]string{
|
||||
labels := b.mergeLabels(autoscalingRunnerSet.Labels, map[string]string{
|
||||
LabelKeyGitHubScaleSetNamespace: autoscalingRunnerSet.Namespace,
|
||||
LabelKeyGitHubScaleSetName: autoscalingRunnerSet.Name,
|
||||
LabelKeyKubernetesPartOf: labelValueKubernetesPartOf,
|
||||
@@ -150,7 +153,7 @@ func (lm *listenerMetricsServerConfig) containerPort() (corev1.ContainerPort, er
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (b *resourceBuilder) newScaleSetListenerConfig(autoscalingListener *v1alpha1.AutoscalingListener, secret *corev1.Secret, metricsConfig *listenerMetricsServerConfig, cert string) (*corev1.Secret, error) {
|
||||
func (b *ResourceBuilder) newScaleSetListenerConfig(autoscalingListener *v1alpha1.AutoscalingListener, secret *corev1.Secret, metricsConfig *listenerMetricsServerConfig, cert string) (*corev1.Secret, error) {
|
||||
var (
|
||||
metricsAddr = ""
|
||||
metricsEndpoint = ""
|
||||
@@ -213,7 +216,7 @@ func (b *resourceBuilder) newScaleSetListenerConfig(autoscalingListener *v1alpha
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (b *resourceBuilder) newScaleSetListenerPod(autoscalingListener *v1alpha1.AutoscalingListener, podConfig *corev1.Secret, serviceAccount *corev1.ServiceAccount, secret *corev1.Secret, metricsConfig *listenerMetricsServerConfig, envs ...corev1.EnvVar) (*corev1.Pod, error) {
|
||||
func (b *ResourceBuilder) newScaleSetListenerPod(autoscalingListener *v1alpha1.AutoscalingListener, podConfig *corev1.Secret, serviceAccount *corev1.ServiceAccount, secret *corev1.Secret, metricsConfig *listenerMetricsServerConfig, envs ...corev1.EnvVar) (*corev1.Pod, error) {
|
||||
listenerEnv := []corev1.EnvVar{
|
||||
{
|
||||
Name: "LISTENER_CONFIG_PATH",
|
||||
@@ -406,12 +409,12 @@ func mergeListenerContainer(base, from *corev1.Container) {
|
||||
base.TTY = from.TTY
|
||||
}
|
||||
|
||||
func (b *resourceBuilder) newScaleSetListenerServiceAccount(autoscalingListener *v1alpha1.AutoscalingListener) *corev1.ServiceAccount {
|
||||
func (b *ResourceBuilder) newScaleSetListenerServiceAccount(autoscalingListener *v1alpha1.AutoscalingListener) *corev1.ServiceAccount {
|
||||
return &corev1.ServiceAccount{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: scaleSetListenerServiceAccountName(autoscalingListener),
|
||||
Namespace: autoscalingListener.Namespace,
|
||||
Labels: mergeLabels(autoscalingListener.Labels, map[string]string{
|
||||
Labels: b.mergeLabels(autoscalingListener.Labels, map[string]string{
|
||||
LabelKeyGitHubScaleSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
|
||||
LabelKeyGitHubScaleSetName: autoscalingListener.Spec.AutoscalingRunnerSetName,
|
||||
}),
|
||||
@@ -419,14 +422,14 @@ func (b *resourceBuilder) newScaleSetListenerServiceAccount(autoscalingListener
|
||||
}
|
||||
}
|
||||
|
||||
func (b *resourceBuilder) newScaleSetListenerRole(autoscalingListener *v1alpha1.AutoscalingListener) *rbacv1.Role {
|
||||
func (b *ResourceBuilder) newScaleSetListenerRole(autoscalingListener *v1alpha1.AutoscalingListener) *rbacv1.Role {
|
||||
rules := rulesForListenerRole([]string{autoscalingListener.Spec.EphemeralRunnerSetName})
|
||||
rulesHash := hash.ComputeTemplateHash(&rules)
|
||||
newRole := &rbacv1.Role{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: scaleSetListenerRoleName(autoscalingListener),
|
||||
Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
|
||||
Labels: mergeLabels(autoscalingListener.Labels, map[string]string{
|
||||
Labels: b.mergeLabels(autoscalingListener.Labels, map[string]string{
|
||||
LabelKeyGitHubScaleSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
|
||||
LabelKeyGitHubScaleSetName: autoscalingListener.Spec.AutoscalingRunnerSetName,
|
||||
labelKeyListenerNamespace: autoscalingListener.Namespace,
|
||||
@@ -440,7 +443,7 @@ func (b *resourceBuilder) newScaleSetListenerRole(autoscalingListener *v1alpha1.
|
||||
return newRole
|
||||
}
|
||||
|
||||
func (b *resourceBuilder) newScaleSetListenerRoleBinding(autoscalingListener *v1alpha1.AutoscalingListener, listenerRole *rbacv1.Role, serviceAccount *corev1.ServiceAccount) *rbacv1.RoleBinding {
|
||||
func (b *ResourceBuilder) newScaleSetListenerRoleBinding(autoscalingListener *v1alpha1.AutoscalingListener, listenerRole *rbacv1.Role, serviceAccount *corev1.ServiceAccount) *rbacv1.RoleBinding {
|
||||
roleRef := rbacv1.RoleRef{
|
||||
Kind: "Role",
|
||||
Name: listenerRole.Name,
|
||||
@@ -460,7 +463,7 @@ func (b *resourceBuilder) newScaleSetListenerRoleBinding(autoscalingListener *v1
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: scaleSetListenerRoleName(autoscalingListener),
|
||||
Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
|
||||
Labels: mergeLabels(autoscalingListener.Labels, map[string]string{
|
||||
Labels: b.mergeLabels(autoscalingListener.Labels, map[string]string{
|
||||
LabelKeyGitHubScaleSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
|
||||
LabelKeyGitHubScaleSetName: autoscalingListener.Spec.AutoscalingRunnerSetName,
|
||||
labelKeyListenerNamespace: autoscalingListener.Namespace,
|
||||
@@ -476,14 +479,14 @@ func (b *resourceBuilder) newScaleSetListenerRoleBinding(autoscalingListener *v1
|
||||
return newRoleBinding
|
||||
}
|
||||
|
||||
func (b *resourceBuilder) newScaleSetListenerSecretMirror(autoscalingListener *v1alpha1.AutoscalingListener, secret *corev1.Secret) *corev1.Secret {
|
||||
func (b *ResourceBuilder) newScaleSetListenerSecretMirror(autoscalingListener *v1alpha1.AutoscalingListener, secret *corev1.Secret) *corev1.Secret {
|
||||
dataHash := hash.ComputeTemplateHash(&secret.Data)
|
||||
|
||||
newListenerSecret := &corev1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: scaleSetListenerSecretMirrorName(autoscalingListener),
|
||||
Namespace: autoscalingListener.Namespace,
|
||||
Labels: mergeLabels(autoscalingListener.Labels, map[string]string{
|
||||
Labels: b.mergeLabels(autoscalingListener.Labels, map[string]string{
|
||||
LabelKeyGitHubScaleSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
|
||||
LabelKeyGitHubScaleSetName: autoscalingListener.Spec.AutoscalingRunnerSetName,
|
||||
"secret-data-hash": dataHash,
|
||||
@@ -495,14 +498,14 @@ func (b *resourceBuilder) newScaleSetListenerSecretMirror(autoscalingListener *v
|
||||
return newListenerSecret
|
||||
}
|
||||
|
||||
func (b *resourceBuilder) newEphemeralRunnerSet(autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet) (*v1alpha1.EphemeralRunnerSet, error) {
|
||||
func (b *ResourceBuilder) newEphemeralRunnerSet(autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet) (*v1alpha1.EphemeralRunnerSet, error) {
|
||||
runnerScaleSetId, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdAnnotationKey])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
runnerSpecHash := autoscalingRunnerSet.RunnerSetSpecHash()
|
||||
|
||||
labels := mergeLabels(autoscalingRunnerSet.Labels, map[string]string{
|
||||
labels := b.mergeLabels(autoscalingRunnerSet.Labels, map[string]string{
|
||||
LabelKeyKubernetesPartOf: labelValueKubernetesPartOf,
|
||||
LabelKeyKubernetesComponent: "runner-set",
|
||||
LabelKeyKubernetesVersion: autoscalingRunnerSet.Labels[LabelKeyKubernetesVersion],
|
||||
@@ -515,7 +518,6 @@ func (b *resourceBuilder) newEphemeralRunnerSet(autoscalingRunnerSet *v1alpha1.A
|
||||
}
|
||||
|
||||
newAnnotations := map[string]string{
|
||||
|
||||
AnnotationKeyGitHubRunnerGroupName: autoscalingRunnerSet.Annotations[AnnotationKeyGitHubRunnerGroupName],
|
||||
AnnotationKeyGitHubRunnerScaleSetName: autoscalingRunnerSet.Annotations[AnnotationKeyGitHubRunnerScaleSetName],
|
||||
annotationKeyRunnerSpecHash: runnerSpecHash,
|
||||
@@ -545,7 +547,7 @@ func (b *resourceBuilder) newEphemeralRunnerSet(autoscalingRunnerSet *v1alpha1.A
|
||||
return newEphemeralRunnerSet, nil
|
||||
}
|
||||
|
||||
func (b *resourceBuilder) newEphemeralRunner(ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet) *v1alpha1.EphemeralRunner {
|
||||
func (b *ResourceBuilder) newEphemeralRunner(ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet) *v1alpha1.EphemeralRunner {
|
||||
labels := make(map[string]string)
|
||||
for k, v := range ephemeralRunnerSet.Labels {
|
||||
if k == LabelKeyKubernetesComponent {
|
||||
@@ -572,7 +574,7 @@ func (b *resourceBuilder) newEphemeralRunner(ephemeralRunnerSet *v1alpha1.Epheme
|
||||
}
|
||||
}
|
||||
|
||||
func (b *resourceBuilder) newEphemeralRunnerPod(ctx context.Context, runner *v1alpha1.EphemeralRunner, secret *corev1.Secret, envs ...corev1.EnvVar) *corev1.Pod {
|
||||
func (b *ResourceBuilder) newEphemeralRunnerPod(ctx context.Context, runner *v1alpha1.EphemeralRunner, secret *corev1.Secret, envs ...corev1.EnvVar) *corev1.Pod {
|
||||
var newPod corev1.Pod
|
||||
|
||||
labels := map[string]string{}
|
||||
@@ -640,7 +642,7 @@ func (b *resourceBuilder) newEphemeralRunnerPod(ctx context.Context, runner *v1a
|
||||
return &newPod
|
||||
}
|
||||
|
||||
func (b *resourceBuilder) newEphemeralRunnerJitSecret(ephemeralRunner *v1alpha1.EphemeralRunner) *corev1.Secret {
|
||||
func (b *ResourceBuilder) newEphemeralRunnerJitSecret(ephemeralRunner *v1alpha1.EphemeralRunner) *corev1.Secret {
|
||||
return &corev1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: ephemeralRunner.Name,
|
||||
@@ -748,14 +750,26 @@ func trimLabelValue(val string) string {
return val
}

func mergeLabels(base, overwrite map[string]string) map[string]string {
mergedLabels := map[string]string{}
func (b *ResourceBuilder) mergeLabels(base, overwrite map[string]string) map[string]string {
mergedLabels := make(map[string]string, len(base))

base:
for k, v := range base {
for _, prefix := range b.ExcludeLabelPropagationPrefixes {
if strings.HasPrefix(k, prefix) {
continue base
}
}
mergedLabels[k] = v
}

overwrite:
for k, v := range overwrite {
for _, prefix := range b.ExcludeLabelPropagationPrefixes {
if strings.HasPrefix(k, prefix) {
continue overwrite
}
}
mergedLabels[k] = v
}

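The labelled loops above are where the new ExcludeLabelPropagationPrefixes field takes effect: any key that matches one of the configured prefixes is skipped in both the base and the overwrite map. The snippet below is a simplified, standalone re-implementation for illustration only (the real logic is the unexported mergeLabels method shown above); the input values are taken from the TestLabelPropagation hunk that follows.

```go
package main

import (
	"fmt"
	"strings"
)

// excludeByPrefix mirrors, in simplified form, the prefix filtering that
// ResourceBuilder.mergeLabels applies to each label map.
func excludeByPrefix(labels map[string]string, prefixes []string) map[string]string {
	out := make(map[string]string, len(labels))
next:
	for k, v := range labels {
		for _, p := range prefixes {
			if strings.HasPrefix(k, p) {
				continue next
			}
		}
		out[k] = v
	}
	return out
}

func main() {
	labels := map[string]string{
		"arbitrary-label":                 "random-value",
		"example.com/label":               "example-value",
		"directly.excluded.org/label":     "excluded-value",
		"directly.excluded.org/arbitrary": "not-excluded-value",
	}
	prefixes := []string{"example.com/", "directly.excluded.org/label"}

	// "arbitrary-label" and "directly.excluded.org/arbitrary" survive; the rest
	// are dropped, matching the expectations in the test hunk below.
	fmt.Println(excludeByPrefix(labels, prefixes))
}
```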
@@ -19,9 +19,13 @@ func TestLabelPropagation(t *testing.T) {
|
||||
Name: "test-scale-set",
|
||||
Namespace: "test-ns",
|
||||
Labels: map[string]string{
|
||||
LabelKeyKubernetesPartOf: labelValueKubernetesPartOf,
|
||||
LabelKeyKubernetesVersion: "0.2.0",
|
||||
"arbitrary-label": "random-value",
|
||||
LabelKeyKubernetesPartOf: labelValueKubernetesPartOf,
|
||||
LabelKeyKubernetesVersion: "0.2.0",
|
||||
"arbitrary-label": "random-value",
|
||||
"example.com/label": "example-value",
|
||||
"example.com/example": "example-value",
|
||||
"directly.excluded.org/label": "excluded-value",
|
||||
"directly.excluded.org/arbitrary": "not-excluded-value",
|
||||
},
|
||||
Annotations: map[string]string{
|
||||
runnerScaleSetIdAnnotationKey: "1",
|
||||
@@ -34,7 +38,12 @@ func TestLabelPropagation(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
var b resourceBuilder
|
||||
b := ResourceBuilder{
|
||||
ExcludeLabelPropagationPrefixes: []string{
|
||||
"example.com/",
|
||||
"directly.excluded.org/label",
|
||||
},
|
||||
}
|
||||
ephemeralRunnerSet, err := b.newEphemeralRunnerSet(&autoscalingRunnerSet)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, labelValueKubernetesPartOf, ephemeralRunnerSet.Labels[LabelKeyKubernetesPartOf])
|
||||
@@ -63,6 +72,11 @@ func TestLabelPropagation(t *testing.T) {
|
||||
assert.Equal(t, "repo", listener.Labels[LabelKeyGitHubRepository])
|
||||
assert.Equal(t, autoscalingRunnerSet.Labels["arbitrary-label"], listener.Labels["arbitrary-label"])
|
||||
|
||||
assert.NotContains(t, listener.Labels, "example.com/label")
|
||||
assert.NotContains(t, listener.Labels, "example.com/example")
|
||||
assert.NotContains(t, listener.Labels, "directly.excluded.org/label")
|
||||
assert.Equal(t, "not-excluded-value", listener.Labels["directly.excluded.org/arbitrary"])
|
||||
|
||||
listenerServiceAccount := &corev1.ServiceAccount{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test",
|
||||
@@ -128,7 +142,7 @@ func TestGitHubURLTrimLabelValues(t *testing.T) {
|
||||
GitHubConfigUrl: fmt.Sprintf("https://github.com/%s/%s", organization, repository),
|
||||
}
|
||||
|
||||
var b resourceBuilder
|
||||
var b ResourceBuilder
|
||||
ephemeralRunnerSet, err := b.newEphemeralRunnerSet(autoscalingRunnerSet)
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, ephemeralRunnerSet.Labels[LabelKeyGitHubEnterprise], 0)
|
||||
@@ -152,7 +166,7 @@ func TestGitHubURLTrimLabelValues(t *testing.T) {
|
||||
GitHubConfigUrl: fmt.Sprintf("https://github.com/enterprises/%s", enterprise),
|
||||
}
|
||||
|
||||
var b resourceBuilder
|
||||
var b ResourceBuilder
|
||||
ephemeralRunnerSet, err := b.newEphemeralRunnerSet(autoscalingRunnerSet)
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, ephemeralRunnerSet.Labels[LabelKeyGitHubEnterprise], 63)
|
||||
|
||||
@@ -42,7 +42,7 @@ eliminate some duplication:
`-coverprofile` flags: while `-short` is used to skip [old ARC E2E
tests](https://github.com/actions/actions-runner-controller/blob/master/test/e2e/e2e_test.go#L85-L87),
`-coverprofile` is adding to the test time without really giving us any value
in return. We should also start using `actions/setup-go@v4` to take advantage
in return. We should also start using `actions/setup-go@v5` to take advantage
of caching (it would speed up our tests by a lot) or enable it on `v3` if we
have a strong reason not to upgrade. We should keep ignoring our E2E tests too
as those will be run elsewhere (either use `Short` there too or ignoring the

@@ -43,6 +43,16 @@ You can follow [this troubleshooting guide](https://docs.github.com/en/actions/h

## Changelog

### v0.9.3

1. AutoscalingListener controller: Inspect listener container state instead of pod phase [#3548](https://github.com/actions/actions-runner-controller/pull/3548)
1. Exclude label prefix propagation [#3607](https://github.com/actions/actions-runner-controller/pull/3607)
1. Check status code of fetch access token for github app [#3568](https://github.com/actions/actions-runner-controller/pull/3568)
1. Remove .Named() from the ephemeral runner controller [#3596](https://github.com/actions/actions-runner-controller/pull/3596)
1. Customize work directory [#3477](https://github.com/actions/actions-runner-controller/pull/3477)
1. Fix problem with ephemeralRunner Succeeded state before build executed [#3528](https://github.com/actions/actions-runner-controller/pull/3528)
1. Remove finalizers in one pass to speed up cleanups AutoscalingRunnerSet [#3536](https://github.com/actions/actions-runner-controller/pull/3536)

### v0.9.2

1. Refresh session if token expires during delete message [#3529](https://github.com/actions/actions-runner-controller/pull/3529)

@@ -10,6 +10,7 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"math/rand"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
@@ -1054,6 +1055,14 @@ func (c *Client) fetchAccessToken(ctx context.Context, gitHubConfigURL string, c
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusCreated {
|
||||
return nil, &GitHubAPIError{
|
||||
StatusCode: resp.StatusCode,
|
||||
RequestID: resp.Header.Get(HeaderGitHubRequestID),
|
||||
Err: fmt.Errorf("failed to get access token for GitHub App auth: %v", resp.Status),
|
||||
}
|
||||
}
|
||||
|
||||
// Format: https://docs.github.com/en/rest/apps/apps#create-an-installation-access-token-for-an-app
|
||||
var accessToken *accessToken
|
||||
if err = json.NewDecoder(resp.Body).Decode(&accessToken); err != nil {
|
||||
@@ -1131,15 +1140,30 @@ func (c *Client) getActionsServiceAdminConnection(ctx context.Context, rt *regis
}

retry++
if retry > 3 {
if retry > 5 {
return nil, fmt.Errorf("unable to register runner after 3 retries: %w", &GitHubAPIError{
StatusCode: resp.StatusCode,
RequestID: resp.Header.Get(HeaderGitHubRequestID),
Err: innerErr,
})
}
time.Sleep(time.Duration(500 * int(time.Millisecond) * (retry + 1)))
// Add exponential backoff + jitter to avoid thundering herd
// This will generate a backoff schedule:
// 1: 1s
// 2: 3s
// 3: 4s
// 4: 8s
// 5: 17s
baseDelay := 500 * time.Millisecond
jitter := time.Duration(rand.Intn(1000))
maxDelay := 20 * time.Second
delay := baseDelay*(1<<retry) + jitter

if delay > maxDelay {
delay = maxDelay
}

time.Sleep(delay)
}

var actionsServiceAdminConnection *ActionsServiceAdminConnection

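For reference, the snippet below reproduces the capped exponential backoff from the hunk above as a standalone program so the schedule can be inspected directly. It is a sketch, not repository code: the 500ms base, the 20s cap, and the bit-shift formula are copied from the diff, while the jitter here is expressed in milliseconds for visibility (an assumption; the diff builds the jitter directly from rand.Intn(1000)).

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

func main() {
	baseDelay := 500 * time.Millisecond
	maxDelay := 20 * time.Second

	// The retry counter is incremented before sleeping, so the waits happen
	// for retry values 1 through 5 before the call finally gives up.
	for retry := 1; retry <= 5; retry++ {
		jitter := time.Duration(rand.Intn(1000)) * time.Millisecond // assumption: millisecond jitter
		delay := baseDelay*(1<<retry) + jitter
		if delay > maxDelay {
			delay = maxDelay
		}
		fmt.Printf("retry %d: sleeping %v\n", retry, delay)
	}
}
```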
@@ -170,7 +170,7 @@ func TestNewActionsServiceRequest(t *testing.T) {
|
||||
}
|
||||
failures := 0
|
||||
unauthorizedHandler := func(w http.ResponseWriter, r *http.Request) {
|
||||
if failures < 2 {
|
||||
if failures < 5 {
|
||||
failures++
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.WriteHeader(http.StatusUnauthorized)
|
||||
|
||||
4  go.mod
@@ -14,7 +14,7 @@ require (
|
||||
github.com/gorilla/mux v1.8.1
|
||||
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79
|
||||
github.com/gruntwork-io/terratest v0.46.7
|
||||
github.com/hashicorp/go-retryablehttp v0.7.5
|
||||
github.com/hashicorp/go-retryablehttp v0.7.7
|
||||
github.com/kelseyhightower/envconfig v1.4.0
|
||||
github.com/onsi/ginkgo v1.16.5
|
||||
github.com/onsi/ginkgo/v2 v2.17.1
|
||||
@@ -91,7 +91,7 @@ require (
|
||||
github.com/urfave/cli v1.22.2 // indirect
|
||||
golang.org/x/crypto v0.22.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa // indirect
|
||||
golang.org/x/sys v0.19.0 // indirect
|
||||
golang.org/x/sys v0.20.0 // indirect
|
||||
golang.org/x/term v0.19.0 // indirect
|
||||
golang.org/x/text v0.14.0 // indirect
|
||||
golang.org/x/time v0.4.0 // indirect
|
||||
|
||||
18  go.sum
@@ -35,6 +35,8 @@ github.com/evanphx/json-patch v5.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLi
|
||||
github.com/evanphx/json-patch/v5 v5.7.0 h1:nJqP7uwL84RJInrohHfW0Fx3awjbm8qZeFv0nW9SYGc=
|
||||
github.com/evanphx/json-patch/v5 v5.7.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ=
|
||||
github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
|
||||
github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
|
||||
github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
|
||||
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
|
||||
@@ -113,12 +115,12 @@ github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/U
|
||||
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||
github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
|
||||
github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
|
||||
github.com/hashicorp/go-hclog v0.9.2 h1:CG6TE5H9/JXsFWJCfoIVpKFIkFe6ysEuHirp4DxCsHI=
|
||||
github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
|
||||
github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k=
|
||||
github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
|
||||
github.com/hashicorp/go-multierror v1.1.0 h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g4TbElacI=
|
||||
github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA=
|
||||
github.com/hashicorp/go-retryablehttp v0.7.5 h1:bJj+Pj19UZMIweq/iie+1u5YCdGrnxCT9yvm0e+Nd5M=
|
||||
github.com/hashicorp/go-retryablehttp v0.7.5/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8=
|
||||
github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU=
|
||||
github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
|
||||
github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
|
||||
@@ -145,8 +147,12 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
|
||||
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
|
||||
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
|
||||
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
|
||||
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
|
||||
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
||||
github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
|
||||
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
||||
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/mattn/go-zglob v0.0.1/go.mod h1:9fxibJccNxU2cnpIKLRRFA7zX7qhkJIQWBb449FYHOo=
|
||||
github.com/mattn/go-zglob v0.0.2-0.20190814121620-e3c945676326 h1:ofNAzWCcyTALn2Zv40+8XitdzCgXY6e9qvXwN9W0YXg=
|
||||
github.com/mattn/go-zglob v0.0.2-0.20190814121620-e3c945676326/go.mod h1:9fxibJccNxU2cnpIKLRRFA7zX7qhkJIQWBb449FYHOo=
|
||||
@@ -286,8 +292,8 @@ golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o=
|
||||
golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y=
|
||||
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
|
||||
|
||||
47  main.go
@@ -94,13 +94,16 @@ func main() {
|
||||
runnerImagePullSecrets stringSlice
|
||||
runnerPodDefaults actionssummerwindnet.RunnerPodDefaults
|
||||
|
||||
namespace string
|
||||
logLevel string
|
||||
logFormat string
|
||||
watchSingleNamespace string
|
||||
namespace string
|
||||
logLevel string
|
||||
logFormat string
|
||||
watchSingleNamespace string
|
||||
excludeLabelPropagationPrefixes stringSlice
|
||||
|
||||
autoScalerImagePullSecrets stringSlice
|
||||
|
||||
opts = actionsgithubcom.OptionsWithDefault()
|
||||
|
||||
commonRunnerLabels commaSeparatedStringSlice
|
||||
)
|
||||
var c github.Config
|
||||
@@ -135,9 +138,11 @@ func main() {
|
||||
flag.DurationVar(&defaultScaleDownDelay, "default-scale-down-delay", actionssummerwindnet.DefaultScaleDownDelay, "The approximate delay for a scale down followed by a scale up, used to prevent flapping (down->up->down->... loop)")
|
||||
flag.IntVar(&port, "port", 9443, "The port to which the admission webhook endpoint should bind")
|
||||
flag.DurationVar(&syncPeriod, "sync-period", 1*time.Minute, "Determines the minimum frequency at which K8s resources managed by this controller are reconciled.")
|
||||
flag.IntVar(&opts.RunnerMaxConcuncurrentReconciles, "runner-max-concurrent-reconciles", opts.RunnerMaxConcuncurrentReconciles, "The maximum number of concurrent reconciles which can be run by the EphemeralRunner controller. Increase this value to improve the throughput of the controller, but it may also increase the load on the API server and the external service (e.g. GitHub API).")
|
||||
flag.Var(&commonRunnerLabels, "common-runner-labels", "Runner labels in the K1=V1,K2=V2,... format that are inherited all the runners created by the controller. See https://github.com/actions/actions-runner-controller/issues/321 for more information")
|
||||
flag.StringVar(&namespace, "watch-namespace", "", "The namespace to watch for custom resources. Set to empty for letting it watch for all namespaces.")
|
||||
flag.StringVar(&watchSingleNamespace, "watch-single-namespace", "", "Restrict to watch for custom resources in a single namespace.")
|
||||
flag.Var(&excludeLabelPropagationPrefixes, "exclude-label-propagation-prefix", "The list of prefixes that should be excluded from label propagation")
|
||||
flag.StringVar(&logLevel, "log-level", logging.LogLevelDebug, `The verbosity of the logging. Valid values are "debug", "info", "warn", "error". Defaults to "debug".`)
|
||||
flag.StringVar(&logFormat, "log-format", "text", `The log format. Valid options are "text" and "json". Defaults to "text"`)
|
||||
flag.BoolVar(&autoScalingRunnerSetOnly, "auto-scaling-runner-set-only", false, "Make controller only reconcile AutoRunnerScaleSet object.")
|
||||
@@ -154,6 +159,12 @@ func main() {
|
||||
}
|
||||
c.Log = &log
|
||||
|
||||
if err := opts.LoadEnv(); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Error: loading environment variables: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
log.Info("Using options", "runner-max-concurrent-reconciles", opts.RunnerMaxConcuncurrentReconciles)
|
||||
|
||||
if !autoScalingRunnerSetOnly {
|
||||
ghClient, err = c.NewClient()
|
||||
if err != nil {
|
||||
@@ -258,6 +269,10 @@ func main() {
|
||||
log.WithName("actions-clients"),
|
||||
)
|
||||
|
||||
rb := actionsgithubcom.ResourceBuilder{
|
||||
ExcludeLabelPropagationPrefixes: excludeLabelPropagationPrefixes,
|
||||
}
|
||||
|
||||
if err = (&actionsgithubcom.AutoscalingRunnerSetReconciler{
|
||||
Client: mgr.GetClient(),
|
||||
Log: log.WithName("AutoscalingRunnerSet").WithValues("version", build.Version),
|
||||
@@ -267,27 +282,30 @@ func main() {
|
||||
ActionsClient: actionsMultiClient,
|
||||
UpdateStrategy: actionsgithubcom.UpdateStrategy(updateStrategy),
|
||||
DefaultRunnerScaleSetListenerImagePullSecrets: autoScalerImagePullSecrets,
|
||||
ResourceBuilder: rb,
|
||||
}).SetupWithManager(mgr); err != nil {
|
||||
log.Error(err, "unable to create controller", "controller", "AutoscalingRunnerSet")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
if err = (&actionsgithubcom.EphemeralRunnerReconciler{
|
||||
Client: mgr.GetClient(),
|
||||
Log: log.WithName("EphemeralRunner").WithValues("version", build.Version),
|
||||
Scheme: mgr.GetScheme(),
|
||||
ActionsClient: actionsMultiClient,
|
||||
}).SetupWithManager(mgr); err != nil {
|
||||
Client: mgr.GetClient(),
|
||||
Log: log.WithName("EphemeralRunner").WithValues("version", build.Version),
|
||||
Scheme: mgr.GetScheme(),
|
||||
ActionsClient: actionsMultiClient,
|
||||
ResourceBuilder: rb,
|
||||
}).SetupWithManager(mgr, actionsgithubcom.WithMaxConcurrentReconciles(opts.RunnerMaxConcuncurrentReconciles)); err != nil {
|
||||
log.Error(err, "unable to create controller", "controller", "EphemeralRunner")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
if err = (&actionsgithubcom.EphemeralRunnerSetReconciler{
|
||||
Client: mgr.GetClient(),
|
||||
Log: log.WithName("EphemeralRunnerSet").WithValues("version", build.Version),
|
||||
Scheme: mgr.GetScheme(),
|
||||
ActionsClient: actionsMultiClient,
|
||||
PublishMetrics: metricsAddr != "0",
|
||||
Client: mgr.GetClient(),
|
||||
Log: log.WithName("EphemeralRunnerSet").WithValues("version", build.Version),
|
||||
Scheme: mgr.GetScheme(),
|
||||
ActionsClient: actionsMultiClient,
|
||||
PublishMetrics: metricsAddr != "0",
|
||||
ResourceBuilder: rb,
|
||||
}).SetupWithManager(mgr); err != nil {
|
||||
log.Error(err, "unable to create controller", "controller", "EphemeralRunnerSet")
|
||||
os.Exit(1)
|
||||
@@ -299,6 +317,7 @@ func main() {
|
||||
Scheme: mgr.GetScheme(),
|
||||
ListenerMetricsAddr: listenerMetricsAddr,
|
||||
ListenerMetricsEndpoint: listenerMetricsEndpoint,
|
||||
ResourceBuilder: rb,
|
||||
}).SetupWithManager(mgr); err != nil {
|
||||
log.Error(err, "unable to create controller", "controller", "AutoscalingListener")
|
||||
os.Exit(1)
|
||||
|
||||
@@ -6,7 +6,7 @@ DIND_ROOTLESS_RUNNER_NAME ?= ${DOCKER_USER}/actions-runner-dind-rootless
|
||||
OS_IMAGE ?= ubuntu-22.04
|
||||
TARGETPLATFORM ?= $(shell arch)
|
||||
|
||||
RUNNER_VERSION ?= 2.317.0
|
||||
RUNNER_VERSION ?= 2.319.1
|
||||
RUNNER_CONTAINER_HOOKS_VERSION ?= 0.6.1
|
||||
DOCKER_VERSION ?= 24.0.7
|
||||
|
||||
|
||||
@@ -1,2 +1,2 @@
|
||||
RUNNER_VERSION=2.317.0
|
||||
RUNNER_VERSION=2.319.1
|
||||
RUNNER_CONTAINER_HOOKS_VERSION=0.6.1
|
||||
@@ -36,7 +36,7 @@ var (
|
||||
|
||||
testResultCMNamePrefix = "test-result-"
|
||||
|
||||
RunnerVersion = "2.317.0"
|
||||
RunnerVersion = "2.319.1"
|
||||
RunnerContainerHooksVersion = "0.6.1"
|
||||
)
|
||||
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
package testing
|
||||
|
||||
const (
|
||||
ActionsCheckout = "actions/checkout@v3"
|
||||
ActionsCheckout = "actions/checkout@v4"
|
||||
)
|
||||
|
||||
type Workflow struct {