Compare commits


1 Commit

Author: Nikola Jokic
SHA1: 7f15ae7f05
Message: extending ephemeral runner statuses
Date: 2025-11-10 14:20:27 +01:00
66 changed files with 1658 additions and 1926 deletions


@@ -0,0 +1,215 @@
name: 'Execute and Assert ARC E2E Test Action'
description: 'Queue the E2E test workflow and assert that the workflow run succeeds'
inputs:
auth-token:
description: 'GitHub access token to queue workflow run'
required: true
repo-owner:
description: "The repository owner name that has the test workflow file, ex: actions"
required: true
repo-name:
description: "The repository name that has the test workflow file, ex: test"
required: true
workflow-file:
description: 'The file name of the workflow yaml, ex: test.yml'
required: true
arc-name:
description: 'The name of the configured gha-runner-scale-set'
required: true
arc-namespace:
description: 'The namespace of the configured gha-runner-scale-set'
required: true
arc-controller-namespace:
description: 'The namespace of the configured gha-runner-scale-set-controller'
required: true
wait-to-finish:
description: 'Wait for the workflow run to finish'
required: true
default: "true"
wait-to-running:
description: 'Wait for the workflow run to start running'
required: true
default: "false"
runs:
using: "composite"
steps:
- name: Queue test workflow
shell: bash
id: queue_workflow
run: |
queue_time=`date +%FT%TZ`
echo "queue_time=$queue_time" >> $GITHUB_OUTPUT
curl -X POST https://api.github.com/repos/${{inputs.repo-owner}}/${{inputs.repo-name}}/actions/workflows/${{inputs.workflow-file}}/dispatches \
-H "Accept: application/vnd.github.v3+json" \
-H "Authorization: token ${{inputs.auth-token}}" \
-d '{"ref": "main", "inputs": { "arc_name": "${{inputs.arc-name}}" } }'
- name: Fetch workflow run & job ids
uses: actions/github-script@v7
id: query_workflow
with:
script: |
// Try to find the workflow run triggered by the previous step using the workflow_dispatch event.
// - Find recently created workflow runs in the test repository
// - For each workflow run, list its workflow jobs and check whether a job's labels contain `inputs.arc-name`
// - Since inputs.arc-name should be unique per e2e workflow run, once we find a job with that label, we have found the workflow run we just triggered.
function sleep(ms) {
return new Promise(resolve => setTimeout(resolve, ms))
}
const owner = '${{inputs.repo-owner}}'
const repo = '${{inputs.repo-name}}'
const workflow_id = '${{inputs.workflow-file}}'
let workflow_run_id = 0
let workflow_job_id = 0
let workflow_run_html_url = ""
let count = 0
while (count++<12) {
await sleep(10 * 1000);
let listRunResponse = await github.rest.actions.listWorkflowRuns({
owner: owner,
repo: repo,
workflow_id: workflow_id,
created: '>${{steps.queue_workflow.outputs.queue_time}}'
})
if (listRunResponse.data.total_count > 0) {
console.log(`Found some new workflow runs for ${workflow_id}`)
for (let i = 0; i<listRunResponse.data.total_count; i++) {
let workflowRun = listRunResponse.data.workflow_runs[i]
console.log(`Check if workflow run ${workflowRun.id} is triggered by us.`)
let listJobResponse = await github.rest.actions.listJobsForWorkflowRun({
owner: owner,
repo: repo,
run_id: workflowRun.id
})
console.log(`Workflow run ${workflowRun.id} has ${listJobResponse.data.total_count} jobs.`)
if (listJobResponse.data.total_count > 0) {
for (let j = 0; j<listJobResponse.data.total_count; j++) {
let workflowJob = listJobResponse.data.jobs[j]
console.log(`Check if workflow job ${workflowJob.id} is triggered by us.`)
console.log(JSON.stringify(workflowJob.labels));
if (workflowJob.labels.includes('${{inputs.arc-name}}')) {
console.log(`Workflow job ${workflowJob.id} (Run id: ${workflowJob.run_id}) is triggered by us.`)
workflow_run_id = workflowJob.run_id
workflow_job_id = workflowJob.id
workflow_run_html_url = workflowRun.html_url
break
}
}
}
if (workflow_job_id > 0) {
break;
}
}
}
if (workflow_job_id > 0) {
break;
}
}
if (workflow_job_id == 0) {
core.setFailed(`Can't find a workflow run and workflow job triggered with 'runs-on: ${{inputs.arc-name}}'`)
} else {
core.setOutput('workflow_run', workflow_run_id);
core.setOutput('workflow_job', workflow_job_id);
core.setOutput('workflow_run_url', workflow_run_html_url);
}
- name: Generate summary about the triggered workflow run
shell: bash
run: |
cat <<-EOF > $GITHUB_STEP_SUMMARY
| **Triggered workflow run** |
|:--------------------------:|
| ${{steps.query_workflow.outputs.workflow_run_url}} |
EOF
- name: Wait for workflow to start running
if: inputs.wait-to-running == 'true' && inputs.wait-to-finish == 'false'
uses: actions/github-script@v7
with:
script: |
function sleep(ms) {
return new Promise(resolve => setTimeout(resolve, ms))
}
const owner = '${{inputs.repo-owner}}'
const repo = '${{inputs.repo-name}}'
const workflow_run_id = ${{steps.query_workflow.outputs.workflow_run}}
const workflow_job_id = ${{steps.query_workflow.outputs.workflow_job}}
let count = 0
while (count++<10) {
await sleep(30 * 1000);
let getRunResponse = await github.rest.actions.getWorkflowRun({
owner: owner,
repo: repo,
run_id: workflow_run_id
})
console.log(`${getRunResponse.data.html_url}: ${getRunResponse.data.status} (${getRunResponse.data.conclusion})`);
if (getRunResponse.data.status == 'in_progress') {
console.log(`Workflow run is in progress.`)
return
}
}
core.setFailed(`The triggered workflow run didn't start properly using ${{inputs.arc-name}}`)
- name: Wait for workflow to finish successfully
if: inputs.wait-to-finish == 'true'
uses: actions/github-script@v7
with:
script: |
// Wait 5 minutes and make sure the workflow run we triggered completed with result 'success'
function sleep(ms) {
return new Promise(resolve => setTimeout(resolve, ms))
}
const owner = '${{inputs.repo-owner}}'
const repo = '${{inputs.repo-name}}'
const workflow_run_id = ${{steps.query_workflow.outputs.workflow_run}}
const workflow_job_id = ${{steps.query_workflow.outputs.workflow_job}}
let count = 0
while (count++<10) {
await sleep(30 * 1000);
let getRunResponse = await github.rest.actions.getWorkflowRun({
owner: owner,
repo: repo,
run_id: workflow_run_id
})
console.log(`${getRunResponse.data.html_url}: ${getRunResponse.data.status} (${getRunResponse.data.conclusion})`);
if (getRunResponse.data.status == 'completed') {
if ( getRunResponse.data.conclusion == 'success') {
console.log(`Workflow run finished properly.`)
return
} else {
core.setFailed(`The triggered workflow run finished with result ${getRunResponse.data.conclusion}`)
return
}
}
}
core.setFailed(`The triggered workflow run didn't finish properly using ${{inputs.arc-name}}`)
- name: Gather listener logs
shell: bash
if: always()
run: |
LISTENER_POD="$(kubectl get autoscalinglisteners.actions.github.com -n ${{inputs.arc-controller-namespace}} -o jsonpath='{.items[*].metadata.name}')"
kubectl logs "$LISTENER_POD" -n ${{inputs.arc-controller-namespace}}
- name: Gather coredns logs
shell: bash
if: always()
run: |
kubectl logs deployments/coredns -n kube-system
- name: cleanup
if: inputs.wait-to-finish == 'true'
shell: bash
run: |
helm uninstall ${{ inputs.arc-name }} --namespace ${{inputs.arc-namespace}} --debug
kubectl wait --timeout=30s --for=delete AutoScalingRunnerSet -n ${{inputs.arc-namespace}} -l app.kubernetes.io/instance=${{ inputs.arc-name }}
- name: Gather controller logs
shell: bash
if: always()
run: |
kubectl logs deployment/arc-gha-rs-controller -n ${{inputs.arc-controller-namespace}}
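
The dispatch step above is a plain REST call, and the polling step then matches the run by its job labels. For reference, a minimal Go sketch of the same workflow_dispatch request (not part of the commit; owner, repo, workflow file, scale-set name, and token values are placeholders):

package main

import (
	"bytes"
	"fmt"
	"log"
	"net/http"
)

func main() {
	// Placeholder values; the composite action receives these as inputs.
	owner, repo, workflowFile := "actions", "test", "test.yml"
	arcName, token := "my-scale-set", "<auth-token>"

	url := fmt.Sprintf("https://api.github.com/repos/%s/%s/actions/workflows/%s/dispatches", owner, repo, workflowFile)
	payload := bytes.NewBufferString(fmt.Sprintf(`{"ref": "main", "inputs": {"arc_name": %q}}`, arcName))
	req, err := http.NewRequest(http.MethodPost, url, payload)
	if err != nil {
		log.Fatal(err)
	}
	req.Header.Set("Accept", "application/vnd.github.v3+json")
	req.Header.Set("Authorization", "token "+token)

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status) // the dispatches endpoint returns 204 No Content on success
}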


@@ -0,0 +1,65 @@
name: "Setup ARC E2E Test Action"
description: "Build the controller image, create a minikube cluster, load the image, and exchange the ARC configuration token."
inputs:
app-id:
description: "GitHub App ID used to exchange the access token"
required: true
app-pk:
description: "GitHub App private key used to exchange the access token"
required: true
image-name:
description: "Local docker image name for building"
required: true
image-tag:
description: "Tag of ARC Docker image for building"
required: true
target-org:
description: "The test organization for ARC e2e test"
required: true
outputs:
token:
description: "Token used to configure ARC"
value: ${{steps.config-token.outputs.token}}
runs:
using: "composite"
steps:
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2
with:
# Pinning Buildx v0.9.1 and BuildKit v0.10.6:
# BuildKit v0.11 has a bug causing intermittent
# failures pushing images to GHCR
version: v0.9.1
driver-opts: image=moby/buildkit:v0.10.6
- name: Build controller image
# https://github.com/docker/build-push-action/releases/tag/v6.18.0
uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83
with:
file: Dockerfile
platforms: linux/amd64
load: true
build-args: |
DOCKER_IMAGE_NAME=${{inputs.image-name}}
VERSION=${{inputs.image-tag}}
tags: |
${{inputs.image-name}}:${{inputs.image-tag}}
no-cache: true
- name: Create minikube cluster and load image
shell: bash
run: |
minikube start
minikube image load ${{inputs.image-name}}:${{inputs.image-tag}}
- name: Get configure token
id: config-token
# https://github.com/peter-murray/workflow-application-token-action/releases/tag/v3.0.0
uses: peter-murray/workflow-application-token-action@dc0413987a085fa17d19df9e47d4677cf81ffef3
with:
application_id: ${{ inputs.app-id }}
application_private_key: ${{ inputs.app-pk }}
organization: ${{ inputs.target-org }}


@@ -0,0 +1,51 @@
name: "Setup Docker"
inputs:
username:
description: "Username"
required: true
password:
description: "Password"
required: true
ghcr_username:
description: "GHCR username. Usually set from the github.actor variable"
required: true
ghcr_password:
description: "GHCR password. Usually set from the secrets.GITHUB_TOKEN variable"
required: true
runs:
using: "composite"
steps:
- name: Get Short SHA
id: vars
run: |
echo "sha_short=${GITHUB_SHA::7}" >> $GITHUB_ENV
shell: bash
- name: Set up QEMU
# https://github.com/docker/setup-qemu-action/releases/tag/v3.6.0
uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392
- name: Set up Docker Buildx
# https://github.com/docker/setup-buildx-action/releases/tag/v3.10.0
uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2
with:
version: latest
- name: Login to DockerHub
if: ${{ github.event_name == 'release' || github.event_name == 'push' && github.ref == 'refs/heads/master' && inputs.password != '' }}
# https://github.com/docker/login-action/releases/tag/v3.4.0
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772
with:
username: ${{ inputs.username }}
password: ${{ inputs.password }}
- name: Login to GitHub Container Registry
if: ${{ github.event_name == 'release' || github.event_name == 'push' && github.ref == 'refs/heads/master' && inputs.ghcr_password != '' }}
# https://github.com/docker/login-action/releases/tag/v3.4.0
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772
with:
registry: ghcr.io
username: ${{ inputs.ghcr_username }}
password: ${{ inputs.ghcr_password }}


@@ -40,7 +40,7 @@ jobs:
publish-chart: ${{ steps.publish-chart-step.outputs.publish }}
steps:
- name: Checkout
uses: actions/checkout@v6
uses: actions/checkout@v5
with:
fetch-depth: 0
@@ -63,7 +63,7 @@ jobs:
python-version: "3.11"
- name: Set up chart-testing
uses: helm/chart-testing-action@6ec842c01de15ebb84c8627d2744a0c2f2755c9f
uses: helm/chart-testing-action@0d28d3144d3a25ea2cc349d6e59901c4ff469b3b
- name: Run chart-testing (list-changed)
id: list-changed
@@ -79,7 +79,7 @@ jobs:
- name: Create kind cluster
if: steps.list-changed.outputs.changed == 'true'
uses: helm/kind-action@92086f6be054225fa813e0a4b13787fc9088faab
uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3
# We need cert-manager already installed in the cluster because we assume the CRDs exist
- name: Install cert-manager
@@ -134,7 +134,7 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v6
uses: actions/checkout@v5
with:
fetch-depth: 0
@@ -184,7 +184,7 @@ jobs:
# this workaround is intended to move the index.yaml to the target repo
# where the github pages are hosted
- name: Checkout target repository
uses: actions/checkout@v6
uses: actions/checkout@v5
with:
repository: ${{ env.CHART_TARGET_ORG }}/${{ env.CHART_TARGET_REPO }}
path: ${{ env.CHART_TARGET_REPO }}


@@ -39,7 +39,7 @@ jobs:
if: ${{ !startsWith(github.event.inputs.release_tag_name, 'gha-runner-scale-set-') }}
steps:
- name: Checkout
uses: actions/checkout@v6
uses: actions/checkout@v5
- uses: actions/setup-go@v6
with:


@@ -19,7 +19,7 @@ env:
PUSH_TO_REGISTRIES: true
TARGET_ORG: actions-runner-controller
TARGET_WORKFLOW: release-runners.yaml
DOCKER_VERSION: 28.0.4
DOCKER_VERSION: 24.0.7
concurrency:
group: ${{ github.workflow }}
@@ -30,7 +30,7 @@ jobs:
name: Trigger Build and Push of Runner Images
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v6
- uses: actions/checkout@v5
- name: Get runner version
id: versions
run: |


@@ -24,7 +24,7 @@ jobs:
container_hooks_current_version: ${{ steps.container_hooks_versions.outputs.container_hooks_current_version }}
container_hooks_latest_version: ${{ steps.container_hooks_versions.outputs.container_hooks_latest_version }}
steps:
- uses: actions/checkout@v6
- uses: actions/checkout@v5
- name: Get runner current and latest versions
id: runner_versions
@@ -69,7 +69,7 @@ jobs:
echo "CONTAINER_HOOKS_CURRENT_VERSION=${{ needs.check_versions.outputs.container_hooks_current_version }}"
echo "CONTAINER_HOOKS_LATEST_VERSION=${{ needs.check_versions.outputs.container_hooks_latest_version }}"
- uses: actions/checkout@v6
- uses: actions/checkout@v5
- name: PR Name
id: pr_name
@@ -124,7 +124,7 @@ jobs:
PR_NAME: ${{ needs.check_pr.outputs.pr_name }}
steps:
- uses: actions/checkout@v6
- uses: actions/checkout@v5
- name: New branch
run: git checkout -b update-runner-"$(date +%Y-%m-%d)"


@@ -40,7 +40,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v6
uses: actions/checkout@v5
with:
fetch-depth: 0
@@ -55,7 +55,7 @@ jobs:
python-version: "3.11"
- name: Set up chart-testing
uses: helm/chart-testing-action@6ec842c01de15ebb84c8627d2744a0c2f2755c9f
uses: helm/chart-testing-action@0d28d3144d3a25ea2cc349d6e59901c4ff469b3b
- name: Run chart-testing (list-changed)
id: list-changed
@@ -70,7 +70,7 @@ jobs:
ct lint --config charts/.ci/ct-config.yaml
- name: Create kind cluster
uses: helm/kind-action@92086f6be054225fa813e0a4b13787fc9088faab
uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3
if: steps.list-changed.outputs.changed == 'true'
# We need cert-manager already installed in the cluster because we assume the CRDs exist


@@ -24,7 +24,7 @@ jobs:
name: runner / shellcheck
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v6
- uses: actions/checkout@v5
- name: "Run shellcheck"
run: make shellcheck
@@ -33,7 +33,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v6
uses: actions/checkout@v5
- name: Run tests
run: |

File diff suppressed because it is too large.


@@ -45,7 +45,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v6
uses: actions/checkout@v5
with:
# If inputs.ref is empty, it'll resolve to the default branch
ref: ${{ inputs.ref }}
@@ -72,7 +72,7 @@ jobs:
echo "repository_owner=$(echo ${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT
- name: Set up QEMU
uses: docker/setup-qemu-action@c7c53464625b32c7a7e944ae62b3e17d2b600130
uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435
@@ -119,7 +119,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v6
uses: actions/checkout@v5
with:
# If inputs.ref is empty, it'll resolve to the default branch
ref: ${{ inputs.ref }}
@@ -166,7 +166,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v6
uses: actions/checkout@v5
with:
# If inputs.ref is empty, it'll resolve to the default branch
ref: ${{ inputs.ref }}


@@ -36,7 +36,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v6
uses: actions/checkout@v5
with:
fetch-depth: 0
@@ -51,7 +51,7 @@ jobs:
python-version: "3.11"
- name: Set up chart-testing
uses: helm/chart-testing-action@6ec842c01de15ebb84c8627d2744a0c2f2755c9f
uses: helm/chart-testing-action@0d28d3144d3a25ea2cc349d6e59901c4ff469b3b
- name: Run chart-testing (list-changed)
id: list-changed
@@ -88,7 +88,7 @@ jobs:
cache-to: type=gha,mode=max
- name: Create kind cluster
uses: helm/kind-action@92086f6be054225fa813e0a4b13787fc9088faab
uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3
if: steps.list-changed.outputs.changed == 'true'
with:
cluster_name: chart-testing
@@ -111,7 +111,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v6
uses: actions/checkout@v5
- uses: actions/setup-go@v6
with:
go-version-file: "go.mod"


@@ -55,7 +55,7 @@ jobs:
TARGET_REPO: actions-runner-controller
steps:
- name: Checkout
uses: actions/checkout@v6
uses: actions/checkout@v5
- name: Get Token
id: get_workflow_token
@@ -90,7 +90,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v6
uses: actions/checkout@v5
- name: Login to GitHub Container Registry
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef
@@ -110,7 +110,7 @@ jobs:
echo "repository_owner=$(echo ${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT
- name: Set up QEMU
uses: docker/setup-qemu-action@c7c53464625b32c7a7e944ae62b3e17d2b600130
uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435


@@ -25,7 +25,7 @@ jobs:
security-events: write
steps:
- name: Checkout repository
uses: actions/checkout@v6
uses: actions/checkout@v5
- name: Install Go
uses: actions/setup-go@v6


@@ -16,7 +16,7 @@ jobs:
check_for_first_interaction:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v6
- uses: actions/checkout@v5
- uses: actions/first-interaction@v3
with:
repo_token: ${{ secrets.GITHUB_TOKEN }}


@@ -29,7 +29,7 @@ jobs:
fmt:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v6
- uses: actions/checkout@v5
- uses: actions/setup-go@v6
with:
go-version-file: "go.mod"
@@ -42,13 +42,13 @@ jobs:
lint:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v6
- uses: actions/checkout@v5
- uses: actions/setup-go@v6
with:
go-version-file: "go.mod"
cache: false
- name: golangci-lint
uses: golangci/golangci-lint-action@e7fa5ac41e1cf5b7d48e45e42232ce7ada589601
uses: golangci/golangci-lint-action@4afd733a84b1f43292c63897423277bb7f4313a9
with:
only-new-issues: true
version: v2.5.0
@@ -56,7 +56,7 @@ jobs:
generate:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v6
- uses: actions/checkout@v5
- uses: actions/setup-go@v6
with:
go-version-file: "go.mod"
@@ -69,7 +69,7 @@ jobs:
test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v6
- uses: actions/checkout@v5
- uses: actions/setup-go@v6
with:
go-version-file: "go.mod"
@@ -78,7 +78,7 @@ jobs:
run: git diff --exit-code
- name: Install kubebuilder
run: |
curl -D headers.txt -fsL "https://storage.googleapis.com/kubebuilder-tools/kubebuilder-tools-1.30.0-linux-amd64.tar.gz" -o kubebuilder-tools
curl -D headers.txt -fsL "https://storage.googleapis.com/kubebuilder-tools/kubebuilder-tools-1.26.1-linux-amd64.tar.gz" -o kubebuilder-tools
echo "$(grep -i etag headers.txt -m 1 | cut -d'"' -f2) kubebuilder-tools" > sum
md5sum -c sum
tar -zvxf kubebuilder-tools


@@ -6,7 +6,7 @@ endif
DOCKER_USER ?= $(shell echo ${DOCKER_IMAGE_NAME} | cut -d / -f1)
VERSION ?= dev
COMMIT_SHA = $(shell git rev-parse HEAD)
RUNNER_VERSION ?= 2.330.0
RUNNER_VERSION ?= 2.329.0
TARGETPLATFORM ?= $(shell arch)
RUNNER_NAME ?= ${DOCKER_USER}/actions-runner
RUNNER_TAG ?= ${VERSION}
@@ -210,6 +210,8 @@ docker-buildx:
docker buildx create --platform ${PLATFORMS} --name container-builder --use;\
fi
docker buildx build --platform ${PLATFORMS} \
--build-arg RUNNER_VERSION=${RUNNER_VERSION} \
--build-arg DOCKER_VERSION=${DOCKER_VERSION} \
--build-arg VERSION=${VERSION} \
--build-arg COMMIT_SHA=${COMMIT_SHA} \
-t "${DOCKER_IMAGE_NAME}:${VERSION}" \
@@ -295,10 +297,6 @@ acceptance/runner/startup:
e2e:
go test -count=1 -v -timeout 600s -run '^TestE2E$$' ./test/e2e
.PHONY: gha-e2e
gha-e2e:
bash hack/e2e-test.sh
# Upload release file to GitHub.
github-release: release
ghr ${VERSION} release/


@@ -48,7 +48,7 @@ type EphemeralRunner struct {
}
func (er *EphemeralRunner) IsDone() bool {
return er.Status.Phase == corev1.PodSucceeded || er.Status.Phase == corev1.PodFailed
return er.Status.Phase == EphemeralRunnerSucceeded || er.Status.Phase == EphemeralRunnerFailed
}
func (er *EphemeralRunner) HasJob() bool {
@@ -125,6 +125,40 @@ type EphemeralRunnerSpec struct {
corev1.PodTemplateSpec `json:",inline"`
}
// EphemeralRunnerPhase is a label for the condition of an EphemeralRunner at the current time.
// +kubebuilder:validation:Enum=Pending;Running;Restarting;Succeeded;Failed;Aborted
type EphemeralRunnerPhase string
const (
// EphemeralRunnerPending is the stage where the ephemeral runner is about to start.
EphemeralRunnerPending EphemeralRunnerPhase = "Pending"
// EphemeralRunnerRunning is the stage where the ephemeral runner is running and ready to accept the job.
EphemeralRunnerRunning EphemeralRunnerPhase = "Running"
// EphemeralRunnerRestarting is the stage where the ephemeral runner pod stopped, so the ephemeral runner should restart it.
EphemeralRunnerRestarting EphemeralRunnerPhase = "Restarting"
// EphemeralRunnerSucceeded is the stage where the ephemeral runner finished running and exited with exit code 0.
EphemeralRunnerSucceeded EphemeralRunnerPhase = "Succeeded"
// EphemeralRunnerFailed means that the ephemeral runner finished running and exited with a non-zero exit code.
EphemeralRunnerFailed EphemeralRunnerPhase = "Failed"
// EphemeralRunnerAborted means that the ephemeral runner failed to start due to unrecoverable failure, and will be left as is for manual inspection.
EphemeralRunnerAborted EphemeralRunnerPhase = "Aborted"
)
func EphemeralRunnerPhaseFromPodPhase(podPhase corev1.PodPhase) EphemeralRunnerPhase {
switch podPhase {
case corev1.PodPending:
return EphemeralRunnerPending
case corev1.PodRunning:
return EphemeralRunnerRunning
case corev1.PodSucceeded:
return EphemeralRunnerSucceeded
case corev1.PodFailed:
return EphemeralRunnerFailed
default:
return EphemeralRunnerPending
}
}
// EphemeralRunnerStatus defines the observed state of EphemeralRunner
type EphemeralRunnerStatus struct {
// Turns true only if the runner is online.
@@ -140,7 +174,7 @@ type EphemeralRunnerStatus struct {
// The PodSucceeded phase should be set only when confirmed that EphemeralRunner
// actually executed the job and has been removed from the service.
// +optional
Phase corev1.PodPhase `json:"phase,omitempty"`
Phase EphemeralRunnerPhase `json:"phase,omitempty"`
// +optional
Reason string `json:"reason,omitempty"`
// +optional
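
The new EphemeralRunnerPhase above deliberately separates Aborted from Failed: IsDone now covers only Succeeded and Failed, so an Aborted runner is not treated as done. A minimal sketch of the mapping and that distinction (not part of the commit; the import path is the one used by the controllers below):

package main

import (
	"fmt"

	"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
	corev1 "k8s.io/api/core/v1"
)

func main() {
	// Pod phases map one-to-one onto runner phases; anything unrecognized
	// defaults to Pending.
	podPhases := []corev1.PodPhase{corev1.PodPending, corev1.PodRunning, corev1.PodSucceeded, corev1.PodFailed}
	for _, p := range podPhases {
		fmt.Printf("%s -> %s\n", p, v1alpha1.EphemeralRunnerPhaseFromPodPhase(p))
	}

	// Aborted never comes from a pod phase; the controller sets it directly,
	// and IsDone reports false for it.
	er := &v1alpha1.EphemeralRunner{}
	er.Status.Phase = v1alpha1.EphemeralRunnerAborted
	fmt.Println(er.IsDone()) // false
}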


@@ -8257,6 +8257,13 @@ spec:
The PodSucceeded phase should be set only when confirmed that EphemeralRunner
actually executed the job and has been removed from the service.
enum:
- Pending
- Running
- Restarting
- Succeeded
- Failed
- Aborted
type: string
ready:
description: Turns true only if the runner is online.


@@ -53,7 +53,4 @@ rules:
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list", "create", "delete"]
{{- with $containerMode.kubernetesModeAdditionalRoleRules}}
{{- toYaml . | nindent 2}}
{{- end }}
{{- end }}


@@ -1,30 +0,0 @@
githubConfigUrl: https://github.com/actions/actions-runner-controller
githubConfigSecret:
github_token: test
template:
spec:
containers:
- name: other
image: other-image:latest
volumes:
- name: foo
emptyDir: {}
- name: bar
emptyDir: {}
- name: work
hostPath:
path: /data
type: Directory
containerMode:
type: kubernetes
kubernetesModeAdditionalRoleRule:
- apiGroups:
- apps
resources:
- deployments
verbs:
- get
- list
- create
- delete


@@ -124,7 +124,6 @@ githubConfigSecret:
# resources:
# requests:
# storage: 1Gi
# kubernetesModeAdditionalRoleRules: []
#
## listenerTemplate is the PodSpec for each listener Pod


@@ -8257,6 +8257,13 @@ spec:
The PodSucceeded phase should be set only when confirmed that EphemeralRunner
actually executed the job and has been removed from the service.
enum:
- Pending
- Running
- Restarting
- Succeeded
- Failed
- Aborted
type: string
ready:
description: Turns true only if the runner is online.


@@ -211,14 +211,7 @@ func (r *AutoscalingListenerReconciler) Reconcile(ctx context.Context, req ctrl.
// TODO: make sure the role binding has the up-to-date role and service account
listenerPod := new(corev1.Pod)
if err := r.Get(
ctx,
client.ObjectKey{
Namespace: autoscalingListener.Namespace,
Name: autoscalingListener.Name,
},
listenerPod,
); err != nil {
if err := r.Get(ctx, client.ObjectKey{Namespace: autoscalingListener.Namespace, Name: autoscalingListener.Name}, listenerPod); err != nil {
if !kerrors.IsNotFound(err) {
log.Error(err, "Unable to get listener pod", "namespace", autoscalingListener.Namespace, "name", autoscalingListener.Name)
return ctrl.Result{}, err
@@ -236,45 +229,12 @@ func (r *AutoscalingListenerReconciler) Reconcile(ctx context.Context, req ctrl.
cs := listenerContainerStatus(listenerPod)
switch {
case listenerPod.Status.Reason == "Evicted":
log.Info(
"Listener pod is evicted",
"phase", listenerPod.Status.Phase,
"reason", listenerPod.Status.Reason,
"message", listenerPod.Status.Message,
)
return ctrl.Result{}, r.deleteListenerPod(ctx, autoscalingListener, listenerPod, log)
case cs == nil:
log.Info("Listener pod is not ready", "namespace", listenerPod.Namespace, "name", listenerPod.Name)
return ctrl.Result{}, nil
case cs.State.Terminated != nil:
log.Info(
"Listener pod is terminated",
"namespace", listenerPod.Namespace,
"name", listenerPod.Name,
"reason", cs.State.Terminated.Reason,
"message", cs.State.Terminated.Message,
)
log.Info("Listener pod is terminated", "namespace", listenerPod.Namespace, "name", listenerPod.Name, "reason", cs.State.Terminated.Reason, "message", cs.State.Terminated.Message)
return ctrl.Result{}, r.deleteListenerPod(ctx, autoscalingListener, listenerPod, log)
case cs.State.Running != nil:
if err := r.publishRunningListener(autoscalingListener, true); err != nil {
log.Error(err, "Unable to publish running listener", "namespace", listenerPod.Namespace, "name", listenerPod.Name)
// stop reconciling. We should never get to this point but if we do,
// listener won't be able to start up, and the crash from the pod should
// notify the reconciler again.
return ctrl.Result{}, nil
}
return ctrl.Result{}, nil
}
return ctrl.Result{}, nil
}
func (r *AutoscalingListenerReconciler) deleteListenerPod(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, listenerPod *corev1.Pod, log logr.Logger) error {
if err := r.publishRunningListener(autoscalingListener, false); err != nil {
log.Error(err, "Unable to publish runner listener down metric", "namespace", listenerPod.Namespace, "name", listenerPod.Name)
}
@@ -283,7 +243,7 @@ func (r *AutoscalingListenerReconciler) deleteListenerPod(ctx context.Context, a
log.Info("Deleting the listener pod", "namespace", listenerPod.Namespace, "name", listenerPod.Name)
if err := r.Delete(ctx, listenerPod); err != nil && !kerrors.IsNotFound(err) {
log.Error(err, "Unable to delete the listener pod", "namespace", listenerPod.Namespace, "name", listenerPod.Name)
return err
return ctrl.Result{}, err
}
// delete the listener config secret as well, so it gets recreated when the listener pod is recreated, with any new data if it exists
@@ -293,13 +253,24 @@ func (r *AutoscalingListenerReconciler) deleteListenerPod(ctx context.Context, a
case err == nil && configSecret.DeletionTimestamp.IsZero():
log.Info("Deleting the listener config secret")
if err := r.Delete(ctx, &configSecret); err != nil {
return fmt.Errorf("failed to delete listener config secret: %w", err)
return ctrl.Result{}, fmt.Errorf("failed to delete listener config secret: %w", err)
}
case !kerrors.IsNotFound(err):
return fmt.Errorf("failed to get the listener config secret: %w", err)
return ctrl.Result{}, fmt.Errorf("failed to get the listener config secret: %w", err)
}
}
return nil
return ctrl.Result{}, nil
case cs.State.Running != nil:
if err := r.publishRunningListener(autoscalingListener, true); err != nil {
log.Error(err, "Unable to publish running listener", "namespace", listenerPod.Namespace, "name", listenerPod.Name)
// stop reconciling. We should never get to this point but if we do,
// listener won't be able to start up, and the crash from the pod should
// notify the reconciler again.
return ctrl.Result{}, nil
}
return ctrl.Result{}, nil
}
return ctrl.Result{}, nil
}
func (r *AutoscalingListenerReconciler) cleanupResources(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, logger logr.Logger) (requeue bool, err error) {
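
Because the removed helper and the inlined code are interleaved above, a compact sketch of the resulting decision order, once the listener pod is fetched, may help (not part of the commit; classifyListenerPod is a hypothetical name):

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// classifyListenerPod mirrors the reconciler's switch over the listener
// container status, in the order the cases are evaluated.
func classifyListenerPod(pod *corev1.Pod, cs *corev1.ContainerStatus) string {
	switch {
	case pod.Status.Reason == "Evicted":
		return "delete pod" // pod and config secret are deleted so they get recreated
	case cs == nil:
		return "wait" // container status not reported yet
	case cs.State.Terminated != nil:
		return "delete pod" // same cleanup path as eviction
	case cs.State.Running != nil:
		return "publish running-listener metric"
	default:
		return "no-op"
	}
}

func main() {
	running := &corev1.ContainerStatus{State: corev1.ContainerState{Running: &corev1.ContainerStateRunning{}}}
	fmt.Println(classifyListenerPod(&corev1.Pod{}, running)) // publish running-listener metric
}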


@@ -28,6 +28,7 @@ import (
const (
autoscalingListenerTestTimeout = time.Second * 20
autoscalingListenerTestInterval = time.Millisecond * 250
autoscalingListenerTestGitHubToken = "gh_token"
)
var _ = Describe("Test AutoScalingListener controller", func() {
@@ -671,55 +672,6 @@ var _ = Describe("Test AutoScalingListener customization", func() {
autoscalingListenerTestInterval,
).ShouldNot(BeEquivalentTo(oldPodUID), "Pod should be created")
})
It("Should re-create pod when the listener pod is evicted", func() {
pod := new(corev1.Pod)
Eventually(
func() (string, error) {
err := k8sClient.Get(
ctx,
client.ObjectKey{
Name: autoscalingListener.Name,
Namespace: autoscalingListener.Namespace,
},
pod,
)
if err != nil {
return "", err
}
return pod.Name, nil
},
autoscalingListenerTestTimeout,
autoscalingListenerTestInterval,
).Should(
BeEquivalentTo(autoscalingListener.Name),
"Pod should be created",
)
updated := pod.DeepCopy()
oldPodUID := string(pod.UID)
updated.Status.Reason = "Evicted"
err := k8sClient.Status().Update(ctx, updated)
Expect(err).NotTo(HaveOccurred(), "failed to update pod status")
pod = new(corev1.Pod)
Eventually(
func() (string, error) {
err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingListener.Name, Namespace: autoscalingListener.Namespace}, pod)
if err != nil {
return "", err
}
return string(pod.UID), nil
},
autoscalingListenerTestTimeout,
autoscalingListenerTestInterval,
).ShouldNot(
BeEquivalentTo(oldPodUID),
"Pod should be created",
)
})
})
})


@@ -48,7 +48,7 @@ const (
annotationKeyValuesHash = "actions.github.com/values-hash"
autoscalingRunnerSetFinalizerName = "autoscalingrunnerset.actions.github.com/finalizer"
runnerScaleSetIDAnnotationKey = "runner-scale-set-id"
runnerScaleSetIdAnnotationKey = "runner-scale-set-id"
)
type UpdateStrategy string
@@ -180,14 +180,14 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl
return ctrl.Result{}, nil
}
scaleSetIDRaw, ok := autoscalingRunnerSet.Annotations[runnerScaleSetIDAnnotationKey]
scaleSetIdRaw, ok := autoscalingRunnerSet.Annotations[runnerScaleSetIdAnnotationKey]
if !ok {
// Need to create a new runner scale set on Actions service
log.Info("Runner scale set id annotation does not exist. Creating a new runner scale set.")
return r.createRunnerScaleSet(ctx, autoscalingRunnerSet, log)
}
if id, err := strconv.Atoi(scaleSetIDRaw); err != nil || id <= 0 {
if id, err := strconv.Atoi(scaleSetIdRaw); err != nil || id <= 0 {
log.Info("Runner scale set id annotation is not an id, or is <= 0. Creating a new runner scale set.")
// something modified the scaleSetId. Try to create one
return r.createRunnerScaleSet(ctx, autoscalingRunnerSet, log)
@@ -403,7 +403,7 @@ func (r *AutoscalingRunnerSetReconciler) createRunnerScaleSet(ctx context.Contex
return ctrl.Result{}, err
}
runnerGroupID := 1
runnerGroupId := 1
if len(autoscalingRunnerSet.Spec.RunnerGroup) > 0 {
runnerGroup, err := actionsClient.GetRunnerGroupByName(ctx, autoscalingRunnerSet.Spec.RunnerGroup)
if err != nil {
@@ -411,14 +411,14 @@ func (r *AutoscalingRunnerSetReconciler) createRunnerScaleSet(ctx context.Contex
return ctrl.Result{}, err
}
runnerGroupID = int(runnerGroup.ID)
runnerGroupId = int(runnerGroup.ID)
}
runnerScaleSet, err := actionsClient.GetRunnerScaleSet(ctx, runnerGroupID, autoscalingRunnerSet.Spec.RunnerScaleSetName)
runnerScaleSet, err := actionsClient.GetRunnerScaleSet(ctx, runnerGroupId, autoscalingRunnerSet.Spec.RunnerScaleSetName)
if err != nil {
logger.Error(err, "Failed to get runner scale set from Actions service",
"runnerGroupId",
strconv.Itoa(runnerGroupID),
strconv.Itoa(runnerGroupId),
"runnerScaleSetName",
autoscalingRunnerSet.Spec.RunnerScaleSetName)
return ctrl.Result{}, err
@@ -429,7 +429,7 @@ func (r *AutoscalingRunnerSetReconciler) createRunnerScaleSet(ctx context.Contex
ctx,
&actions.RunnerScaleSet{
Name: autoscalingRunnerSet.Spec.RunnerScaleSetName,
RunnerGroupId: runnerGroupID,
RunnerGroupId: runnerGroupId,
Labels: []actions.Label{
{
Name: autoscalingRunnerSet.Spec.RunnerScaleSetName,
@@ -466,7 +466,7 @@ func (r *AutoscalingRunnerSetReconciler) createRunnerScaleSet(ctx context.Contex
logger.Info("Adding runner scale set ID, name and runner group name as an annotation and url labels")
if err = patch(ctx, r.Client, autoscalingRunnerSet, func(obj *v1alpha1.AutoscalingRunnerSet) {
obj.Annotations[AnnotationKeyGitHubRunnerScaleSetName] = runnerScaleSet.Name
obj.Annotations[runnerScaleSetIDAnnotationKey] = strconv.Itoa(runnerScaleSet.Id)
obj.Annotations[runnerScaleSetIdAnnotationKey] = strconv.Itoa(runnerScaleSet.Id)
obj.Annotations[AnnotationKeyGitHubRunnerGroupName] = runnerScaleSet.RunnerGroupName
if err := applyGitHubURLLabels(obj.Spec.GitHubConfigUrl, obj.Labels); err != nil { // should never happen
logger.Error(err, "Failed to apply GitHub URL labels")
@@ -484,7 +484,7 @@ func (r *AutoscalingRunnerSetReconciler) createRunnerScaleSet(ctx context.Contex
}
func (r *AutoscalingRunnerSetReconciler) updateRunnerScaleSetRunnerGroup(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) (ctrl.Result, error) {
runnerScaleSetID, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIDAnnotationKey])
runnerScaleSetId, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdAnnotationKey])
if err != nil {
logger.Error(err, "Failed to parse runner scale set ID")
return ctrl.Result{}, err
@@ -496,7 +496,7 @@ func (r *AutoscalingRunnerSetReconciler) updateRunnerScaleSetRunnerGroup(ctx con
return ctrl.Result{}, err
}
runnerGroupID := 1
runnerGroupId := 1
if len(autoscalingRunnerSet.Spec.RunnerGroup) > 0 {
runnerGroup, err := actionsClient.GetRunnerGroupByName(ctx, autoscalingRunnerSet.Spec.RunnerGroup)
if err != nil {
@@ -504,12 +504,12 @@ func (r *AutoscalingRunnerSetReconciler) updateRunnerScaleSetRunnerGroup(ctx con
return ctrl.Result{}, err
}
runnerGroupID = int(runnerGroup.ID)
runnerGroupId = int(runnerGroup.ID)
}
updatedRunnerScaleSet, err := actionsClient.UpdateRunnerScaleSet(ctx, runnerScaleSetID, &actions.RunnerScaleSet{RunnerGroupId: runnerGroupID})
updatedRunnerScaleSet, err := actionsClient.UpdateRunnerScaleSet(ctx, runnerScaleSetId, &actions.RunnerScaleSet{RunnerGroupId: runnerGroupId})
if err != nil {
logger.Error(err, "Failed to update runner scale set", "runnerScaleSetId", runnerScaleSetID)
logger.Error(err, "Failed to update runner scale set", "runnerScaleSetId", runnerScaleSetId)
return ctrl.Result{}, err
}
@@ -527,7 +527,7 @@ func (r *AutoscalingRunnerSetReconciler) updateRunnerScaleSetRunnerGroup(ctx con
}
func (r *AutoscalingRunnerSetReconciler) updateRunnerScaleSetName(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) (ctrl.Result, error) {
runnerScaleSetID, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIDAnnotationKey])
runnerScaleSetId, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdAnnotationKey])
if err != nil {
logger.Error(err, "Failed to parse runner scale set ID")
return ctrl.Result{}, err
@@ -544,9 +544,9 @@ func (r *AutoscalingRunnerSetReconciler) updateRunnerScaleSetName(ctx context.Co
return ctrl.Result{}, err
}
updatedRunnerScaleSet, err := actionsClient.UpdateRunnerScaleSet(ctx, runnerScaleSetID, &actions.RunnerScaleSet{Name: autoscalingRunnerSet.Spec.RunnerScaleSetName})
updatedRunnerScaleSet, err := actionsClient.UpdateRunnerScaleSet(ctx, runnerScaleSetId, &actions.RunnerScaleSet{Name: autoscalingRunnerSet.Spec.RunnerScaleSetName})
if err != nil {
logger.Error(err, "Failed to update runner scale set", "runnerScaleSetId", runnerScaleSetID)
logger.Error(err, "Failed to update runner scale set", "runnerScaleSetId", runnerScaleSetId)
return ctrl.Result{}, err
}
@@ -563,7 +563,7 @@ func (r *AutoscalingRunnerSetReconciler) updateRunnerScaleSetName(ctx context.Co
}
func (r *AutoscalingRunnerSetReconciler) deleteRunnerScaleSet(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) error {
scaleSetID, ok := autoscalingRunnerSet.Annotations[runnerScaleSetIDAnnotationKey]
scaleSetId, ok := autoscalingRunnerSet.Annotations[runnerScaleSetIdAnnotationKey]
if !ok {
// Annotation not being present can occur in 3 scenarios
// 1. Scale set is never created.
@@ -580,7 +580,7 @@ func (r *AutoscalingRunnerSetReconciler) deleteRunnerScaleSet(ctx context.Contex
return nil
}
logger.Info("Deleting the runner scale set from Actions service")
runnerScaleSetID, err := strconv.Atoi(scaleSetID)
runnerScaleSetId, err := strconv.Atoi(scaleSetId)
if err != nil {
// If the annotation is not set correctly, we are going to get stuck in a loop trying to parse the scale set id.
// If the configuration is invalid (secret does not exist for example), we never got to the point to create runner set.
@@ -595,17 +595,17 @@ func (r *AutoscalingRunnerSetReconciler) deleteRunnerScaleSet(ctx context.Contex
return err
}
err = actionsClient.DeleteRunnerScaleSet(ctx, runnerScaleSetID)
err = actionsClient.DeleteRunnerScaleSet(ctx, runnerScaleSetId)
if err != nil {
logger.Error(err, "Failed to delete runner scale set", "runnerScaleSetId", runnerScaleSetID)
logger.Error(err, "Failed to delete runner scale set", "runnerScaleSetId", runnerScaleSetId)
return err
}
err = patch(ctx, r.Client, autoscalingRunnerSet, func(obj *v1alpha1.AutoscalingRunnerSet) {
delete(obj.Annotations, runnerScaleSetIDAnnotationKey)
delete(obj.Annotations, runnerScaleSetIdAnnotationKey)
})
if err != nil {
logger.Error(err, "Failed to patch autoscaling runner set with annotation removed", "annotation", runnerScaleSetIDAnnotationKey)
logger.Error(err, "Failed to patch autoscaling runner set with annotation removed", "annotation", runnerScaleSetIdAnnotationKey)
return err
}
@@ -1006,7 +1006,6 @@ func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeManagerRoleFinali
// NOTE: if this logic should be used for other resources,
// consider using generics
type EphemeralRunnerSets struct {
list *v1alpha1.EphemeralRunnerSetList
sorted bool


@@ -36,6 +36,7 @@ import (
const (
autoscalingRunnerSetTestTimeout = time.Second * 20
autoscalingRunnerSetTestInterval = time.Millisecond * 250
autoscalingRunnerSetTestGitHubToken = "gh_token"
)
var _ = Describe("Test AutoScalingRunnerSet controller", Ordered, func() {
@@ -140,7 +141,7 @@ var _ = Describe("Test AutoScalingRunnerSet controller", Ordered, func() {
return "", err
}
if _, ok := created.Annotations[runnerScaleSetIDAnnotationKey]; !ok {
if _, ok := created.Annotations[runnerScaleSetIdAnnotationKey]; !ok {
return "", nil
}
@@ -148,7 +149,7 @@ var _ = Describe("Test AutoScalingRunnerSet controller", Ordered, func() {
return "", nil
}
return fmt.Sprintf("%s_%s", created.Annotations[runnerScaleSetIDAnnotationKey], created.Annotations[AnnotationKeyGitHubRunnerGroupName]), nil
return fmt.Sprintf("%s_%s", created.Annotations[runnerScaleSetIdAnnotationKey], created.Annotations[AnnotationKeyGitHubRunnerGroupName]), nil
},
autoscalingRunnerSetTestTimeout,
autoscalingRunnerSetTestInterval).Should(BeEquivalentTo("1_testgroup"), "RunnerScaleSet should be created/fetched and update the AutoScalingRunnerSet's annotation")


@@ -36,8 +36,7 @@ const (
LabelKeyGitHubRepository = "actions.github.com/repository"
)
// AutoscalingRunnerSetCleanupFinalizerName is a finalizer used to protect resources
// from deletion while AutoscalingRunnerSet is running
// Finalizer used to protect resources from deletion while AutoscalingRunnerSet is running
const AutoscalingRunnerSetCleanupFinalizerName = "actions.github.com/cleanup-protection"
const (


@@ -22,7 +22,6 @@ import (
"fmt"
"net/http"
"strconv"
"strings"
"time"
"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
@@ -154,17 +153,31 @@ func (r *EphemeralRunnerReconciler) Reconcile(ctx context.Context, req ctrl.Requ
return ctrl.Result{}, nil
}
addFinalizers := !controllerutil.ContainsFinalizer(ephemeralRunner, ephemeralRunnerFinalizerName) || !controllerutil.ContainsFinalizer(ephemeralRunner, ephemeralRunnerActionsFinalizerName)
if addFinalizers {
log.Info("Adding finalizers")
if !controllerutil.ContainsFinalizer(ephemeralRunner, ephemeralRunnerFinalizerName) {
log.Info("Adding finalizer")
if err := patch(ctx, r.Client, ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) {
controllerutil.AddFinalizer(obj, ephemeralRunnerFinalizerName)
controllerutil.AddFinalizer(obj, ephemeralRunnerActionsFinalizerName)
}); err != nil {
log.Error(err, "Failed to update with finalizer set")
return ctrl.Result{}, err
}
log.Info("Successfully added finalizers")
log.Info("Successfully added finalizer")
return ctrl.Result{}, nil
}
if !controllerutil.ContainsFinalizer(ephemeralRunner, ephemeralRunnerActionsFinalizerName) {
log.Info("Adding runner registration finalizer")
err := patch(ctx, r.Client, ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) {
controllerutil.AddFinalizer(obj, ephemeralRunnerActionsFinalizerName)
})
if err != nil {
log.Error(err, "Failed to update with runner registration finalizer set")
return ctrl.Result{}, err
}
log.Info("Successfully added runner registration finalizer")
return ctrl.Result{}, nil
}
secret := new(corev1.Secret)
@@ -269,37 +282,10 @@ func (r *EphemeralRunnerReconciler) Reconcile(ctx context.Context, req ctrl.Requ
case kerrors.IsAlreadyExists(err):
log.Info("Runner pod already exists. Waiting for the pod event to be received")
return ctrl.Result{Requeue: true, RequeueAfter: 5 * time.Second}, nil
case kerrors.IsInvalid(err):
case kerrors.IsInvalid(err) || kerrors.IsForbidden(err):
log.Error(err, "Failed to create a pod due to unrecoverable failure")
errMessage := fmt.Sprintf("Failed to create the pod: %v", err)
if err := r.markAsFailed(ctx, ephemeralRunner, errMessage, ReasonInvalidPodFailure, log); err != nil {
log.Error(err, "Failed to set ephemeral runner to phase Failed")
return ctrl.Result{}, err
}
return ctrl.Result{}, nil
case kerrors.IsForbidden(err):
if status, ok := err.(kerrors.APIStatus); ok || errors.As(err, &status) {
isResourceQuotaExceeded := strings.Contains(status.Status().Message, "exceeded quota:")
isAboutToExpire := ephemeralRunner.CreationTimestamp.Time.Add(10 * time.Minute).Before(time.Now())
switch {
case isResourceQuotaExceeded && isAboutToExpire:
log.Error(err, "Failed to create a pod due to resource quota exceeded and the ephemeral runner is about to expire; re-creating the ephemeral runner")
if err := r.Delete(ctx, ephemeralRunner); err != nil {
log.Error(err, "Failed to delete the ephemeral runner")
return ctrl.Result{}, err
}
return ctrl.Result{}, nil
case isResourceQuotaExceeded:
log.Error(err, "Resource quota is exceeded; requeue in 30s to retry pod creation")
return ctrl.Result{RequeueAfter: 30 * time.Second}, nil
default:
// other forbidden errors
// fallthrough to the default handling below
}
}
log.Error(err, "Failed to create a pod due to unrecoverable failure")
errMessage := fmt.Sprintf("Failed to create the pod: %v", err)
if err := r.markAsFailed(ctx, ephemeralRunner, errMessage, ReasonInvalidPodFailure, log); err != nil {
if err := r.markAsAborted(ctx, ephemeralRunner, errMessage, ReasonInvalidPodFailure, log); err != nil {
log.Error(err, "Failed to set ephemeral runner to phase Failed")
return ctrl.Result{}, err
}
@@ -525,10 +511,12 @@ func (r *EphemeralRunnerReconciler) cleanupRunnerLinkedSecrets(ctx context.Conte
return errors.Join(errs...)
}
func (r *EphemeralRunnerReconciler) markAsFailed(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, errMessage string, reason string, log logr.Logger) error {
// markAsAborted updates the ephemeral runner status to aborted and stops the reconciliation.
// This runner is left in the aborted state and won't be deleted until someone manually deletes it.
func (r *EphemeralRunnerReconciler) markAsAborted(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, errMessage string, reason string, log logr.Logger) error {
log.Info("Updating ephemeral runner status to Aborted")
if err := patchSubResource(ctx, r.Status(), ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) {
obj.Status.Phase = corev1.PodFailed
obj.Status.Phase = v1alpha1.EphemeralRunnerAborted
obj.Status.Reason = reason
obj.Status.Message = errMessage
}); err != nil {
@@ -559,6 +547,7 @@ func (r *EphemeralRunnerReconciler) deletePodAsFailed(ctx context.Context, ephem
if obj.Status.Failures == nil {
obj.Status.Failures = make(map[string]metav1.Time)
}
obj.Status.Phase = v1alpha1.EphemeralRunnerRestarting
obj.Status.Failures[string(pod.UID)] = metav1.Now()
obj.Status.Ready = false
obj.Status.Reason = pod.Status.Reason
@@ -689,7 +678,7 @@ func (r *EphemeralRunnerReconciler) createPod(ctx context.Context, runner *v1alp
}
log.Info("Creating new pod for ephemeral runner")
newPod := r.newEphemeralRunnerPod(runner, secret, envs...)
newPod := r.newEphemeralRunnerPod(ctx, runner, secret, envs...)
if err := ctrl.SetControllerReference(runner, newPod, r.Scheme); err != nil {
log.Error(err, "Failed to set controller reference to a new pod")
@@ -748,7 +737,8 @@ func (r *EphemeralRunnerReconciler) updateRunStatusFromPod(ctx context.Context,
}
}
phaseChanged := ephemeralRunner.Status.Phase != pod.Status.Phase
newPhase := v1alpha1.EphemeralRunnerPhaseFromPodPhase(pod.Status.Phase)
phaseChanged := ephemeralRunner.Status.Phase != newPhase
readyChanged := ready != ephemeralRunner.Status.Ready
if !phaseChanged && !readyChanged {
@@ -763,7 +753,7 @@ func (r *EphemeralRunnerReconciler) updateRunStatusFromPod(ctx context.Context,
"ready", ready,
)
err := patchSubResource(ctx, r.Status(), ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) {
obj.Status.Phase = pod.Status.Phase
obj.Status.Phase = newPhase
obj.Status.Ready = ready
obj.Status.Reason = pod.Status.Reason
obj.Status.Message = pod.Status.Message
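
The finalizer handling above changed from adding both finalizers in one patch to adding each missing finalizer in its own reconcile pass, returning after every patch so the resulting update event drives the next pass. A minimal sketch of that pattern (not part of the commit; the finalizer names are hypothetical):

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)

const (
	finalizerA = "example.com/finalizer-a" // hypothetical
	finalizerB = "example.com/finalizer-b" // hypothetical
)

// addNextFinalizer adds at most one missing finalizer and reports whether it
// did; the caller persists the change and returns, so each finalizer lands in
// its own reconcile pass.
func addNextFinalizer(obj client.Object) bool {
	for _, f := range []string{finalizerA, finalizerB} {
		if !controllerutil.ContainsFinalizer(obj, f) {
			controllerutil.AddFinalizer(obj, f)
			return true
		}
	}
	return false
}

func main() {
	pod := &corev1.Pod{} // any client.Object works; a Pod keeps the example small
	for addNextFinalizer(pod) {
		// first pass adds finalizerA, second pass adds finalizerB
	}
	fmt.Println(pod.GetFinalizers())
}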


@@ -270,7 +270,7 @@ var _ = Describe("EphemeralRunner", func() {
updated := new(v1alpha1.EphemeralRunner)
Eventually(
func() (corev1.PodPhase, error) {
func() (v1alpha1.EphemeralRunnerPhase, error) {
err := k8sClient.Get(
ctx,
client.ObjectKey{Name: invalideEphemeralRunner.Name, Namespace: invalideEphemeralRunner.Namespace},
@@ -283,7 +283,7 @@ var _ = Describe("EphemeralRunner", func() {
},
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
).Should(BeEquivalentTo(corev1.PodFailed))
).Should(BeEquivalentTo(v1alpha1.EphemeralRunnerAborted))
Expect(updated.Status.Reason).Should(Equal("InvalidPod"))
Expect(updated.Status.Message).Should(Equal("Failed to create the pod: pods \"invalid-ephemeral-runner\" is forbidden: no PriorityClass with name notexist was found"))
@@ -470,7 +470,7 @@ var _ = Describe("EphemeralRunner", func() {
var updated *v1alpha1.EphemeralRunner
Eventually(
func() (corev1.PodPhase, error) {
func() (v1alpha1.EphemeralRunnerPhase, error) {
updated = new(v1alpha1.EphemeralRunner)
err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, updated)
if err != nil {
@@ -480,7 +480,7 @@ var _ = Describe("EphemeralRunner", func() {
},
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
).Should(BeEquivalentTo(phase))
).Should(BeEquivalentTo(v1alpha1.EphemeralRunnerPhase(phase)))
}
})
@@ -592,10 +592,10 @@ var _ = Describe("EphemeralRunner", func() {
Expect(err).To(BeNil(), "failed to patch pod status")
Consistently(
func() (corev1.PodPhase, error) {
func() (v1alpha1.EphemeralRunnerPhase, error) {
updated := new(v1alpha1.EphemeralRunner)
if err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, updated); err != nil {
return corev1.PodUnknown, err
return "", err
}
return updated.Status.Phase, nil
},
@@ -772,7 +772,7 @@ var _ = Describe("EphemeralRunner", func() {
Expect(err).To(BeNil())
Eventually(
func() (corev1.PodPhase, error) {
func() (v1alpha1.EphemeralRunnerPhase, error) {
updated := new(v1alpha1.EphemeralRunner)
if err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, updated); err != nil {
return "", err
@@ -781,7 +781,7 @@ var _ = Describe("EphemeralRunner", func() {
},
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
).Should(BeEquivalentTo(corev1.PodRunning))
).Should(BeEquivalentTo(v1alpha1.EphemeralRunnerRunning))
// set phase to succeeded
pod.Status.Phase = corev1.PodSucceeded
@@ -789,7 +789,7 @@ var _ = Describe("EphemeralRunner", func() {
Expect(err).To(BeNil())
Consistently(
func() (corev1.PodPhase, error) {
func() (v1alpha1.EphemeralRunnerPhase, error) {
updated := new(v1alpha1.EphemeralRunner)
if err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, updated); err != nil {
return "", err
@@ -797,7 +797,7 @@ var _ = Describe("EphemeralRunner", func() {
return updated.Status.Phase, nil
},
ephemeralRunnerTimeout,
).Should(BeEquivalentTo(corev1.PodRunning))
).Should(BeEquivalentTo(v1alpha1.EphemeralRunnerRunning))
})
})


@@ -162,7 +162,7 @@ func (r *EphemeralRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl.R
"pending", len(ephemeralRunnerState.pending),
"running", len(ephemeralRunnerState.running),
"finished", len(ephemeralRunnerState.finished),
"failed", len(ephemeralRunnerState.failed),
"failed", len(ephemeralRunnerState.aborted),
"deleting", len(ephemeralRunnerState.deleting),
)
@@ -185,7 +185,7 @@ func (r *EphemeralRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl.R
},
len(ephemeralRunnerState.pending),
len(ephemeralRunnerState.running),
len(ephemeralRunnerState.failed),
len(ephemeralRunnerState.aborted),
)
}
@@ -232,7 +232,7 @@ func (r *EphemeralRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl.R
CurrentReplicas: total,
PendingEphemeralRunners: len(ephemeralRunnerState.pending),
RunningEphemeralRunners: len(ephemeralRunnerState.running),
FailedEphemeralRunners: len(ephemeralRunnerState.failed),
FailedEphemeralRunners: len(ephemeralRunnerState.aborted),
}
// Update the status if needed.
@@ -307,13 +307,13 @@ func (r *EphemeralRunnerSetReconciler) cleanUpEphemeralRunners(ctx context.Conte
"pending", len(ephemeralRunnerState.pending),
"running", len(ephemeralRunnerState.running),
"finished", len(ephemeralRunnerState.finished),
"failed", len(ephemeralRunnerState.failed),
"failed", len(ephemeralRunnerState.aborted),
"deleting", len(ephemeralRunnerState.deleting),
)
log.Info("Cleanup finished or failed ephemeral runners")
var errs []error
for _, ephemeralRunner := range append(ephemeralRunnerState.finished, ephemeralRunnerState.failed...) {
for _, ephemeralRunner := range append(ephemeralRunnerState.finished, ephemeralRunnerState.aborted...) {
log.Info("Deleting ephemeral runner", "name", ephemeralRunner.Name)
if err := r.Delete(ctx, ephemeralRunner); err != nil && !kerrors.IsNotFound(err) {
errs = append(errs, err)
@@ -564,7 +564,7 @@ type ephemeralRunnerState struct {
pending []*v1alpha1.EphemeralRunner
running []*v1alpha1.EphemeralRunner
finished []*v1alpha1.EphemeralRunner
failed []*v1alpha1.EphemeralRunner
aborted []*v1alpha1.EphemeralRunner
deleting []*v1alpha1.EphemeralRunner
latestPatchID int
@@ -585,12 +585,12 @@ func newEphemeralRunnerState(ephemeralRunnerList *v1alpha1.EphemeralRunnerList)
}
switch r.Status.Phase {
case corev1.PodRunning:
case v1alpha1.EphemeralRunnerRunning:
ephemeralRunnerState.running = append(ephemeralRunnerState.running, r)
case corev1.PodSucceeded:
case v1alpha1.EphemeralRunnerSucceeded, v1alpha1.EphemeralRunnerFailed:
ephemeralRunnerState.finished = append(ephemeralRunnerState.finished, r)
case corev1.PodFailed:
ephemeralRunnerState.failed = append(ephemeralRunnerState.failed, r)
case v1alpha1.EphemeralRunnerAborted:
ephemeralRunnerState.aborted = append(ephemeralRunnerState.aborted, r)
default:
// Pending, Restarting, or no phase is considered pending.
//
@@ -603,5 +603,5 @@ func newEphemeralRunnerState(ephemeralRunnerList *v1alpha1.EphemeralRunnerList)
}
func (s *ephemeralRunnerState) scaleTotal() int {
return len(s.pending) + len(s.running) + len(s.failed)
return len(s.pending) + len(s.running) + len(s.aborted)
}
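
With the phase change, the set controller's bucketing shifts as shown above: Succeeded and Failed both count as finished, Aborted gets its own bucket (and still contributes to scaleTotal), and Restarting falls through to pending. A small sketch of that switch (not part of the commit; bucket is a hypothetical name):

package main

import (
	"fmt"

	"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
)

// bucket mirrors newEphemeralRunnerState's switch over the new phases.
func bucket(phase v1alpha1.EphemeralRunnerPhase) string {
	switch phase {
	case v1alpha1.EphemeralRunnerRunning:
		return "running"
	case v1alpha1.EphemeralRunnerSucceeded, v1alpha1.EphemeralRunnerFailed:
		return "finished"
	case v1alpha1.EphemeralRunnerAborted:
		return "aborted" // still counted by scaleTotal
	default:
		return "pending" // Pending, Restarting, or no phase yet
	}
}

func main() {
	for _, p := range []v1alpha1.EphemeralRunnerPhase{
		v1alpha1.EphemeralRunnerPending,
		v1alpha1.EphemeralRunnerRestarting,
		v1alpha1.EphemeralRunnerSucceeded,
		v1alpha1.EphemeralRunnerFailed,
		v1alpha1.EphemeralRunnerAborted,
	} {
		fmt.Printf("%s -> %s\n", p, bucket(p))
	}
}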


@@ -34,6 +34,7 @@ import (
const (
ephemeralRunnerSetTestTimeout = time.Second * 20
ephemeralRunnerSetTestInterval = time.Millisecond * 250
ephemeralRunnerSetTestGitHubToken = "gh_token"
)
func TestPrecomputedConstants(t *testing.T) {
@@ -118,7 +119,8 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
Consistently(
func() (int, error) {
runnerList := new(v1alpha1.EphemeralRunnerList)
if err := listEphemeralRunnersAndRemoveFinalizers(ctx, k8sClient, runnerList, ephemeralRunnerSet.Namespace); err != nil {
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
if err != nil {
return -1, err
}
@@ -151,7 +153,8 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
Eventually(
func() (int, error) {
runnerList := new(v1alpha1.EphemeralRunnerList)
if err := listEphemeralRunnersAndRemoveFinalizers(ctx, k8sClient, runnerList, ephemeralRunnerSet.Namespace); err != nil {
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
if err != nil {
return -1, err
}
@@ -160,7 +163,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
for i, runner := range runnerList.Items {
if runner.Status.RunnerId == 0 {
updatedRunner := runner.DeepCopy()
updatedRunner.Status.Phase = corev1.PodRunning
updatedRunner.Status.Phase = v1alpha1.EphemeralRunnerRunning
updatedRunner.Status.RunnerId = i + 100
err = k8sClient.Status().Patch(ctx, updatedRunner, client.MergeFrom(&runner))
Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner")
@@ -169,7 +172,8 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
}
if refetch {
if err := listEphemeralRunnersAndRemoveFinalizers(ctx, k8sClient, runnerList, ephemeralRunnerSet.Namespace); err != nil {
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
if err != nil {
return -1, err
}
}
@@ -211,7 +215,8 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
Eventually(
func() (int, error) {
runnerList := new(v1alpha1.EphemeralRunnerList)
if err := listEphemeralRunnersAndRemoveFinalizers(ctx, k8sClient, runnerList, ephemeralRunnerSet.Namespace); err != nil {
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
if err != nil {
return -1, err
}
@@ -220,7 +225,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
for i, runner := range runnerList.Items {
if runner.Status.RunnerId == 0 {
updatedRunner := runner.DeepCopy()
updatedRunner.Status.Phase = corev1.PodRunning
updatedRunner.Status.Phase = v1alpha1.EphemeralRunnerRunning
updatedRunner.Status.RunnerId = i + 100
err = k8sClient.Status().Patch(ctx, updatedRunner, client.MergeFrom(&runner))
Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner")
@@ -229,7 +234,8 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
}
if refetch {
if err := listEphemeralRunnersAndRemoveFinalizers(ctx, k8sClient, runnerList, ephemeralRunnerSet.Namespace); err != nil {
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
if err != nil {
return -1, err
}
}
@@ -247,7 +253,8 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
Eventually(
func() (int, error) {
runnerList := new(v1alpha1.EphemeralRunnerList)
if err := listEphemeralRunnersAndRemoveFinalizers(ctx, k8sClient, runnerList, ephemeralRunnerSet.Namespace); err != nil {
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
if err != nil {
return -1, err
}
@@ -292,7 +299,8 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
runnerList := new(v1alpha1.EphemeralRunnerList)
Eventually(
func() (int, error) {
if err := listEphemeralRunnersAndRemoveFinalizers(ctx, k8sClient, runnerList, ephemeralRunnerSet.Namespace); err != nil {
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
if err != nil {
return -1, err
}
@@ -318,7 +326,8 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
runnerList := new(v1alpha1.EphemeralRunnerList)
Eventually(
func() (int, error) {
if err := listEphemeralRunnersAndRemoveFinalizers(ctx, k8sClient, runnerList, ephemeralRunnerSet.Namespace); err != nil {
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
if err != nil {
return -1, err
}
@@ -342,7 +351,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
runnerList = new(v1alpha1.EphemeralRunnerList)
Eventually(
func() (int, error) {
err := listEphemeralRunnersAndRemoveFinalizers(ctx, k8sClient, runnerList, ephemeralRunnerSet.Namespace)
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
if err != nil {
return -1, err
}
@@ -369,7 +378,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
runnerList := new(v1alpha1.EphemeralRunnerList)
Eventually(
func() (int, error) {
err := listEphemeralRunnersAndRemoveFinalizers(ctx, k8sClient, runnerList, ephemeralRunnerSet.Namespace)
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
if err != nil {
return -1, err
}
@@ -381,12 +390,12 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
).Should(BeEquivalentTo(2), "2 EphemeralRunner should be created")
updatedRunner := runnerList.Items[0].DeepCopy()
updatedRunner.Status.Phase = corev1.PodSucceeded
updatedRunner.Status.Phase = v1alpha1.EphemeralRunnerSucceeded
err = k8sClient.Status().Patch(ctx, updatedRunner, client.MergeFrom(&runnerList.Items[0]))
Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner")
updatedRunner = runnerList.Items[1].DeepCopy()
updatedRunner.Status.Phase = corev1.PodRunning
updatedRunner.Status.Phase = v1alpha1.EphemeralRunnerRunning
err = k8sClient.Status().Patch(ctx, updatedRunner, client.MergeFrom(&runnerList.Items[1]))
Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner")
@@ -394,7 +403,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
runnerList = new(v1alpha1.EphemeralRunnerList)
Eventually(
func() (int, error) {
err := listEphemeralRunnersAndRemoveFinalizers(ctx, k8sClient, runnerList, ephemeralRunnerSet.Namespace)
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
if err != nil {
return -1, err
}
@@ -420,7 +429,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
runnerList = new(v1alpha1.EphemeralRunnerList)
Eventually(
func() (int, error) {
err := listEphemeralRunnersAndRemoveFinalizers(ctx, k8sClient, runnerList, ephemeralRunnerSet.Namespace)
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
if err != nil {
return -1, err
}
@@ -447,7 +456,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
runnerList := new(v1alpha1.EphemeralRunnerList)
Eventually(
func() (int, error) {
err := listEphemeralRunnersAndRemoveFinalizers(ctx, k8sClient, runnerList, ephemeralRunnerSet.Namespace)
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
if err != nil {
return -1, err
}
@@ -459,12 +468,12 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
).Should(BeEquivalentTo(2), "2 EphemeralRunner should be created")
updatedRunner := runnerList.Items[0].DeepCopy()
updatedRunner.Status.Phase = corev1.PodSucceeded
updatedRunner.Status.Phase = v1alpha1.EphemeralRunnerRunning
err = k8sClient.Status().Patch(ctx, updatedRunner, client.MergeFrom(&runnerList.Items[0]))
Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner")
updatedRunner = runnerList.Items[1].DeepCopy()
updatedRunner.Status.Phase = corev1.PodPending
updatedRunner.Status.Phase = v1alpha1.EphemeralRunnerPending
err = k8sClient.Status().Patch(ctx, updatedRunner, client.MergeFrom(&runnerList.Items[1]))
Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner")
@@ -472,7 +481,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
runnerList = new(v1alpha1.EphemeralRunnerList)
Consistently(
func() (int, error) {
err := listEphemeralRunnersAndRemoveFinalizers(ctx, k8sClient, runnerList, ephemeralRunnerSet.Namespace)
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
if err != nil {
return -1, err
}
@@ -499,7 +508,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
runnerList := new(v1alpha1.EphemeralRunnerList)
Eventually(
func() (int, error) {
err := listEphemeralRunnersAndRemoveFinalizers(ctx, k8sClient, runnerList, ephemeralRunnerSet.Namespace)
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
if err != nil {
return -1, err
}
@@ -511,12 +520,12 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
).Should(BeEquivalentTo(2), "2 EphemeralRunner should be created")
updatedRunner := runnerList.Items[0].DeepCopy()
updatedRunner.Status.Phase = corev1.PodSucceeded
updatedRunner.Status.Phase = v1alpha1.EphemeralRunnerSucceeded
err = k8sClient.Status().Patch(ctx, updatedRunner, client.MergeFrom(&runnerList.Items[0]))
Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner")
updatedRunner = runnerList.Items[1].DeepCopy()
updatedRunner.Status.Phase = corev1.PodRunning
updatedRunner.Status.Phase = v1alpha1.EphemeralRunnerRunning
err = k8sClient.Status().Patch(ctx, updatedRunner, client.MergeFrom(&runnerList.Items[1]))
Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner")
@@ -536,7 +545,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
// We should have 3 runners, and have no Succeeded ones
Eventually(
func() error {
err := listEphemeralRunnersAndRemoveFinalizers(ctx, k8sClient, runnerList, ephemeralRunnerSet.Namespace)
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
if err != nil {
return err
}
@@ -546,7 +555,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
}
for _, runner := range runnerList.Items {
if runner.Status.Phase == corev1.PodSucceeded {
if runner.Status.Phase == v1alpha1.EphemeralRunnerSucceeded {
return fmt.Errorf("Runner %s is in Succeeded phase", runner.Name)
}
}
@@ -573,7 +582,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
runnerList := new(v1alpha1.EphemeralRunnerList)
Eventually(
func() (int, error) {
err := listEphemeralRunnersAndRemoveFinalizers(ctx, k8sClient, runnerList, ephemeralRunnerSet.Namespace)
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
if err != nil {
return -1, err
}
@@ -585,12 +594,12 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
).Should(BeEquivalentTo(2), "2 EphemeralRunner should be created")
updatedRunner := runnerList.Items[0].DeepCopy()
updatedRunner.Status.Phase = corev1.PodSucceeded
updatedRunner.Status.Phase = v1alpha1.EphemeralRunnerSucceeded
err = k8sClient.Status().Patch(ctx, updatedRunner, client.MergeFrom(&runnerList.Items[0]))
Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner")
updatedRunner = runnerList.Items[1].DeepCopy()
updatedRunner.Status.Phase = corev1.PodPending
updatedRunner.Status.Phase = v1alpha1.EphemeralRunnerPending
err = k8sClient.Status().Patch(ctx, updatedRunner, client.MergeFrom(&runnerList.Items[1]))
Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner")
@@ -598,7 +607,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
runnerList = new(v1alpha1.EphemeralRunnerList)
Eventually(
func() error {
err := listEphemeralRunnersAndRemoveFinalizers(ctx, k8sClient, runnerList, ephemeralRunnerSet.Namespace)
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
if err != nil {
return err
}
@@ -606,9 +615,9 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
succeeded := 0
for _, runner := range runnerList.Items {
switch runner.Status.Phase {
case corev1.PodSucceeded:
case v1alpha1.EphemeralRunnerSucceeded:
succeeded++
case corev1.PodPending:
case v1alpha1.EphemeralRunnerPending:
pending++
}
}
@@ -639,7 +648,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
// We should have 1 runner up and pending
Eventually(
func() error {
err := listEphemeralRunnersAndRemoveFinalizers(ctx, k8sClient, runnerList, ephemeralRunnerSet.Namespace)
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
if err != nil {
return err
}
@@ -648,7 +657,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
return fmt.Errorf("Expected 1 runner, got %d", len(runnerList.Items))
}
if runnerList.Items[0].Status.Phase != corev1.PodPending {
if runnerList.Items[0].Status.Phase != v1alpha1.EphemeralRunnerPending {
return fmt.Errorf("Expected runner to be in Pending, got %s", runnerList.Items[0].Status.Phase)
}
@@ -660,13 +669,13 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
// Now, the ephemeral runner finally is done and we can scale down to 0
updatedRunner = runnerList.Items[0].DeepCopy()
updatedRunner.Status.Phase = corev1.PodSucceeded
updatedRunner.Status.Phase = v1alpha1.EphemeralRunnerSucceeded
err = k8sClient.Status().Patch(ctx, updatedRunner, client.MergeFrom(&runnerList.Items[0]))
Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner")
Eventually(
func() (int, error) {
err := listEphemeralRunnersAndRemoveFinalizers(ctx, k8sClient, runnerList, ephemeralRunnerSet.Namespace)
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
if err != nil {
return -1, err
}
@@ -693,7 +702,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
runnerList := new(v1alpha1.EphemeralRunnerList)
Eventually(
func() (int, error) {
err := listEphemeralRunnersAndRemoveFinalizers(ctx, k8sClient, runnerList, ephemeralRunnerSet.Namespace)
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
if err != nil {
return -1, err
}
@@ -706,12 +715,12 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
// Put one runner in Pending and one in Running
updatedRunner := runnerList.Items[0].DeepCopy()
updatedRunner.Status.Phase = corev1.PodPending
updatedRunner.Status.Phase = v1alpha1.EphemeralRunnerPending
err = k8sClient.Status().Patch(ctx, updatedRunner, client.MergeFrom(&runnerList.Items[0]))
Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner")
updatedRunner = runnerList.Items[1].DeepCopy()
updatedRunner.Status.Phase = corev1.PodRunning
updatedRunner.Status.Phase = v1alpha1.EphemeralRunnerRunning
err = k8sClient.Status().Patch(ctx, updatedRunner, client.MergeFrom(&runnerList.Items[1]))
Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner")
@@ -719,7 +728,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
runnerList = new(v1alpha1.EphemeralRunnerList)
Eventually(
func() error {
err := listEphemeralRunnersAndRemoveFinalizers(ctx, k8sClient, runnerList, ephemeralRunnerSet.Namespace)
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
if err != nil {
return err
}
@@ -729,9 +738,9 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
for _, runner := range runnerList.Items {
switch runner.Status.Phase {
case corev1.PodPending:
case v1alpha1.EphemeralRunnerPending:
pending++
case corev1.PodRunning:
case v1alpha1.EphemeralRunnerRunning:
running++
}
@@ -763,7 +772,8 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
runnerList = new(v1alpha1.EphemeralRunnerList)
Consistently(
func() (int, error) {
if err := listEphemeralRunnersAndRemoveFinalizers(ctx, k8sClient, runnerList, ephemeralRunnerSet.Namespace); err != nil {
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
if err != nil {
return -1, err
}
@@ -789,7 +799,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
runnerList = new(v1alpha1.EphemeralRunnerList)
Eventually(
func() (int, error) {
err := listEphemeralRunnersAndRemoveFinalizers(ctx, k8sClient, runnerList, ephemeralRunnerSet.Namespace)
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
if err != nil {
return -1, err
}
@@ -816,7 +826,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
runnerList := new(v1alpha1.EphemeralRunnerList)
Eventually(
func() (int, error) {
err := listEphemeralRunnersAndRemoveFinalizers(ctx, k8sClient, runnerList, ephemeralRunnerSet.Namespace)
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
if err != nil {
return -1, err
}
@@ -829,12 +839,12 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
// Put one runner in Succeeded and one in Running
updatedRunner := runnerList.Items[0].DeepCopy()
updatedRunner.Status.Phase = corev1.PodSucceeded
updatedRunner.Status.Phase = v1alpha1.EphemeralRunnerSucceeded
err = k8sClient.Status().Patch(ctx, updatedRunner, client.MergeFrom(&runnerList.Items[0]))
Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner")
updatedRunner = runnerList.Items[1].DeepCopy()
updatedRunner.Status.Phase = corev1.PodRunning
updatedRunner.Status.Phase = v1alpha1.EphemeralRunnerRunning
err = k8sClient.Status().Patch(ctx, updatedRunner, client.MergeFrom(&runnerList.Items[1]))
Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner")
@@ -843,7 +853,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
runnerList = new(v1alpha1.EphemeralRunnerList)
Eventually(
func() error {
err := listEphemeralRunnersAndRemoveFinalizers(ctx, k8sClient, runnerList, ephemeralRunnerSet.Namespace)
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
if err != nil {
return err
}
@@ -853,9 +863,9 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
for _, runner := range runnerList.Items {
switch runner.Status.Phase {
case corev1.PodSucceeded:
case v1alpha1.EphemeralRunnerSucceeded:
succeeded++
case corev1.PodRunning:
case v1alpha1.EphemeralRunnerRunning:
running++
}
}
@@ -887,7 +897,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
runnerList = new(v1alpha1.EphemeralRunnerList)
Eventually(
func() error {
err := listEphemeralRunnersAndRemoveFinalizers(ctx, k8sClient, runnerList, ephemeralRunnerSet.Namespace)
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
if err != nil {
return err
}
@@ -897,7 +907,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
}
for _, runner := range runnerList.Items {
if runner.Status.Phase == corev1.PodSucceeded {
if runner.Status.Phase == v1alpha1.EphemeralRunnerSucceeded {
return fmt.Errorf("Expected no runners in Succeeded phase, got one")
}
}
@@ -928,7 +938,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
runnerList := new(v1alpha1.EphemeralRunnerList)
Eventually(
func() (bool, error) {
err := listEphemeralRunnersAndRemoveFinalizers(ctx, k8sClient, runnerList, ephemeralRunnerSet.Namespace)
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
if err != nil {
return false, err
}
@@ -962,7 +972,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
pending := pendingOriginal.DeepCopy()
pending.Status.RunnerId = 101
pending.Status.Phase = corev1.PodPending
pending.Status.Phase = v1alpha1.EphemeralRunnerPending
err = k8sClient.Status().Patch(ctx, pending, client.MergeFrom(pendingOriginal))
if err != nil {
@@ -976,7 +986,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
empty = empty[1:]
running := runningOriginal.DeepCopy()
running.Status.RunnerId = 102
running.Status.Phase = corev1.PodRunning
running.Status.Phase = v1alpha1.EphemeralRunnerRunning
err = k8sClient.Status().Patch(ctx, running, client.MergeFrom(runningOriginal))
if err != nil {
@@ -990,7 +1000,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
failed := failedOriginal.DeepCopy()
failed.Status.RunnerId = 103
failed.Status.Phase = corev1.PodFailed
failed.Status.Phase = v1alpha1.EphemeralRunnerAborted
err = k8sClient.Status().Patch(ctx, failed, client.MergeFrom(failedOriginal))
if err != nil {
@@ -1036,7 +1046,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
Eventually(
func() (int, error) {
runnerList = new(v1alpha1.EphemeralRunnerList)
err := listEphemeralRunnersAndRemoveFinalizers(ctx, k8sClient, runnerList, ephemeralRunnerSet.Namespace)
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
if err != nil {
return -1, err
}
@@ -1198,7 +1208,7 @@ var _ = Describe("Test EphemeralRunnerSet controller with proxy settings", func(
Eventually(func(g Gomega) {
runnerList := new(v1alpha1.EphemeralRunnerList)
err := listEphemeralRunnersAndRemoveFinalizers(ctx, k8sClient, runnerList, ephemeralRunnerSet.Namespace)
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
g.Expect(err).NotTo(HaveOccurred(), "failed to list EphemeralRunners")
for _, runner := range runnerList.Items {
@@ -1216,7 +1226,7 @@ var _ = Describe("Test EphemeralRunnerSet controller with proxy settings", func(
Eventually(
func(g Gomega) (int, error) {
runnerList := new(v1alpha1.EphemeralRunnerList)
err := listEphemeralRunnersAndRemoveFinalizers(ctx, k8sClient, runnerList, ephemeralRunnerSet.Namespace)
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
if err != nil {
return -1, err
}
@@ -1226,7 +1236,7 @@ var _ = Describe("Test EphemeralRunnerSet controller with proxy settings", func(
for i, runner := range runnerList.Items {
if runner.Status.RunnerId == 0 {
updatedRunner := runner.DeepCopy()
updatedRunner.Status.Phase = corev1.PodSucceeded
updatedRunner.Status.Phase = v1alpha1.EphemeralRunnerSucceeded
updatedRunner.Status.RunnerId = i + 100
err = k8sClient.Status().Patch(ctx, updatedRunner, client.MergeFrom(&runner))
Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner")
@@ -1235,7 +1245,7 @@ var _ = Describe("Test EphemeralRunnerSet controller with proxy settings", func(
}
if refetch {
err := listEphemeralRunnersAndRemoveFinalizers(ctx, k8sClient, runnerList, ephemeralRunnerSet.Namespace)
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
if err != nil {
return -1, err
}
@@ -1250,18 +1260,6 @@ var _ = Describe("Test EphemeralRunnerSet controller with proxy settings", func(
err = k8sClient.Delete(ctx, ephemeralRunnerSet)
Expect(err).NotTo(HaveOccurred(), "failed to delete EphemeralRunnerSet")
Eventually(func(g Gomega) (int, error) {
runnerList := new(v1alpha1.EphemeralRunnerList)
err := listEphemeralRunnersAndRemoveFinalizers(ctx, k8sClient, runnerList, ephemeralRunnerSet.Namespace)
if err != nil {
return -1, err
}
return len(runnerList.Items), nil
},
ephemeralRunnerSetTestTimeout,
ephemeralRunnerSetTestInterval,
).Should(BeEquivalentTo(0), "EphemeralRunners should be deleted")
// Assert that the proxy secret is deleted
Eventually(func(g Gomega) {
proxySecret := &corev1.Secret{}
@@ -1345,7 +1343,7 @@ var _ = Describe("Test EphemeralRunnerSet controller with proxy settings", func(
runnerList := new(v1alpha1.EphemeralRunnerList)
Eventually(func() (int, error) {
err := listEphemeralRunnersAndRemoveFinalizers(ctx, k8sClient, runnerList, ephemeralRunnerSet.Namespace)
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
if err != nil {
return -1, err
}
@@ -1357,7 +1355,7 @@ var _ = Describe("Test EphemeralRunnerSet controller with proxy settings", func(
).Should(BeEquivalentTo(1), "failed to create ephemeral runner")
runner := runnerList.Items[0].DeepCopy()
runner.Status.Phase = corev1.PodRunning
runner.Status.Phase = v1alpha1.EphemeralRunnerRunning
runner.Status.RunnerId = 100
err = k8sClient.Status().Patch(ctx, runner, client.MergeFrom(&runnerList.Items[0]))
Expect(err).NotTo(HaveOccurred(), "failed to update ephemeral runner status")
@@ -1492,7 +1490,7 @@ var _ = Describe("Test EphemeralRunnerSet controller with custom root CA", func(
runnerList := new(v1alpha1.EphemeralRunnerList)
Eventually(func() (int, error) {
err := listEphemeralRunnersAndRemoveFinalizers(ctx, k8sClient, runnerList, ephemeralRunnerSet.Namespace)
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
if err != nil {
return -1, err
}
@@ -1507,7 +1505,7 @@ var _ = Describe("Test EphemeralRunnerSet controller with custom root CA", func(
Expect(runner.Spec.GitHubServerTLS).NotTo(BeNil(), "runner tls config should not be nil")
Expect(runner.Spec.GitHubServerTLS).To(BeEquivalentTo(ephemeralRunnerSet.Spec.EphemeralRunnerSpec.GitHubServerTLS), "runner tls config should be correct")
runner.Status.Phase = corev1.PodRunning
runner.Status.Phase = v1alpha1.EphemeralRunnerRunning
runner.Status.RunnerId = 100
err = k8sClient.Status().Patch(ctx, runner, client.MergeFrom(&runnerList.Items[0]))
Expect(err).NotTo(HaveOccurred(), "failed to update ephemeral runner status")
@@ -1531,27 +1529,3 @@ var _ = Describe("Test EphemeralRunnerSet controller with custom root CA", func(
).Should(BeTrue(), "server was not called")
})
})
// helper function to list ephemeral runners and strip finalizers from those marked for deletion, since the ephemeral runner reconciler is not started in these tests
func listEphemeralRunnersAndRemoveFinalizers(ctx context.Context, k8sClient client.Client, list *v1alpha1.EphemeralRunnerList, namespace string) error {
err := k8sClient.List(ctx, list, client.InNamespace(namespace))
if err != nil {
return err
}
// Since the ephemeral runner reconciler is not running, strip finalizers from runners marked for deletion and drop them from the returned list.
liveItems := make([]v1alpha1.EphemeralRunner, 0)
for _, item := range list.Items {
if !item.DeletionTimestamp.IsZero() {
if err := patch(ctx, k8sClient, &item, func(runner *v1alpha1.EphemeralRunner) {
runner.Finalizers = []string{}
}); err != nil {
return err
}
continue
}
liveItems = append(liveItems, item)
}
list.Items = liveItems
return nil
}
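The specs above repeat one idiom many times: deep-copy a runner, set the new phase, then patch the status subresource with a merge patch computed against the original. A consolidated sketch of that idiom, reusing this file's imports; EphemeralRunnerPhase is an assumed name for the type shared by the new v1alpha1 constants, and patchRunnerPhase is not a helper that exists in the repo:

// Sketch only: the status-patch idiom from the specs above, under the
// assumption that the new phase constants share a type named
// v1alpha1.EphemeralRunnerPhase.
func patchRunnerPhase(ctx context.Context, c client.Client, runner *v1alpha1.EphemeralRunner, phase v1alpha1.EphemeralRunnerPhase) error {
	updated := runner.DeepCopy()
	updated.Status.Phase = phase
	// Status().Patch targets the status subresource; MergeFrom computes the
	// patch relative to the unmodified original.
	return c.Status().Patch(ctx, updated, client.MergeFrom(runner))
}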


@@ -42,11 +42,11 @@ func createNamespace(t ginkgo.GinkgoTInterface, client client.Client) (*corev1.N
ObjectMeta: metav1.ObjectMeta{Name: "testns-autoscaling" + RandStringRunes(5)},
}
err := client.Create(context.Background(), ns)
err := k8sClient.Create(context.Background(), ns)
require.NoError(t, err)
t.Cleanup(func() {
err := client.Delete(context.Background(), ns)
err := k8sClient.Delete(context.Background(), ns)
require.NoError(t, err)
})


@@ -2,6 +2,7 @@ package actionsgithubcom
import (
"bytes"
"context"
"encoding/json"
"fmt"
"maps"
@@ -82,7 +83,7 @@ func boolPtr(v bool) *bool {
}
func (b *ResourceBuilder) newAutoScalingListener(autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet, namespace, image string, imagePullSecrets []corev1.LocalObjectReference) (*v1alpha1.AutoscalingListener, error) {
runnerScaleSetID, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIDAnnotationKey])
runnerScaleSetId, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdAnnotationKey])
if err != nil {
return nil, err
}
@@ -124,7 +125,7 @@ func (b *ResourceBuilder) newAutoScalingListener(autoscalingRunnerSet *v1alpha1.
GitHubConfigUrl: autoscalingRunnerSet.Spec.GitHubConfigUrl,
GitHubConfigSecret: autoscalingRunnerSet.Spec.GitHubConfigSecret,
VaultConfig: autoscalingRunnerSet.VaultConfig(),
RunnerScaleSetId: runnerScaleSetID,
RunnerScaleSetId: runnerScaleSetId,
AutoscalingRunnerSetNamespace: autoscalingRunnerSet.Namespace,
AutoscalingRunnerSetName: autoscalingRunnerSet.Name,
EphemeralRunnerSetName: ephemeralRunnerSet.Name,
@@ -495,7 +496,7 @@ func (b *ResourceBuilder) newScaleSetListenerRoleBinding(autoscalingListener *v1
}
func (b *ResourceBuilder) newEphemeralRunnerSet(autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet) (*v1alpha1.EphemeralRunnerSet, error) {
runnerScaleSetID, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIDAnnotationKey])
runnerScaleSetId, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdAnnotationKey])
if err != nil {
return nil, err
}
@@ -540,7 +541,7 @@ func (b *ResourceBuilder) newEphemeralRunnerSet(autoscalingRunnerSet *v1alpha1.A
Spec: v1alpha1.EphemeralRunnerSetSpec{
Replicas: 0,
EphemeralRunnerSpec: v1alpha1.EphemeralRunnerSpec{
RunnerScaleSetId: runnerScaleSetID,
RunnerScaleSetId: runnerScaleSetId,
GitHubConfigUrl: autoscalingRunnerSet.Spec.GitHubConfigUrl,
GitHubConfigSecret: autoscalingRunnerSet.Spec.GitHubConfigSecret,
Proxy: autoscalingRunnerSet.Spec.Proxy,
@@ -555,23 +556,28 @@ func (b *ResourceBuilder) newEphemeralRunnerSet(autoscalingRunnerSet *v1alpha1.A
}
func (b *ResourceBuilder) newEphemeralRunner(ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet) *v1alpha1.EphemeralRunner {
labels := make(map[string]string, len(ephemeralRunnerSet.Labels))
maps.Copy(labels, ephemeralRunnerSet.Labels)
labels[LabelKeyKubernetesComponent] = "runner"
labels := make(map[string]string)
for k, v := range ephemeralRunnerSet.Labels {
if k == LabelKeyKubernetesComponent {
labels[k] = "runner"
} else {
labels[k] = v
}
}
annotations := make(map[string]string)
for key, val := range ephemeralRunnerSet.Annotations {
annotations[key] = val
}
annotations := make(map[string]string, len(ephemeralRunnerSet.Annotations)+1)
maps.Copy(annotations, ephemeralRunnerSet.Annotations)
annotations[AnnotationKeyPatchID] = strconv.Itoa(ephemeralRunnerSet.Spec.PatchID)
return &v1alpha1.EphemeralRunner{
TypeMeta: metav1.TypeMeta{},
ObjectMeta: metav1.ObjectMeta{
GenerateName: ephemeralRunnerSet.Name + "-runner-",
Namespace: ephemeralRunnerSet.Namespace,
Labels: labels,
Annotations: annotations,
Finalizers: []string{
ephemeralRunnerFinalizerName,
ephemeralRunnerActionsFinalizerName,
},
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: ephemeralRunnerSet.GetObjectKind().GroupVersionKind().GroupVersion().String(),
@@ -587,17 +593,27 @@ func (b *ResourceBuilder) newEphemeralRunner(ephemeralRunnerSet *v1alpha1.Epheme
}
}
func (b *ResourceBuilder) newEphemeralRunnerPod(runner *v1alpha1.EphemeralRunner, secret *corev1.Secret, envs ...corev1.EnvVar) *corev1.Pod {
func (b *ResourceBuilder) newEphemeralRunnerPod(ctx context.Context, runner *v1alpha1.EphemeralRunner, secret *corev1.Secret, envs ...corev1.EnvVar) *corev1.Pod {
var newPod corev1.Pod
annotations := make(map[string]string, len(runner.Annotations)+len(runner.Spec.Annotations))
maps.Copy(annotations, runner.Annotations)
maps.Copy(annotations, runner.Spec.Annotations)
labels := map[string]string{}
annotations := map[string]string{}
labels := make(map[string]string, len(runner.Labels)+len(runner.Spec.Labels)+2)
maps.Copy(labels, runner.Labels)
maps.Copy(labels, runner.Spec.Labels)
for k, v := range runner.Labels {
labels[k] = v
}
for k, v := range runner.Spec.Labels {
labels[k] = v
}
labels["actions-ephemeral-runner"] = string(corev1.ConditionTrue)
for k, v := range runner.Annotations {
annotations[k] = v
}
for k, v := range runner.Spec.Annotations {
annotations[k] = v
}
labels[LabelKeyPodTemplateHash] = hash.FNVHashStringObjects(
FilterLabels(labels, LabelKeyRunnerTemplateHash),
annotations,
@@ -674,28 +690,20 @@ func scaleSetListenerConfigName(autoscalingListener *v1alpha1.AutoscalingListene
return fmt.Sprintf("%s-config", autoscalingListener.Name)
}
func hashSuffix(namespace, runnerGroup, configURL string) string {
namespaceHash := hash.FNVHashString(namespace + "@" + runnerGroup + "@" + configURL)
func scaleSetListenerName(autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet) string {
namespaceHash := hash.FNVHashString(autoscalingRunnerSet.Namespace)
if len(namespaceHash) > 8 {
namespaceHash = namespaceHash[:8]
}
return namespaceHash
}
func scaleSetListenerName(autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet) string {
return fmt.Sprintf(
"%v-%v-listener",
autoscalingRunnerSet.Name,
hashSuffix(
autoscalingRunnerSet.Namespace,
autoscalingRunnerSet.Spec.RunnerGroup,
autoscalingRunnerSet.Spec.GitHubConfigUrl,
),
)
return fmt.Sprintf("%v-%v-listener", autoscalingRunnerSet.Name, namespaceHash)
}
func proxyListenerSecretName(autoscalingListener *v1alpha1.AutoscalingListener) string {
return autoscalingListener.Name + "-proxy"
namespaceHash := hash.FNVHashString(autoscalingListener.Spec.AutoscalingRunnerSetNamespace)
if len(namespaceHash) > 8 {
namespaceHash = namespaceHash[:8]
}
return fmt.Sprintf("%v-%v-listener-proxy", autoscalingListener.Spec.AutoscalingRunnerSetName, namespaceHash)
}
func proxyEphemeralRunnerSetSecretName(ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet) string {
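Both naming helpers above derive an eight-character suffix by truncating an FNV hash (of just the namespace on one side of this change, of namespace@runnerGroup@configURL on the other). A standalone sketch of the truncation, assuming hash.FNVHashString yields a hex-encoded FNV checksum; the repo's hash package is the source of truth:

package main

import (
	"fmt"
	"hash/fnv"
)

// fnvHashString stands in for the repo's hash.FNVHashString; the exact
// encoding may differ, but the truncate-to-8 logic works the same way.
func fnvHashString(s string) string {
	h := fnv.New32a()
	h.Write([]byte(s)) // hash.Hash.Write never returns an error
	return fmt.Sprintf("%x", h.Sum32())
}

func listenerName(runnerSetName, namespace string) string {
	suffix := fnvHashString(namespace)
	if len(suffix) > 8 {
		suffix = suffix[:8]
	}
	return fmt.Sprintf("%v-%v-listener", runnerSetName, suffix)
}

func main() {
	fmt.Println(listenerName("arc-runners", "arc-system"))
}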


@@ -1,6 +1,7 @@
package actionsgithubcom
import (
"context"
"fmt"
"strings"
"testing"
@@ -27,7 +28,7 @@ func TestLabelPropagation(t *testing.T) {
"directly.excluded.org/arbitrary": "not-excluded-value",
},
Annotations: map[string]string{
runnerScaleSetIDAnnotationKey: "1",
runnerScaleSetIdAnnotationKey: "1",
AnnotationKeyGitHubRunnerGroupName: "test-group",
AnnotationKeyGitHubRunnerScaleSetName: "test-scale-set",
},
@@ -103,7 +104,7 @@ func TestLabelPropagation(t *testing.T) {
Name: "test",
},
}
pod := b.newEphemeralRunnerPod(ephemeralRunner, runnerSecret)
pod := b.newEphemeralRunnerPod(context.TODO(), ephemeralRunner, runnerSecret)
for key := range ephemeralRunner.Labels {
assert.Equal(t, ephemeralRunner.Labels[key], pod.Labels[key])
}
@@ -123,7 +124,7 @@ func TestGitHubURLTrimLabelValues(t *testing.T) {
LabelKeyKubernetesVersion: "0.2.0",
},
Annotations: map[string]string{
runnerScaleSetIDAnnotationKey: "1",
runnerScaleSetIdAnnotationKey: "1",
AnnotationKeyGitHubRunnerGroupName: "test-group",
AnnotationKeyGitHubRunnerScaleSetName: "test-scale-set",
},
@@ -189,7 +190,7 @@ func TestOwnershipRelationships(t *testing.T) {
LabelKeyKubernetesVersion: "0.2.0",
},
Annotations: map[string]string{
runnerScaleSetIDAnnotationKey: "1",
runnerScaleSetIdAnnotationKey: "1",
AnnotationKeyGitHubRunnerGroupName: "test-group",
AnnotationKeyGitHubRunnerScaleSetName: "test-scale-set",
annotationKeyValuesHash: "test-hash",
@@ -232,7 +233,7 @@ func TestOwnershipRelationships(t *testing.T) {
Name: "test-secret",
},
}
pod := b.newEphemeralRunnerPod(ephemeralRunner, runnerSecret)
pod := b.newEphemeralRunnerPod(context.TODO(), ephemeralRunner, runnerSecret)
// Test EphemeralRunnerPod ownership
require.Len(t, pod.OwnerReferences, 1, "EphemeralRunnerPod should have exactly one owner reference")
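The label propagation these tests cover depends on copy order: the builder copies runner.Spec.Labels after runner.Labels, so spec-level keys win on collision. A minimal sketch of the maps.Copy (Go 1.21+) semantics that the hand-rolled loops elsewhere in this diff are equivalent to:

package main

import (
	"fmt"
	"maps"
)

func main() {
	dst := map[string]string{"app": "runner", "tier": "ci"}
	src := map[string]string{"app": "listener"}
	// maps.Copy writes every src entry into dst, overwriting on collision,
	// exactly like: for k, v := range src { dst[k] = v }
	maps.Copy(dst, src)
	fmt.Println(dst["app"], dst["tier"]) // listener ci
}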


@@ -15,12 +15,10 @@ import (
func TestGitHubConfig(t *testing.T) {
t.Run("when given a valid URL", func(t *testing.T) {
tests := []struct {
name string
configURL string
expected *actions.GitHubConfig
}{
{
name: "repository URL",
configURL: "https://github.com/org/repo",
expected: &actions.GitHubConfig{
Scope: actions.GitHubScopeRepository,
@@ -31,7 +29,6 @@ func TestGitHubConfig(t *testing.T) {
},
},
{
name: "repository URL with trailing slash",
configURL: "https://github.com/org/repo/",
expected: &actions.GitHubConfig{
Scope: actions.GitHubScopeRepository,
@@ -42,7 +39,6 @@ func TestGitHubConfig(t *testing.T) {
},
},
{
name: "organization URL",
configURL: "https://github.com/org",
expected: &actions.GitHubConfig{
Scope: actions.GitHubScopeOrganization,
@@ -53,7 +49,6 @@ func TestGitHubConfig(t *testing.T) {
},
},
{
name: "enterprise URL",
configURL: "https://github.com/enterprises/my-enterprise",
expected: &actions.GitHubConfig{
Scope: actions.GitHubScopeEnterprise,
@@ -64,7 +59,6 @@ func TestGitHubConfig(t *testing.T) {
},
},
{
name: "enterprise URL with trailing slash",
configURL: "https://github.com/enterprises/my-enterprise/",
expected: &actions.GitHubConfig{
Scope: actions.GitHubScopeEnterprise,
@@ -75,7 +69,6 @@ func TestGitHubConfig(t *testing.T) {
},
},
{
name: "organization URL with www",
configURL: "https://www.github.com/org",
expected: &actions.GitHubConfig{
Scope: actions.GitHubScopeOrganization,
@@ -86,7 +79,6 @@ func TestGitHubConfig(t *testing.T) {
},
},
{
name: "organization URL with www and trailing slash",
configURL: "https://www.github.com/org/",
expected: &actions.GitHubConfig{
Scope: actions.GitHubScopeOrganization,
@@ -97,7 +89,6 @@ func TestGitHubConfig(t *testing.T) {
},
},
{
name: "github local URL",
configURL: "https://github.localhost/org",
expected: &actions.GitHubConfig{
Scope: actions.GitHubScopeOrganization,
@@ -108,7 +99,6 @@ func TestGitHubConfig(t *testing.T) {
},
},
{
name: "github local org URL",
configURL: "https://my-ghes.com/org",
expected: &actions.GitHubConfig{
Scope: actions.GitHubScopeOrganization,
@@ -119,7 +109,6 @@ func TestGitHubConfig(t *testing.T) {
},
},
{
name: "github local URL with trailing slash",
configURL: "https://my-ghes.com/org/",
expected: &actions.GitHubConfig{
Scope: actions.GitHubScopeOrganization,
@@ -130,7 +119,6 @@ func TestGitHubConfig(t *testing.T) {
},
},
{
name: "github local URL with ghe.com",
configURL: "https://my-ghes.ghe.com/org/",
expected: &actions.GitHubConfig{
Scope: actions.GitHubScopeOrganization,
@@ -143,7 +131,7 @@ func TestGitHubConfig(t *testing.T) {
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
t.Run(test.configURL, func(t *testing.T) {
parsedURL, err := url.Parse(strings.Trim(test.configURL, "/"))
require.NoError(t, err)
test.expected.ConfigURL = parsedURL
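The change above drops the redundant name field and names each subtest after its configURL input, which keeps case names in lockstep with the data. A sketch of the pattern; note the testing package rewrites spaces in subtest names to underscores, and slashes inside a name read as extra levels to go test -run:

package sketch

import (
	"net/url"
	"strings"
	"testing"

	"github.com/stretchr/testify/require"
)

func TestConfigURLs(t *testing.T) {
	for _, configURL := range []string{
		"https://github.com/org/repo",
		"https://github.com/org/repo/",
	} {
		t.Run(configURL, func(t *testing.T) {
			// Trailing slashes are trimmed before parsing, as in the test above.
			u, err := url.Parse(strings.Trim(configURL, "/"))
			require.NoError(t, err)
			require.Equal(t, "github.com", u.Host)
		})
	}
}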


@@ -119,84 +119,88 @@ func TestGitHubAPIError(t *testing.T) {
})
}
func TestParseActionsErrorFromResponse(t *testing.T) {
func ParseActionsErrorFromResponse(t *testing.T) {
t.Run("empty content length", func(t *testing.T) {
response := &http.Response{
ContentLength: 0,
Header: http.Header{},
Header: http.Header{
actions.HeaderActionsActivityID: []string{"activity-id"},
},
StatusCode: 404,
}
response.Header.Add(actions.HeaderActionsActivityID, "activity-id")
err := actions.ParseActionsErrorFromResponse(response)
require.Error(t, err)
assert.Equal(t, "activity-id", err.(*actions.ActionsError).ActivityID)
assert.Equal(t, 404, err.(*actions.ActionsError).StatusCode)
assert.Equal(t, "unknown exception", err.(*actions.ActionsError).Err.Error())
assert.Equal(t, err.(*actions.ActionsError).ActivityID, "activity-id")
assert.Equal(t, err.(*actions.ActionsError).StatusCode, 404)
assert.Equal(t, err.(*actions.ActionsError).Err.Error(), "unknown exception")
})
t.Run("contains text plain error", func(t *testing.T) {
errorMessage := "example error message"
response := &http.Response{
ContentLength: int64(len(errorMessage)),
Header: http.Header{
actions.HeaderActionsActivityID: []string{"activity-id"},
"Content-Type": []string{"text/plain"},
},
StatusCode: 404,
Header: http.Header{},
Body: io.NopCloser(strings.NewReader(errorMessage)),
}
response.Header.Add(actions.HeaderActionsActivityID, "activity-id")
response.Header.Add("Content-Type", "text/plain")
err := actions.ParseActionsErrorFromResponse(response)
require.Error(t, err)
var actionsError *actions.ActionsError
require.ErrorAs(t, err, &actionsError)
assert.Equal(t, "activity-id", actionsError.ActivityID)
assert.Equal(t, 404, actionsError.StatusCode)
assert.Equal(t, errorMessage, actionsError.Err.Error())
assert.ErrorAs(t, err, &actionsError)
assert.Equal(t, actionsError.ActivityID, "activity-id")
assert.Equal(t, actionsError.StatusCode, 404)
assert.Equal(t, actionsError.Err.Error(), errorMessage)
})
t.Run("contains json error", func(t *testing.T) {
errorMessage := `{"typeName":"exception-name","message":"example error message"}`
response := &http.Response{
ContentLength: int64(len(errorMessage)),
Header: http.Header{},
Header: http.Header{
actions.HeaderActionsActivityID: []string{"activity-id"},
"Content-Type": []string{"application/json"},
},
StatusCode: 404,
Body: io.NopCloser(strings.NewReader(errorMessage)),
}
response.Header.Add(actions.HeaderActionsActivityID, "activity-id")
response.Header.Add("Content-Type", "application/json")
err := actions.ParseActionsErrorFromResponse(response)
require.Error(t, err)
var actionsError *actions.ActionsError
require.ErrorAs(t, err, &actionsError)
assert.Equal(t, "activity-id", actionsError.ActivityID)
assert.Equal(t, 404, actionsError.StatusCode)
assert.ErrorAs(t, err, &actionsError)
assert.Equal(t, actionsError.ActivityID, "activity-id")
assert.Equal(t, actionsError.StatusCode, 404)
inner, ok := actionsError.Err.(*actions.ActionsExceptionError)
require.True(t, ok)
assert.Equal(t, "exception-name", inner.ExceptionName)
assert.Equal(t, "example error message", inner.Message)
assert.Equal(t, inner.ExceptionName, "exception-name")
assert.Equal(t, inner.Message, "example error message")
})
t.Run("wrapped exception error", func(t *testing.T) {
errorMessage := `{"typeName":"exception-name","message":"example error message"}`
response := &http.Response{
ContentLength: int64(len(errorMessage)),
Header: http.Header{},
Header: http.Header{
actions.HeaderActionsActivityID: []string{"activity-id"},
"Content-Type": []string{"application/json"},
},
StatusCode: 404,
Body: io.NopCloser(strings.NewReader(errorMessage)),
}
response.Header.Add(actions.HeaderActionsActivityID, "activity-id")
response.Header.Add("Content-Type", "application/json")
err := actions.ParseActionsErrorFromResponse(response)
require.Error(t, err)
var actionsExceptionError *actions.ActionsExceptionError
require.ErrorAs(t, err, &actionsExceptionError)
assert.ErrorAs(t, err, &actionsExceptionError)
assert.Equal(t, "exception-name", actionsExceptionError.ExceptionName)
assert.Equal(t, "example error message", actionsExceptionError.Message)
assert.Equal(t, actionsExceptionError.ExceptionName, "exception-name")
assert.Equal(t, actionsExceptionError.Message, "example error message")
})
}
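One subtlety in the header construction contrasted above: building http.Header as a composite literal stores keys verbatim, while Header.Add and Header.Set canonicalize them, so literal keys (including constants such as actions.HeaderActionsActivityID) must already be in canonical MIME form for later lookups to hit. A minimal demonstration:

package main

import (
	"fmt"
	"net/http"
)

func main() {
	viaLiteral := http.Header{"content-type": {"text/plain"}} // key stored as-is
	viaSet := http.Header{}
	viaSet.Set("content-type", "text/plain") // key canonicalized to Content-Type

	fmt.Printf("%q\n", viaLiteral.Get("Content-Type")) // "": the lookup misses
	fmt.Printf("%q\n", viaSet.Get("Content-Type"))     // "text/plain"
}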

go.mod

@@ -3,8 +3,8 @@ module github.com/actions/actions-runner-controller
go 1.25.1
require (
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.19.1
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.0
github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets v1.4.0
github.com/bradleyfalzon/ghinstallation/v2 v2.17.0
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc
@@ -16,27 +16,27 @@ require (
github.com/google/uuid v1.6.0
github.com/gorilla/mux v1.8.1
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79
github.com/gruntwork-io/terratest v0.53.0
github.com/gruntwork-io/terratest v0.51.0
github.com/hashicorp/go-retryablehttp v0.7.8
github.com/kelseyhightower/envconfig v1.4.0
github.com/onsi/ginkgo v1.16.5
github.com/onsi/ginkgo/v2 v2.27.2
github.com/onsi/ginkgo/v2 v2.26.0
github.com/onsi/gomega v1.38.2
github.com/prometheus/client_golang v1.23.2
github.com/stretchr/testify v1.11.1
github.com/teambition/rrule-go v1.8.2
go.uber.org/multierr v1.11.0
go.uber.org/zap v1.27.0
golang.org/x/net v0.47.0
golang.org/x/oauth2 v0.33.0
golang.org/x/sync v0.18.0
golang.org/x/net v0.46.0
golang.org/x/oauth2 v0.32.0
golang.org/x/sync v0.17.0
gomodules.xyz/jsonpatch/v2 v2.5.0
gopkg.in/yaml.v2 v2.4.0
k8s.io/api v0.34.2
k8s.io/apimachinery v0.34.2
k8s.io/client-go v0.34.2
k8s.io/api v0.34.1
k8s.io/apimachinery v0.34.1
k8s.io/client-go v0.34.1
k8s.io/utils v0.0.0-20251002143259-bc988d571ff4
sigs.k8s.io/controller-runtime v0.22.4
sigs.k8s.io/controller-runtime v0.22.3
sigs.k8s.io/yaml v1.6.0
)
@@ -44,7 +44,7 @@ require (
filippo.io/edwards25519 v1.1.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 // indirect
github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.2.0 // indirect
github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0 // indirect
github.com/AzureAD/microsoft-authentication-library-for-go v1.5.0 // indirect
github.com/BurntSushi/toml v1.5.0 // indirect
github.com/Masterminds/semver/v3 v3.4.0 // indirect
github.com/ProtonMail/go-crypto v1.3.0 // indirect
@@ -163,16 +163,17 @@ require (
github.com/virtuald/go-ordered-json v0.0.0-20170621173500-b18e6e673d74 // indirect
github.com/x448/float16 v0.8.4 // indirect
github.com/xrash/smetrics v0.0.0-20250705151800-55b8f293f342 // indirect
go.uber.org/automaxprocs v1.6.0 // indirect
go.yaml.in/yaml/v2 v2.4.3 // indirect
go.yaml.in/yaml/v3 v3.0.4 // indirect
golang.org/x/crypto v0.45.0 // indirect
golang.org/x/crypto v0.43.0 // indirect
golang.org/x/exp v0.0.0-20251002181428-27f1f14c8bb9 // indirect
golang.org/x/mod v0.29.0 // indirect
golang.org/x/sys v0.38.0 // indirect
golang.org/x/term v0.37.0 // indirect
golang.org/x/text v0.31.0 // indirect
golang.org/x/mod v0.28.0 // indirect
golang.org/x/sys v0.37.0 // indirect
golang.org/x/term v0.36.0 // indirect
golang.org/x/text v0.30.0 // indirect
golang.org/x/time v0.13.0 // indirect
golang.org/x/tools v0.38.0 // indirect
golang.org/x/tools v0.37.0 // indirect
google.golang.org/protobuf v1.36.10 // indirect
gopkg.in/evanphx/json-patch.v4 v4.13.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect

go.sum

@@ -1,10 +1,10 @@
filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=
filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4=
github.com/Azure/azure-sdk-for-go v51.0.0+incompatible h1:p7blnyJSjJqf5jflHbSGhIhEpXIgIFmYZNg5uwqweso=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0 h1:JXg2dwJUmPB9JmtVmdEB16APJ7jurfbY5jnfXpJoRMc=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0/go.mod h1:YD5h/ldMsG0XiIw7PdyNhLxaM317eFh5yNLccNfGdyw=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1 h1:Hk5QBxZQC1jb2Fwj6mpzme37xbCDdNTxU7O9eb5+LB4=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1/go.mod h1:IYus9qsFobWIc2YVwe/WPjcnyCkPKtnHAqUYeebc8z0=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.19.1 h1:5YTBM8QDVIBN3sxBil89WfdAAqDZbyJTgh688DSxX5w=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.19.1/go.mod h1:YD5h/ldMsG0XiIw7PdyNhLxaM317eFh5yNLccNfGdyw=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.0 h1:KpMC6LFL7mqpExyMC9jVOYRiVhLmamjeZfRsUpB7l4s=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.0/go.mod h1:J7MUC/wtRpfGVbQ5sIItY5/FuVWmvzlY21WAOfQnq/I=
github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2 h1:yz1bePFlP5Vws5+8ez6T3HWXPmwOK7Yvq8QxDBD3SKY=
github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2/go.mod h1:Pa9ZNPuoNu/GztvBSKk9J1cDJW6vk/n0zLtV4mgd8N8=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 h1:9iefClla7iYpfYWdzPCRDozdmndjTm8DXdpCzPajMgA=
@@ -15,8 +15,8 @@ github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.2.0 h1:nCYfg
github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.2.0/go.mod h1:ucUjca2JtSZboY8IoUqyQyuuXvwbMBVwFOm0vdQPNhA=
github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJTmL004Abzc5wDB5VtZG2PJk5ndYDgVacGqfirKxjM=
github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE=
github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0 h1:XRzhVemXdgvJqCH0sFfrBUTnUJSBrBf7++ypk+twtRs=
github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0/go.mod h1:HKpQxkWaGLJ+D/5H8QRpyQXA1eKjxkFlOMwck5+33Jk=
github.com/AzureAD/microsoft-authentication-library-for-go v1.5.0 h1:XkkQbfMyuH2jTSjQjSoihryI8GINRcs4xp8lNawg0FI=
github.com/AzureAD/microsoft-authentication-library-for-go v1.5.0/go.mod h1:HKpQxkWaGLJ+D/5H8QRpyQXA1eKjxkFlOMwck5+33Jk=
github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg=
github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1Xbatp0=
@@ -134,8 +134,8 @@ github.com/gkampitakis/ciinfo v0.3.2 h1:JcuOPk8ZU7nZQjdUhctuhQofk7BGHuIy0c9Ez8BN
github.com/gkampitakis/ciinfo v0.3.2/go.mod h1:1NIwaOcFChN4fa/B0hEBdAb6npDlFL8Bwx4dfRLRqAo=
github.com/gkampitakis/go-diff v1.3.2 h1:Qyn0J9XJSDTgnsgHRdz9Zp24RaJeKMUHg2+PDZZdC4M=
github.com/gkampitakis/go-diff v1.3.2/go.mod h1:LLgOrpqleQe26cte8s36HTWcTmMEur6OPYerdAAS9tk=
github.com/gkampitakis/go-snaps v0.5.15 h1:amyJrvM1D33cPHwVrjo9jQxX8g/7E2wYdZ+01KS3zGE=
github.com/gkampitakis/go-snaps v0.5.15/go.mod h1:HNpx/9GoKisdhw9AFOBT1N7DBs9DiHo/hGheFGBZ+mc=
github.com/gkampitakis/go-snaps v0.5.14 h1:3fAqdB6BCPKHDMHAKRwtPUwYexKtGrNuw8HX/T/4neo=
github.com/gkampitakis/go-snaps v0.5.14/go.mod h1:HNpx/9GoKisdhw9AFOBT1N7DBs9DiHo/hGheFGBZ+mc=
github.com/go-errors/errors v1.5.1 h1:ZwEMSLRCapFLflTpT7NKaAc7ukJ8ZPEjzlxt8rPN8bk=
github.com/go-errors/errors v1.5.1/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og=
github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
@@ -233,8 +233,8 @@ github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5T
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA=
github.com/gruntwork-io/go-commons v0.17.2 h1:14dsCJ7M5Vv2X3BIPKeG9Kdy6vTMGhM8L4WZazxfTuY=
github.com/gruntwork-io/go-commons v0.17.2/go.mod h1:zs7Q2AbUKuTarBPy19CIxJVUX/rBamfW8IwuWKniWkE=
github.com/gruntwork-io/terratest v0.53.0 h1:r5U3nfrQCTGvnlJIIh6R5g8z8dwRcjNESYO/wYyOXsI=
github.com/gruntwork-io/terratest v0.53.0/go.mod h1:y2Evi+Ac04QpzF3mbRPqrBjipDN7gjqlw6+OZoy2vX4=
github.com/gruntwork-io/terratest v0.51.0 h1:RCXlCwWlHqhUoxgF6n3hvywvbvrsTXqoqt34BrnLekw=
github.com/gruntwork-io/terratest v0.51.0/go.mod h1:evZHXb8VWDgv5O5zEEwfkwMhkx9I53QR/RB11cISrpg=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
@@ -316,8 +316,8 @@ github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
github.com/onsi/ginkgo/v2 v2.27.2 h1:LzwLj0b89qtIy6SSASkzlNvX6WktqurSHwkk2ipF/Ns=
github.com/onsi/ginkgo/v2 v2.27.2/go.mod h1:ArE1D/XhNXBXCBkKOLkbsb2c81dQHCRcF5zwn/ykDRo=
github.com/onsi/ginkgo/v2 v2.26.0 h1:1J4Wut1IlYZNEAWIV3ALrT9NfiaGW2cDCJQSFQMs/gE=
github.com/onsi/ginkgo/v2 v2.26.0/go.mod h1:qhEywmzWTBUY88kfO0BRvX4py7scov9yR+Az2oavUzw=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.38.2 h1:eZCjf2xjZAqe+LeWvKb5weQ+NcPwX84kqJ0cZNxok2A=
@@ -331,6 +331,8 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRI
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pquerna/otp v1.5.0 h1:NMMR+WrmaqXU4EzdGJEE1aUUI0AMRzsp96fFFWNPwxs=
github.com/pquerna/otp v1.5.0/go.mod h1:dkJfzwRKNiegxyNb54X/3fLwhCynbMspSyWKnvi1AEg=
github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g=
github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U=
github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o=
github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg=
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
@@ -378,6 +380,8 @@ github.com/xrash/smetrics v0.0.0-20250705151800-55b8f293f342 h1:FnBeRrxr7OU4VvAz
github.com/xrash/smetrics v0.0.0-20250705151800-55b8f293f342/go.mod h1:Ohn+xnUBiLI6FVj/9LpzZWtj1/D6lUovWYBkxHVV3aM=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs=
go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
@@ -391,30 +395,30 @@ go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q=
golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4=
golang.org/x/crypto v0.43.0 h1:dduJYIi3A3KOfdGOHX8AVZ/jGiyPa3IbBozJ5kNuE04=
golang.org/x/crypto v0.43.0/go.mod h1:BFbav4mRNlXJL4wNeejLpWxB7wMbc79PdRGhWKncxR0=
golang.org/x/exp v0.0.0-20251002181428-27f1f14c8bb9 h1:TQwNpfvNkxAVlItJf6Cr5JTsVZoC/Sj7K3OZv2Pc14A=
golang.org/x/exp v0.0.0-20251002181428-27f1f14c8bb9/go.mod h1:TwQYMMnGpvZyc+JpB/UAuTNIsVJifOlSkrZkhcvpVUk=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA=
golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w=
golang.org/x/mod v0.28.0 h1:gQBtGhjxykdjY9YhZpSlZIsbnaE2+PgjfLWUQTnoZ1U=
golang.org/x/mod v0.28.0/go.mod h1:yfB/L0NOf/kmEbXjzCPOx1iK1fRutOydrCMsqRhEBxI=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
golang.org/x/oauth2 v0.33.0 h1:4Q+qn+E5z8gPRJfmRy7C2gGG3T4jIprK6aSYgTXGRpo=
golang.org/x/oauth2 v0.33.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
golang.org/x/net v0.46.0 h1:giFlY12I07fugqwPuWJi68oOnpfqFnJIJzaIIm2JVV4=
golang.org/x/net v0.46.0/go.mod h1:Q9BGdFy1y4nkUwiLvT5qtyhAnEHgnQ/zd8PfU6nc210=
golang.org/x/oauth2 v0.32.0 h1:jsCblLleRMDrxMN29H3z/k1KliIvpLgCkE6R8FXXNgY=
golang.org/x/oauth2 v0.32.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug=
golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -426,14 +430,14 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU=
golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254=
golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ=
golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/term v0.36.0 h1:zMPR+aF8gfksFprF/Nc/rd1wRS1EI6nDBGyWAvDzx2Q=
golang.org/x/term v0.36.0/go.mod h1:Qu394IJq6V6dCBRgwqshf3mPF85AqzYEzofzRdZkWss=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
golang.org/x/text v0.30.0 h1:yznKA/E9zq54KzlzBEAWn1NXSQ8DIp/NYMy88xJjl4k=
golang.org/x/text v0.30.0/go.mod h1:yDdHFIX9t+tORqspjENWgzaCVXgk0yYnYuSZ8UzzBVM=
golang.org/x/time v0.13.0 h1:eUlYslOIt32DgYD6utsuUeHs4d7AsEYLuIAdg7FlYgI=
golang.org/x/time v0.13.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -441,8 +445,8 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ=
golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs=
golang.org/x/tools v0.37.0 h1:DVSRzp7FwePZW356yEAChSdNcQo6Nsp+fex1SUW09lE=
golang.org/x/tools v0.37.0/go.mod h1:MBN5QPQtLMHVdvsbtarmTNukZDdgwdwlO5qGacAzF0w=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -475,22 +479,22 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
k8s.io/api v0.34.2 h1:fsSUNZhV+bnL6Aqrp6O7lMTy6o5x2C4XLjnh//8SLYY=
k8s.io/api v0.34.2/go.mod h1:MMBPaWlED2a8w4RSeanD76f7opUoypY8TFYkSM+3XHw=
k8s.io/api v0.34.1 h1:jC+153630BMdlFukegoEL8E/yT7aLyQkIVuwhmwDgJM=
k8s.io/api v0.34.1/go.mod h1:SB80FxFtXn5/gwzCoN6QCtPD7Vbu5w2n1S0J5gFfTYk=
k8s.io/apiextensions-apiserver v0.34.1 h1:NNPBva8FNAPt1iSVwIE0FsdrVriRXMsaWFMqJbII2CI=
k8s.io/apiextensions-apiserver v0.34.1/go.mod h1:hP9Rld3zF5Ay2Of3BeEpLAToP+l4s5UlxiHfqRaRcMc=
k8s.io/apimachinery v0.34.2 h1:zQ12Uk3eMHPxrsbUJgNF8bTauTVR2WgqJsTmwTE/NW4=
k8s.io/apimachinery v0.34.2/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw=
k8s.io/client-go v0.34.2 h1:Co6XiknN+uUZqiddlfAjT68184/37PS4QAzYvQvDR8M=
k8s.io/client-go v0.34.2/go.mod h1:2VYDl1XXJsdcAxw7BenFslRQX28Dxz91U9MWKjX97fE=
k8s.io/apimachinery v0.34.1 h1:dTlxFls/eikpJxmAC7MVE8oOeP1zryV7iRyIjB0gky4=
k8s.io/apimachinery v0.34.1/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw=
k8s.io/client-go v0.34.1 h1:ZUPJKgXsnKwVwmKKdPfw4tB58+7/Ik3CrjOEhsiZ7mY=
k8s.io/client-go v0.34.1/go.mod h1:kA8v0FP+tk6sZA0yKLRG67LWjqufAoSHA2xVGKw9Of8=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 h1:Y3gxNAuB0OBLImH611+UDZcmKS3g6CthxToOb37KgwE=
k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912/go.mod h1:kdmbQkyfwUagLfXIad1y2TdrjPFWp2Q89B3qkRwf/pQ=
k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 h1:SjGebBtkBqHFOli+05xYbK8YF1Dzkbzn+gDM4X9T4Ck=
k8s.io/utils v0.0.0-20251002143259-bc988d571ff4/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
sigs.k8s.io/controller-runtime v0.22.4 h1:GEjV7KV3TY8e+tJ2LCTxUTanW4z/FmNB7l327UfMq9A=
sigs.k8s.io/controller-runtime v0.22.4/go.mod h1:+QX1XUpTXN4mLoblf4tqr5CQcyHPAki2HLXqQMY6vh8=
sigs.k8s.io/controller-runtime v0.22.3 h1:I7mfqz/a/WdmDCEnXmSPm8/b/yRTy6JsKKENTijTq8Y=
sigs.k8s.io/controller-runtime v0.22.3/go.mod h1:+QX1XUpTXN4mLoblf4tqr5CQcyHPAki2HLXqQMY6vh8=
sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 h1:IpInykpT6ceI+QxKBbEflcR5EXP7sU1kvOlxwZh5txg=
sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=

View File

@@ -1,93 +0,0 @@
#!/bin/bash
DIR="$(dirname "${BASH_SOURCE[0]}")"
DIR="$(realpath "${DIR}")"
TEST_DIR="$(realpath "${DIR}/../test/actions.github.com")"
export PLATFORMS="linux/amd64"
TARGETS=()
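# Collect every *.test.sh file under TEST_DIR (as paths relative to TEST_DIR) into the list of test targets.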
function set_targets() {
local cases
cases="$(find "${TEST_DIR}" -name '*.test.sh' | sed "s#^${TEST_DIR}/##g")"
mapfile -t TARGETS < <(echo "${cases}")
echo "${TARGETS[@]}"
}
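# Fail fast if the environment variables the test scripts rely on are missing.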
function env_test() {
if [[ -z "${GITHUB_TOKEN}" ]]; then
echo "Error: GITHUB_TOKEN is not set"
exit 1
fi
if [[ -z "${TARGET_ORG}" ]]; then
echo "Error: TARGET_ORG is not set"
exit 1
fi
if [[ -z "${TARGET_REPO}" ]]; then
echo "Error: TARGET_REPO is not set"
exit 1
fi
}
function usage() {
echo "Usage: $0 [test_name]"
echo " test_name: the name of the test to run"
echo " if not specified, all tests will be run"
echo " test_name should be the name of the test file without the .test.sh suffix"
echo ""
exit 1
}
function main() {
local failed=()
env_test
if [[ -z "${1}" ]]; then
echo "Running all tests"
set_targets
elif [[ -f "${TEST_DIR}/${1}.test.sh" ]]; then
echo "Running test ${1}"
TARGETS=("${1}.test.sh")
else
usage
fi
for target in "${TARGETS[@]}"; do
echo "============================================================"
test="${TEST_DIR}/${target}"
if [[ ! -x "${test}" ]]; then
echo "Error: test ${test} is not executable or not found"
failed+=("${test}")
continue
fi
echo "Running test ${target}"
if ! "${test}"; then
failed+=("${target}")
echo "---------------------------------"
echo "FAILED: ${target}"
else
echo "---------------------------------"
echo "PASSED: ${target}"
fi
echo "============================================================"
done
if [[ "${#failed[@]}" -gt 0 ]]; then
echo "Failed tests:"
for fail in "${failed[@]}"; do
echo " ${fail}"
done
exit 1
fi
}
main "$@"

View File

@@ -6,9 +6,9 @@ DIND_ROOTLESS_RUNNER_NAME ?= ${DOCKER_USER}/actions-runner-dind-rootless
OS_IMAGE ?= ubuntu-22.04
TARGETPLATFORM ?= $(shell arch)
RUNNER_VERSION ?= 2.330.0
RUNNER_VERSION ?= 2.329.0
RUNNER_CONTAINER_HOOKS_VERSION ?= 0.8.0
DOCKER_VERSION ?= 28.0.4
DOCKER_VERSION ?= 24.0.7
# default list of platforms for which multiarch image is built
ifeq (${PLATFORMS}, )

View File

@@ -1,2 +1,2 @@
RUNNER_VERSION=2.330.0
RUNNER_VERSION=2.329.0
RUNNER_CONTAINER_HOOKS_VERSION=0.8.0

View File

@@ -5,7 +5,7 @@ ARG RUNNER_VERSION
ARG RUNNER_CONTAINER_HOOKS_VERSION
# Docker and Docker Compose arguments
ENV CHANNEL=stable
ARG DOCKER_COMPOSE_VERSION=v2.38.2
ARG DOCKER_COMPOSE_VERSION=v2.23.0
ARG DUMB_INIT_VERSION=1.2.5
# Other arguments

View File

@@ -5,7 +5,7 @@ ARG RUNNER_VERSION
ARG RUNNER_CONTAINER_HOOKS_VERSION
# Docker and Docker Compose arguments
ENV CHANNEL=stable
ARG DOCKER_COMPOSE_VERSION=v2.38.2
ARG DOCKER_COMPOSE_VERSION=v2.23.0
ARG DUMB_INIT_VERSION=1.2.5
ARG RUNNER_USER_UID=1001

View File

@@ -5,7 +5,7 @@ ARG RUNNER_VERSION
ARG RUNNER_CONTAINER_HOOKS_VERSION
# Docker and Docker Compose arguments
ENV CHANNEL=stable
ARG DOCKER_COMPOSE_VERSION=v2.38.2
ARG DOCKER_COMPOSE_VERSION=v2.23.0
ARG DUMB_INIT_VERSION=1.2.5
ARG RUNNER_USER_UID=1001

View File

@@ -5,8 +5,8 @@ ARG RUNNER_VERSION
ARG RUNNER_CONTAINER_HOOKS_VERSION
# Docker and Docker Compose arguments
ARG CHANNEL=stable
ARG DOCKER_VERSION=28.0.4
ARG DOCKER_COMPOSE_VERSION=v2.38.2
ARG DOCKER_VERSION=24.0.7
ARG DOCKER_COMPOSE_VERSION=v2.23.0
ARG DUMB_INIT_VERSION=1.2.5
# Use 1001 and 121 for compatibility with GitHub-hosted runners

View File

@@ -5,8 +5,8 @@ ARG RUNNER_VERSION
ARG RUNNER_CONTAINER_HOOKS_VERSION
# Docker and Docker Compose arguments
ARG CHANNEL=stable
ARG DOCKER_VERSION=28.0.4
ARG DOCKER_COMPOSE_VERSION=v2.38.2
ARG DOCKER_VERSION=24.0.7
ARG DOCKER_COMPOSE_VERSION=v2.23.0
ARG DUMB_INIT_VERSION=1.2.5
ARG RUNNER_USER_UID=1001
ARG DOCKER_GROUP_GID=121

View File

@@ -5,8 +5,8 @@ ARG RUNNER_VERSION
ARG RUNNER_CONTAINER_HOOKS_VERSION
# Docker and Docker Compose arguments
ARG CHANNEL=stable
ARG DOCKER_VERSION=28.0.4
ARG DOCKER_COMPOSE_VERSION=v2.38.2
ARG DOCKER_VERSION=24.0.7
ARG DOCKER_COMPOSE_VERSION=v2.23.0
ARG DUMB_INIT_VERSION=1.2.5
ARG RUNNER_USER_UID=1001
ARG DOCKER_GROUP_GID=121

View File

@@ -5,8 +5,8 @@ ARG RUNNER_VERSION
ARG RUNNER_CONTAINER_HOOKS_VERSION
# Docker and Docker Compose arguments
ARG CHANNEL=stable
ARG DOCKER_VERSION=28.0.4
ARG DOCKER_COMPOSE_VERSION=v2.38.2
ARG DOCKER_VERSION=24.0.7
ARG DOCKER_COMPOSE_VERSION=v2.23.0
ARG DUMB_INIT_VERSION=1.2.5
# Use 1001 and 121 for compatibility with GitHub-hosted runners

View File

@@ -5,8 +5,8 @@ ARG RUNNER_VERSION
ARG RUNNER_CONTAINER_HOOKS_VERSION
# Docker and Docker Compose arguments
ARG CHANNEL=stable
ARG DOCKER_VERSION=28.0.4
ARG DOCKER_COMPOSE_VERSION=v2.38.2
ARG DOCKER_VERSION=24.0.7
ARG DOCKER_COMPOSE_VERSION=v2.23.0
ARG DUMB_INIT_VERSION=1.2.5
ARG RUNNER_USER_UID=1001
ARG DOCKER_GROUP_GID=121

View File

@@ -5,8 +5,8 @@ ARG RUNNER_VERSION
ARG RUNNER_CONTAINER_HOOKS_VERSION
# Docker and Docker Compose arguments
ARG CHANNEL=stable
ARG DOCKER_VERSION=28.0.4
ARG DOCKER_COMPOSE_VERSION=v2.38.2
ARG DOCKER_VERSION=24.0.7
ARG DOCKER_COMPOSE_VERSION=v2.23.0
ARG DUMB_INIT_VERSION=1.2.5
ARG RUNNER_USER_UID=1001
ARG DOCKER_GROUP_GID=121

View File

@@ -1,31 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: squid
spec:
replicas: 1
selector:
matchLabels:
app: squid
template:
metadata:
labels:
app: squid
spec:
containers:
- name: squid
image: ubuntu/squid:latest
ports:
- containerPort: 3128
---
apiVersion: v1
kind: Service
metadata:
name: squid
spec:
selector:
app: squid
ports:
- protocol: TCP
port: 3128
targetPort: 3128

View File

@@ -1,87 +0,0 @@
#!/bin/bash
set -euo pipefail
DIR="$(realpath "$(dirname "${BASH_SOURCE[0]}")")"
ROOT_DIR="$(realpath "${DIR}/../..")"
source "${DIR}/helper.sh"
SCALE_SET_NAME="anonymous-proxy-$(date +'%M%S')$(((RANDOM + 100) % 100 + 1))"
SCALE_SET_NAMESPACE="arc-runners"
WORKFLOW_FILE="arc-test-workflow.yaml"
ARC_NAME="arc"
ARC_NAMESPACE="arc-systems"
function install_arc() {
echo "Creating namespace ${ARC_NAMESPACE}"
kubectl create namespace "${SCALE_SET_NAMESPACE}"
echo "Installing ARC"
helm install "${ARC_NAME}" \
--namespace "${ARC_NAMESPACE}" \
--create-namespace \
--set image.repository="${IMAGE_NAME}" \
--set image.tag="${IMAGE_TAG}" \
"${ROOT_DIR}/charts/gha-runner-scale-set-controller" \
--debug
if ! NAME="${ARC_NAME}" NAMESPACE="${ARC_NAMESPACE}" wait_for_arc; then
NAMESPACE="${ARC_NAMESPACE}" log_arc
return 1
fi
}
function install_squid() {
echo "Starting squid-proxy"
kubectl apply -f "${DIR}/anonymous-proxy-setup.squid.yaml"
}
function install_scale_set() {
echo "Installing scale set ${SCALE_SET_NAMESPACE}/${SCALE_SET_NAME}"
helm install "${SCALE_SET_NAME}" \
--namespace "${SCALE_SET_NAMESPACE}" \
--create-namespace \
--set githubConfigUrl="https://github.com/${TARGET_ORG}/${TARGET_REPO}" \
--set githubConfigSecret.github_token="${GITHUB_TOKEN}" \
--set proxy.https.url="http://squid.default.svc.cluster.local:3128" \
--set "proxy.noProxy[0]=10.96.0.1:443" \
"${ROOT_DIR}/charts/gha-runner-scale-set" \
--debug
if ! NAME="${SCALE_SET_NAME}" NAMESPACE="${ARC_NAMESPACE}" wait_for_scale_set; then
NAMESPACE="${ARC_NAMESPACE}" log_arc
return 1
fi
}
function main() {
local failed=()
build_image
create_cluster
install_arc
install_squid
install_scale_set || {
echo "Scale set installation failed"
NAMESPACE="${ARC_NAMESPACE}" log_arc
delete_cluster
exit 1
}
WORKFLOW_FILE="${WORKFLOW_FILE}" SCALE_SET_NAME="${SCALE_SET_NAME}" run_workflow || failed+=("run_workflow")
INSTALLATION_NAME="${SCALE_SET_NAME}" NAMESPACE="${SCALE_SET_NAMESPACE}" cleanup_scale_set || failed+=("cleanup_scale_set")
NAMESPACE="${ARC_NAMESPACE}" log_arc || failed+=("log_arc")
delete_cluster
print_results "${failed[@]}"
}
main

View File

@@ -1,31 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: squid
spec:
replicas: 1
selector:
matchLabels:
app: squid
template:
metadata:
labels:
app: squid
spec:
containers:
- name: squid
image: huangtingluo/squid-proxy:latest
ports:
- containerPort: 3128
---
apiVersion: v1
kind: Service
metadata:
name: squid
spec:
selector:
app: squid
ports:
- protocol: TCP
port: 3128
targetPort: 3128

View File

@@ -1,102 +0,0 @@
#!/bin/bash
set -euo pipefail
DIR="$(realpath "$(dirname "${BASH_SOURCE[0]}")")"
ROOT_DIR="$(realpath "${DIR}/../..")"
source "${DIR}/helper.sh"
SCALE_SET_NAME="default-$(date +'%M%S')$(((RANDOM + 100) % 100 + 1))"
SCALE_SET_NAMESPACE="arc-runners"
WORKFLOW_FILE="arc-test-workflow.yaml"
ARC_NAME="arc"
ARC_NAMESPACE="arc-systems"
function install_arc() {
install_openebs || {
echo "OpenEBS installation failed"
return 1
}
echo "Creating namespace ${ARC_NAMESPACE}"
kubectl create namespace "${SCALE_SET_NAMESPACE}"
echo "Installing ARC"
helm install "${ARC_NAME}" \
--namespace "${ARC_NAMESPACE}" \
--create-namespace \
--set image.repository="${IMAGE_NAME}" \
--set image.tag="${IMAGE_TAG}" \
"${ROOT_DIR}/charts/gha-runner-scale-set-controller" \
--debug
if ! NAME="${ARC_NAME}" NAMESPACE="${ARC_NAMESPACE}" wait_for_arc; then
NAMESPACE="${ARC_NAMESPACE}" log_arc
return 1
fi
}
function install_squid() {
echo "Starting squid-proxy"
kubectl apply -f "${DIR}/auth-proxy-setup.squid.yaml"
echo "Creating scale set namespace"
kubectl create namespace "${SCALE_SET_NAMESPACE}" || true
echo "Creating squid proxy secret"
kubectl create secret generic proxy-auth \
--namespace=arc-runners \
--from-literal=username=github \
--from-literal=password='actions'
}
function install_scale_set() {
echo "Installing scale set ${SCALE_SET_NAMESPACE}/${SCALE_SET_NAME}"
helm install "${SCALE_SET_NAME}" \
--namespace "${SCALE_SET_NAMESPACE}" \
--create-namespace \
--set githubConfigUrl="https://github.com/${TARGET_ORG}/${TARGET_REPO}" \
--set githubConfigSecret.github_token="${GITHUB_TOKEN}" \
--set proxy.https.url="http://squid.default.svc.cluster.local:3128" \
--set proxy.https.credentialSecretRef="proxy-auth" \
--set "proxy.noProxy[0]=10.96.0.1:443" \
"${ROOT_DIR}/charts/gha-runner-scale-set" \
--version="${VERSION}" \
--debug
if ! NAME="${SCALE_SET_NAME}" NAMESPACE="${ARC_NAMESPACE}" wait_for_scale_set; then
NAMESPACE="${ARC_NAMESPACE}" log_arc
return 1
fi
}
function main() {
local failed=()
build_image
create_cluster
install_arc
install_squid
install_scale_set || {
echo "Scale set installation failed"
NAMESPACE="${ARC_NAMESPACE}" log_arc
delete_cluster
exit 1
}
WORKFLOW_FILE="${WORKFLOW_FILE}" SCALE_SET_NAME="${SCALE_SET_NAME}" run_workflow || failed+=("run_workflow")
INSTALLATION_NAME="${SCALE_SET_NAME}" NAMESPACE="${SCALE_SET_NAMESPACE}" cleanup_scale_set || failed+=("cleanup_scale_set")
NAMESPACE="${ARC_NAMESPACE}" log_arc || failed+=("log_arc")
delete_cluster
print_results "${failed[@]}"
}
main

View File

@@ -1,73 +0,0 @@
#!/bin/bash
set -euo pipefail
DIR="$(realpath "$(dirname "${BASH_SOURCE[0]}")")"
ROOT_DIR="$(realpath "${DIR}/../..")"
source "${DIR}/helper.sh"
SCALE_SET_NAME="default-$(date +'%M%S')$(((RANDOM + 100) % 100 + 1))"
SCALE_SET_NAMESPACE="arc-runners"
WORKFLOW_FILE="arc-test-workflow.yaml"
ARC_NAME="arc"
ARC_NAMESPACE="arc-systems"
function install_arc() {
echo "Creating namespace ${ARC_NAMESPACE}"
kubectl create namespace "${SCALE_SET_NAMESPACE}"
echo "Installing ARC"
helm install "${ARC_NAME}" \
--namespace "${ARC_NAMESPACE}" \
--create-namespace \
--set image.repository="${IMAGE_NAME}" \
--set image.tag="${IMAGE_TAG}" \
"${ROOT_DIR}/charts/gha-runner-scale-set-controller" \
--debug
if ! NAME="${ARC_NAME}" NAMESPACE="${ARC_NAMESPACE}" wait_for_arc; then
NAMESPACE="${ARC_NAMESPACE}" log_arc
return 1
fi
}
function install_scale_set() {
echo "Installing scale set ${SCALE_SET_NAMESPACE}/${SCALE_SET_NAME}"
helm install "${SCALE_SET_NAME}" \
--namespace "${SCALE_SET_NAMESPACE}" \
--create-namespace \
--set githubConfigUrl="https://github.com/${TARGET_ORG}/${TARGET_REPO}" \
--set githubConfigSecret.github_token="${GITHUB_TOKEN}" \
"${ROOT_DIR}/charts/gha-runner-scale-set" \
--version="${VERSION}" \
--debug
if ! NAME="${SCALE_SET_NAME}" NAMESPACE="${ARC_NAMESPACE}" wait_for_scale_set; then
NAMESPACE="${ARC_NAMESPACE}" log_arc
return 1
fi
}
function main() {
local failed=()
build_image
create_cluster
install_arc
install_scale_set
WORKFLOW_FILE="${WORKFLOW_FILE}" SCALE_SET_NAME="${SCALE_SET_NAME}" run_workflow || failed+=("run_workflow")
INSTALLATION_NAME="${SCALE_SET_NAME}" NAMESPACE="${SCALE_SET_NAMESPACE}" cleanup_scale_set || failed+=("cleanup_scale_set")
NAMESPACE="${ARC_NAMESPACE}" log_arc || failed+=("log_arc")
delete_cluster
print_results "${failed[@]}"
}
main

View File

@@ -1,74 +0,0 @@
#!/bin/bash
set -euo pipefail
DIR="$(realpath "$(dirname "${BASH_SOURCE[0]}")")"
ROOT_DIR="$(realpath "${DIR}/../..")"
source "${DIR}/helper.sh"
SCALE_SET_NAME="default-$(date +'%M%S')$(((RANDOM + 100) % 100 + 1))"
SCALE_SET_NAMESPACE="arc-runners"
WORKFLOW_FILE="arc-test-dind-workflow.yaml"
ARC_NAME="arc"
ARC_NAMESPACE="arc-systems"
function install_arc() {
echo "Creating namespace ${ARC_NAMESPACE}"
kubectl create namespace "${SCALE_SET_NAMESPACE}"
echo "Installing ARC"
helm install "${ARC_NAME}" \
--namespace "${ARC_NAMESPACE}" \
--create-namespace \
--set image.repository="${IMAGE_NAME}" \
--set image.tag="${IMAGE_TAG}" \
"${ROOT_DIR}/charts/gha-runner-scale-set-controller" \
--debug
if ! NAME="${ARC_NAME}" NAMESPACE="${ARC_NAMESPACE}" wait_for_arc; then
NAMESPACE="${ARC_NAMESPACE}" log_arc
return 1
fi
}
function install_scale_set() {
echo "Installing scale set ${SCALE_SET_NAMESPACE}/${SCALE_SET_NAME}"
helm install "${SCALE_SET_NAME}" \
--namespace "${SCALE_SET_NAMESPACE}" \
--create-namespace \
--set githubConfigUrl="https://github.com/${TARGET_ORG}/${TARGET_REPO}" \
--set githubConfigSecret.github_token="${GITHUB_TOKEN}" \
--set containerMode.type="dind" \
"${ROOT_DIR}/charts/gha-runner-scale-set" \
--version="${VERSION}" \
--debug
if ! NAME="${SCALE_SET_NAME}" NAMESPACE="${ARC_NAMESPACE}" wait_for_scale_set; then
NAMESPACE="${ARC_NAMESPACE}" log_arc
return 1
fi
}
function main() {
local failed=()
build_image
create_cluster
install_arc
install_scale_set
WORKFLOW_FILE="${WORKFLOW_FILE}" SCALE_SET_NAME="${SCALE_SET_NAME}" run_workflow || failed+=("run_workflow")
INSTALLATION_NAME="${SCALE_SET_NAME}" NAMESPACE="${SCALE_SET_NAMESPACE}" cleanup_scale_set || failed+=("cleanup_scale_set")
NAMESPACE="${ARC_NAMESPACE}" log_arc || failed+=("log_arc")
delete_cluster
print_results "${failed[@]}"
}
main

View File

@@ -1,3 +0,0 @@
export TARGET_ORG="org"
export TARGET_REPO="repo"
export GITHUB_TOKEN="token"

View File

@@ -1,196 +0,0 @@
#!/bin/bash
DIR="$(dirname "${BASH_SOURCE[0]}")"
DIR="$(realpath "${DIR}")"
ROOT_DIR="$(realpath "${DIR}/../..")"
export TARGET_ORG="${TARGET_ORG:-actions-runner-controller}"
export TARGET_REPO="${TARGET_REPO:-arc_e2e_test_dummy}"
export IMAGE_NAME="${IMAGE_NAME:-arc-test-image}"
export VERSION="${VERSION:-$(yq .version <"${ROOT_DIR}/charts/gha-runner-scale-set-controller/Chart.yaml")}"
export IMAGE_TAG="${VERSION}"
export IMAGE="${IMAGE_NAME}:${IMAGE_TAG}"
export PLATFORMS="linux/amd64"
COMMIT_SHA="$(git rev-parse HEAD)"
export COMMIT_SHA
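# Build the ARC controller image for the configured platforms and load it into the local Docker daemon.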
function build_image() {
echo "Building ARC image ${IMAGE}"
cd "${ROOT_DIR}" || exit 1
docker buildx build --platform "${PLATFORMS}" \
--build-arg VERSION="${VERSION}" \
--build-arg COMMIT_SHA="${COMMIT_SHA}" \
-t "${IMAGE}" \
-f Dockerfile \
. --load
echo "Created image ${IMAGE}"
cd - || exit 1
}
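# Recreate a fresh minikube cluster, verify DNS resolution from inside it, and preload the controller and runner images.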
function create_cluster() {
echo "Deleting minikube cluster if exists"
minikube delete || true
echo "Creating minikube cluster"
minikube start --driver=docker --container-runtime=docker --wait=all
echo "Verifying ns works"
if ! minikube ssh "nslookup github.com >/dev/null 2>&1"; then
echo "Nameserver configuration failed"
exit 1
fi
echo "Loading image into minikube cluster"
minikube image load "${IMAGE}"
echo "Loading runner image into minikube cluster"
minikube image load "ghcr.io/actions/actions-runner:latest"
}
function delete_cluster() {
echo "Deleting minikube cluster"
minikube delete
}
function log_arc() {
echo "ARC logs"
kubectl logs -n "${NAMESPACE}" -l app.kubernetes.io/name=gha-rs-controller
}
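# Poll (up to ~60s) for the controller pod to appear, then wait for it to become ready and dump its state.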
function wait_for_arc() {
echo "Waiting for ARC to be ready"
local count=0
while true; do
POD_NAME=$(kubectl get pods -n "${NAMESPACE}" -l app.kubernetes.io/name=gha-rs-controller -o name)
if [ -n "$POD_NAME" ]; then
echo "Pod found: $POD_NAME"
break
fi
if [ "$count" -ge 60 ]; then
echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-rs-controller"
return 1
fi
sleep 1
count=$((count + 1))
done
kubectl wait --timeout=30s --for=condition=ready pod -n "${NAMESPACE}" -l app.kubernetes.io/name=gha-rs-controller
kubectl get pod -n "${NAMESPACE}"
kubectl describe deployment "${NAME}" -n "${NAMESPACE}"
}
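# Poll (up to ~60s) for the listener pod of the given scale set, then wait for it to become ready.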
function wait_for_scale_set() {
local count=0
while true; do
POD_NAME=$(kubectl get pods -n "${NAMESPACE}" -l "actions.github.com/scale-set-name=${NAME}" -o name)
if [ -n "$POD_NAME" ]; then
echo "Pod found: ${POD_NAME}"
break
fi
if [ "$count" -ge 60 ]; then
echo "Timeout waiting for listener pod with label actions.github.com/scale-set-name=${NAME}"
return 1
fi
sleep 1
count=$((count + 1))
done
kubectl wait --timeout=30s --for=condition=ready pod -n "${NAMESPACE}" -l "actions.github.com/scale-set-name=${NAME}"
kubectl get pod -n "${NAMESPACE}" -l "actions.github.com/scale-set-name=${NAME}"
}
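# Uninstall the scale set release and wait for its AutoscalingRunnerSet resources to be deleted.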
function cleanup_scale_set() {
helm uninstall "${INSTALLATION_NAME}" --namespace "${NAMESPACE}" --debug
kubectl wait --timeout=40s --for=delete autoscalingrunnersets -n "${NAMESPACE}" -l app.kubernetes.io/instance="${INSTALLATION_NAME}"
}
function print_results() {
local failed=("$@")
if [[ "${#failed[@]}" -ne 0 ]]; then
echo "----------------------------------"
echo "The following tests failed:"
for test in "${failed[@]}"; do
echo " - ${test}"
done
return 1
else
echo "----------------------------------"
echo "All tests passed!"
fi
}
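# Dispatch WORKFLOW_FILE against the target repo, locate the run whose name contains SCALE_SET_NAME, and wait for it to finish.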
function run_workflow() {
echo "Checking if the workflow file exists"
gh workflow view -R "${TARGET_ORG}/${TARGET_REPO}" "${WORKFLOW_FILE}" || return 1
local queue_time
queue_time="$(date -u +%FT%TZ)"
echo "Running workflow ${WORKFLOW_FILE}"
gh workflow run -R "${TARGET_ORG}/${TARGET_REPO}" "${WORKFLOW_FILE}" --ref main -f arc_name="${SCALE_SET_NAME}" || return 1
echo "Waiting for run to start"
local count=0
local run_id=
while true; do
if [[ "${count}" -ge 12 ]]; then
echo "Timeout waiting for run to start"
return 1
fi
run_id=$(gh run list -R "${TARGET_ORG}/${TARGET_REPO}" --workflow "${WORKFLOW_FILE}" --created ">${queue_time}" --json "name,databaseId" --jq ".[] | select(.name | contains(\"${SCALE_SET_NAME}\")) | .databaseId")
echo "Run ID: ${run_id}"
if [ -n "$run_id" ]; then
echo "Run found!"
break
fi
echo "Run not found yet, waiting 5 seconds"
sleep 5
count=$((count + 1))
done
echo "Waiting for run to complete"
# Capturing stdout here would always yield an empty string (it is redirected), so rely on the exit status of gh run watch directly.
if ! gh run watch "${run_id}" -R "${TARGET_ORG}/${TARGET_REPO}" --exit-status &>/dev/null; then
echo "Run failed"
return 1
fi
echo "Run completed successfully"
}
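# Generic retry helper: retry <retries> <delay-seconds> <command...>, e.g. "retry 15 1 kubectl get pods".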
function retry() {
local retries=$1
shift
local delay=$1
shift
local n=1
until "$@"; do
if [[ $n -ge $retries ]]; then
echo "Attempt $n failed! No more retries left."
return 1
else
echo "Attempt $n failed! Retrying in $delay seconds..."
sleep "$delay"
n=$((n + 1))
fi
done
}
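# Install the OpenEBS dynamic localpv provisioner; tests that need a PVC use its openebs-hostpath storage class.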
function install_openebs() {
echo "Install openebs/dynamic-localpv-provisioner"
helm repo add openebs https://openebs.github.io/openebs
helm repo update
helm install openebs openebs/openebs -n openebs --create-namespace
}

View File

@@ -1,94 +0,0 @@
#!/bin/bash
set -euo pipefail
DIR="$(realpath "$(dirname "${BASH_SOURCE[0]}")")"
ROOT_DIR="$(realpath "${DIR}/../..")"
source "${DIR}/helper.sh" || {
echo "Failed to source helper.sh"
exit 1
}
SCALE_SET_NAME="init-min-runners-$(date +'%M%S')$(((RANDOM + 100) % 100 + 1))"
SCALE_SET_NAMESPACE="arc-runners"
WORKFLOW_FILE="arc-test-workflow.yaml"
ARC_NAME="arc"
ARC_NAMESPACE="arc-systems"
function install_arc() {
echo "Installing ARC"
helm install arc \
--namespace "arc-systems" \
--create-namespace \
--set image.repository="${IMAGE_NAME}" \
--set image.tag="${IMAGE_TAG}" \
--set flags.updateStrategy="eventual" \
"${ROOT_DIR}/charts/gha-runner-scale-set-controller" \
--debug
if ! NAME="${ARC_NAME}" NAMESPACE="${ARC_NAMESPACE}" wait_for_arc; then
NAMESPACE="${ARC_NAMESPACE}" log_arc
return 1
fi
}
function install_scale_set() {
echo "Installing scale set ${SCALE_SET_NAMESPACE}/${SCALE_SET_NAME}"
helm install "${SCALE_SET_NAME}" \
--namespace "${SCALE_SET_NAMESPACE}" \
--create-namespace \
--set githubConfigUrl="https://github.com/${TARGET_ORG}/${TARGET_REPO}" \
--set githubConfigSecret.github_token="${GITHUB_TOKEN}" \
--set minRunners=5 \
"${ROOT_DIR}/charts/gha-runner-scale-set" \
--debug
if ! NAME="${SCALE_SET_NAME}" NAMESPACE="${ARC_NAMESPACE}" wait_for_scale_set; then
NAMESPACE="${ARC_NAMESPACE}" log_arc
return 1
fi
}
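# Poll (up to ~30s) until exactly minRunners=5 runner pods exist in the scale set namespace.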
function assert_5_runners() {
echo "[*] Asserting 5 runners are created"
local count=0
while true; do
pod_count=$(kubectl get pods -n "${SCALE_SET_NAMESPACE}" --no-headers | wc -l)
if [[ "${pod_count}" -eq 5 ]]; then
echo "[*] Found 5 runners as expected"
break
fi
if [[ "$count" -ge 30 ]]; then
echo "Timeout waiting for 5 pods to be created"
exit 1
fi
sleep 1
count=$((count + 1))
done
}
function main() {
local failed=()
build_image
create_cluster
install_arc
install_scale_set
assert_5_runners || failed+=("assert_5_runners")
INSTALLATION_NAME="${SCALE_SET_NAME}" NAMESPACE="${SCALE_SET_NAMESPACE}" cleanup_scale_set || failed+=("cleanup_scale_set")
NAMESPACE="${ARC_NAMESPACE}" log_arc || failed+=("log_arc")
delete_cluster
print_results "${failed[@]}"
}
main

View File

@@ -1,81 +0,0 @@
#!/bin/bash
set -euo pipefail
DIR="$(realpath "$(dirname "${BASH_SOURCE[0]}")")"
ROOT_DIR="$(realpath "${DIR}/../..")"
source "${DIR}/helper.sh"
SCALE_SET_NAME="kubernetes-mode-$(date +'%M%S')$(((RANDOM + 100) % 100 + 1))"
SCALE_SET_NAMESPACE="arc-runners"
WORKFLOW_FILE="arc-test-kubernetes-workflow.yaml"
ARC_NAME="arc"
ARC_NAMESPACE="arc-systems"
function install_arc() {
install_openebs || {
echo "OpenEBS installation failed"
return 1
}
echo "Creating namespace ${ARC_NAMESPACE}"
kubectl create namespace "${SCALE_SET_NAMESPACE}"
echo "Installing ARC"
helm install "${ARC_NAME}" \
--namespace "${ARC_NAMESPACE}" \
--create-namespace \
--set image.repository="${IMAGE_NAME}" \
--set image.tag="${IMAGE_TAG}" \
"${ROOT_DIR}/charts/gha-runner-scale-set-controller" \
--debug
if ! NAME="${ARC_NAME}" NAMESPACE="${ARC_NAMESPACE}" wait_for_arc; then
NAMESPACE="${ARC_NAMESPACE}" log_arc
return 1
fi
}
function install_scale_set() {
echo "Installing scale set ${SCALE_SET_NAMESPACE}/${SCALE_SET_NAME}"
helm install "${SCALE_SET_NAME}" \
--namespace "${SCALE_SET_NAMESPACE}" \
--create-namespace \
--set githubConfigUrl="https://github.com/${TARGET_ORG}/${TARGET_REPO}" \
--set githubConfigSecret.github_token="${GITHUB_TOKEN}" \
--set containerMode.type="kubernetes" \
--set containerMode.kubernetesModeWorkVolumeClaim.accessModes={"ReadWriteOnce"} \
--set containerMode.kubernetesModeWorkVolumeClaim.storageClassName="openebs-hostpath" \
--set containerMode.kubernetesModeWorkVolumeClaim.resources.requests.storage="1Gi" \
"${ROOT_DIR}/charts/gha-runner-scale-set" \
--debug
if ! NAME="${SCALE_SET_NAME}" NAMESPACE="${ARC_NAMESPACE}" wait_for_scale_set; then
NAMESPACE="${ARC_NAMESPACE}" log_arc
return 1
fi
}
function main() {
local failed=()
build_image
create_cluster
install_arc
install_scale_set
WORKFLOW_FILE="${WORKFLOW_FILE}" SCALE_SET_NAME="${SCALE_SET_NAME}" run_workflow || failed+=("run_workflow")
INSTALLATION_NAME="${SCALE_SET_NAME}" NAMESPACE="${SCALE_SET_NAMESPACE}" cleanup_scale_set || failed+=("cleanup_scale_set")
NAMESPACE="${ARC_NAMESPACE}" log_arc || failed+=("log_arc")
delete_cluster
print_results "${failed[@]}"
}
main

View File

@@ -1,29 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: mitmproxy
namespace: mitmproxy
labels:
app: mitmproxy
spec:
containers:
- name: mitmproxy
image: mitmproxy/mitmproxy:latest
command: ["mitmdump"]
ports:
- containerPort: 8080
name: proxy
---
apiVersion: v1
kind: Service
metadata:
name: mitmproxy
namespace: mitmproxy
spec:
selector:
app: mitmproxy
ports:
- port: 8080
targetPort: 8080
name: proxy

View File

@@ -1,148 +0,0 @@
#!/bin/bash
set -euo pipefail
DIR="$(realpath "$(dirname "${BASH_SOURCE[0]}")")"
ROOT_DIR="$(realpath "${DIR}/../..")"
source "${DIR}/helper.sh" || {
echo "Failed to source helper.sh"
exit 1
}
TEMP_DIR=$(mktemp -d)
LOCAL_CERT_PATH="${TEMP_DIR}/mitmproxy-ca-cert.crt"
MITM_CERT_PATH="/root/.mitmproxy/mitmproxy-ca-cert.pem"
trap 'rm -rf "$TEMP_DIR"' EXIT
SCALE_SET_NAME="self-signed-crt-$(date '+%M%S')$(((RANDOM + 100) % 100 + 1))"
SCALE_SET_NAMESPACE="arc-runners"
WORKFLOW_FILE="arc-test-workflow.yaml"
ARC_NAME="arc"
ARC_NAMESPACE="arc-systems"
MITMPROXY_NAMESPACE="mitmproxy"
MITMPROXY_POD_NAME="mitmproxy"
function install_arc() {
echo "Installing ARC"
helm install "${ARC_NAME}" \
--namespace "${ARC_NAMESPACE}" \
--create-namespace \
--set image.repository="${IMAGE_NAME}" \
--set image.tag="${IMAGE_TAG}" \
"${ROOT_DIR}/charts/gha-runner-scale-set-controller" \
--debug
if ! NAME="${ARC_NAME}" NAMESPACE="${ARC_NAMESPACE}" wait_for_arc; then
NAMESPACE="${ARC_NAMESPACE}" log_arc
return 1
fi
}
function install_scale_set() {
echo "Creating namespace ${SCALE_SET_NAMESPACE}"
kubectl create namespace "${SCALE_SET_NAMESPACE}"
echo "Installing ca-cert config map"
kubectl -n "${SCALE_SET_NAMESPACE}" create configmap ca-cert \
--from-file=mitmproxy-ca-cert.crt="${LOCAL_CERT_PATH}"
echo "Installing scale set ${SCALE_SET_NAME}/${SCALE_SET_NAMESPACE}"
helm install "${SCALE_SET_NAME}" \
--namespace "${SCALE_SET_NAMESPACE}" \
--create-namespace \
--set githubConfigUrl="https://github.com/${TARGET_ORG}/${TARGET_REPO}" \
--set githubConfigSecret.github_token="${GITHUB_TOKEN}" \
--set proxy.https.url="http://mitmproxy.mitmproxy.svc.cluster.local:8080" \
--set "proxy.noProxy[0]=10.96.0.1:443" \
--set "githubServerTLS.certificateFrom.configMapKeyRef.name=ca-cert" \
--set "githubServerTLS.certificateFrom.configMapKeyRef.key=mitmproxy-ca-cert.crt" \
--set "githubServerTLS.runnerMountPath=/usr/local/share/ca-certificates/" \
"${ROOT_DIR}/charts/gha-runner-scale-set" \
--debug
if ! NAME="${SCALE_SET_NAME}" NAMESPACE="${ARC_NAMESPACE}" wait_for_scale_set; then
NAMESPACE="${ARC_NAMESPACE}" log_arc
return 1
fi
}
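# Wait for the mitmproxy pod to become ready, then copy its generated CA certificate out of the pod so the scale set can trust it.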
function wait_for_mitmproxy_ready() {
echo "Waiting for mitmproxy pod to be ready"
# Wait for pod to be running
if ! kubectl wait --for=condition=ready pod -n "${MITMPROXY_NAMESPACE}" "${MITMPROXY_POD_NAME}" --timeout=60s; then
echo "Timeout waiting for mitmproxy pod"
kubectl get pods -n "${MITMPROXY_NAMESPACE}" || true
kubectl describe pod -n "${MITMPROXY_NAMESPACE}" "${MITMPROXY_POD_NAME}" || true
kubectl logs -n "${MITMPROXY_NAMESPACE}" "${MITMPROXY_POD_NAME}" || true
return 1
fi
echo "Mitmproxy pod is ready, trying to copy the certitficate..."
# Verify certificate exists
retry 15 1 kubectl exec -n "${MITMPROXY_NAMESPACE}" "${MITMPROXY_POD_NAME}" -- test -f "${MITM_CERT_PATH}"
echo "Getting mitmproxy CA certificate from pod"
if ! kubectl exec -n "${MITMPROXY_NAMESPACE}" "${MITMPROXY_POD_NAME}" -- cat "${MITM_CERT_PATH}" >"${LOCAL_CERT_PATH}"; then
echo "Failed to get mitmproxy CA certificate from pod"
return 1
fi
echo "Mitmproxy certificate generated successfully and stored to ${LOCAL_CERT_PATH}"
return 0
}
function run_mitmproxy() {
echo "Deploying mitmproxy to Kubernetes"
# Create namespace
kubectl create namespace "${MITMPROXY_NAMESPACE}" || true
# Create mitmproxy pod and service
kubectl apply -f "${DIR}/self-signed-ca-setup.mitm.yaml"
if ! wait_for_mitmproxy_ready; then
return 1
fi
echo "Mitmproxy is ready"
}
function main() {
local failed=()
build_image
create_cluster
install_arc
run_mitmproxy || {
echo "Failed to run mitmproxy"
echo "ARC logs:"
NAMESPACE="${ARC_NAMESPACE}" log_arc
echo "Deleting cluster..."
delete_cluster
exit 1
}
install_scale_set || {
echo "Failed to run mitmproxy"
echo "ARC logs:"
NAMESPACE="${ARC_NAMESPACE}" log_arc
echo "Deleting cluster..."
delete_cluster
exit 1
}
WORKFLOW_FILE="${WORKFLOW_FILE}" SCALE_SET_NAME="${SCALE_SET_NAME}" run_workflow || failed+=("run_workflow")
INSTALLATION_NAME="${SCALE_SET_NAME}" NAMESPACE="${SCALE_SET_NAMESPACE}" cleanup_scale_set || failed+=("cleanup_scale_set")
NAMESPACE="${ARC_NAMESPACE}" log_arc || failed+=("log_arc")
delete_cluster
print_results "${failed[@]}"
}
main

View File

@@ -1,74 +0,0 @@
#!/bin/bash
set -euo pipefail
DIR="$(realpath "$(dirname "${BASH_SOURCE[0]}")")"
ROOT_DIR="$(realpath "${DIR}/../..")"
source "${DIR}/helper.sh"
SCALE_SET_NAME="default-$(date +'%M%S')$(((RANDOM + 100) % 100 + 1))"
SCALE_SET_NAMESPACE="arc-runners"
WORKFLOW_FILE="arc-test-workflow.yaml"
ARC_NAME="arc"
ARC_NAMESPACE="${SCALE_SET_NAMESPACE}"
function install_arc() {
echo "Creating namespace ${ARC_NAMESPACE}"
kubectl create namespace "${SCALE_SET_NAMESPACE}"
echo "Installing ARC"
helm install "${ARC_NAME}" \
--namespace "${ARC_NAMESPACE}" \
--create-namespace \
--set image.repository="${IMAGE_NAME}" \
--set image.tag="${IMAGE_TAG}" \
--set flags.watchSingleNamespace="${ARC_NAMESPACE}" \
"${ROOT_DIR}/charts/gha-runner-scale-set-controller" \
--debug
if ! NAME="${ARC_NAME}" NAMESPACE="${ARC_NAMESPACE}" wait_for_arc; then
NAMESPACE="${ARC_NAMESPACE}" log_arc
return 1
fi
}
function install_scale_set() {
echo "Installing scale set ${SCALE_SET_NAMESPACE}/${SCALE_SET_NAME}"
helm install "${SCALE_SET_NAME}" \
--namespace "${SCALE_SET_NAMESPACE}" \
--create-namespace \
--set githubConfigUrl="https://github.com/${TARGET_ORG}/${TARGET_REPO}" \
--set githubConfigSecret.github_token="${GITHUB_TOKEN}" \
"${ROOT_DIR}/charts/gha-runner-scale-set" \
--version="${VERSION}" \
--debug
if ! NAME="${SCALE_SET_NAME}" NAMESPACE="${ARC_NAMESPACE}" wait_for_scale_set; then
NAMESPACE="${ARC_NAMESPACE}" log_arc
return 1
fi
}
function main() {
local failed=()
build_image
create_cluster
install_arc
install_scale_set
WORKFLOW_FILE="${WORKFLOW_FILE}" SCALE_SET_NAME="${SCALE_SET_NAME}" run_workflow || failed+=("run_workflow")
INSTALLATION_NAME="${SCALE_SET_NAME}" NAMESPACE="${SCALE_SET_NAMESPACE}" cleanup_scale_set || failed+=("cleanup_scale_set")
NAMESPACE="${ARC_NAMESPACE}" log_arc || failed+=("log_arc")
delete_cluster
print_results "${failed[@]}"
}
main

View File

@@ -1,146 +0,0 @@
#!/bin/bash
set -euo pipefail
DIR="$(realpath "$(dirname "${BASH_SOURCE[0]}")")"
ROOT_DIR="$(realpath "${DIR}/../..")"
source "${DIR}/helper.sh" || {
echo "Failed to source helper.sh"
exit 1
}
SCALE_SET_NAME="update-strategy-$(date '+%M%S')$(((RANDOM + 100) % 100 + 1))"
SCALE_SET_NAMESPACE="arc-runners"
WORKFLOW_FILE="arc-test-sleepy-matrix.yaml"
ARC_NAME="arc"
ARC_NAMESPACE="arc-systems"
function install_arc() {
echo "Installing ARC"
helm install "${ARC_NAME}" \
--namespace "${ARC_NAMESPACE}" \
--create-namespace \
--set image.repository="${IMAGE_NAME}" \
--set image.tag="${IMAGE_TAG}" \
--set flags.updateStrategy="eventual" \
"${ROOT_DIR}/charts/gha-runner-scale-set-controller" \
--debug
if ! NAME="${ARC_NAME}" NAMESPACE="${ARC_NAMESPACE}" wait_for_arc; then
NAMESPACE="${ARC_NAMESPACE}" log_arc
return 1
fi
}
function install_scale_set() {
echo "Installing scale set ${SCALE_SET_NAME}/${SCALE_SET_NAMESPACE}"
helm install "${SCALE_SET_NAME}" \
--namespace "${SCALE_SET_NAMESPACE}" \
--create-namespace \
--set githubConfigUrl="https://github.com/${TARGET_ORG}/${TARGET_REPO}" \
--set githubConfigSecret.github_token="${GITHUB_TOKEN}" \
"${ROOT_DIR}/charts/gha-runner-scale-set" \
--debug
if ! NAME="${SCALE_SET_NAME}" NAMESPACE="${ARC_NAMESPACE}" wait_for_scale_set; then
NAMESPACE="${ARC_NAMESPACE}" log_arc
return 1
fi
}
function upgrade_scale_set() {
echo "Upgrading scale set ${SCALE_SET_NAME}/${SCALE_SET_NAMESPACE}"
helm upgrade "${SCALE_SET_NAME}" \
--namespace "${SCALE_SET_NAMESPACE}" \
--set githubConfigUrl="https://github.com/${TARGET_ORG}/${TARGET_REPO}" \
--set githubConfigSecret.github_token="${GITHUB_TOKEN}" \
--set template.spec.containers[0].name="runner" \
--set template.spec.containers[0].image="ghcr.io/actions/actions-runner:latest" \
--set template.spec.containers[0].command={"/home/runner/run.sh"} \
--set template.spec.containers[0].env[0].name="TEST" \
--set template.spec.containers[0].env[0].value="E2E TESTS" \
"${ROOT_DIR}/charts/gha-runner-scale-set" \
--version="${VERSION}" \
--debug
}
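# With flags.updateStrategy=eventual, the upgrade is expected to delete the old listener first; poll (up to ~60s) until it is gone.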
function assert_listener_deleted() {
local count=0
while true; do
LISTENER_COUNT="$(kubectl get pods -l actions.github.com/scale-set-name="${SCALE_SET_NAME}" -n "${ARC_NAMESPACE}" --field-selector=status.phase=Running -o=jsonpath='{.items}' | jq 'length')"
RUNNERS_COUNT="$(kubectl get pods -l app.kubernetes.io/component=runner -n "${SCALE_SET_NAMESPACE}" --field-selector=status.phase=Running -o=jsonpath='{.items}' | jq 'length')"
RESOURCES="$(kubectl get pods -A)"
if [ "${LISTENER_COUNT}" -eq 0 ]; then
echo "Listener has been deleted"
echo "${RESOURCES}"
return 0
fi
if [ "${count}" -ge 60 ]; then
echo "Timeout waiting for listener to be deleted"
echo "${RESOURCES}"
return 1
fi
echo "Waiting for listener to be deleted"
echo "Listener count: ${LISTENER_COUNT} target: 0 | Runners count: ${RUNNERS_COUNT} target: 3"
sleep 1
count=$((count + 1))
done
}
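# Once in-flight runners drain, the controller should bring up a new listener; poll (up to ~120s) for it.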
function assert_listener_recreated() {
count=0
while true; do
LISTENER_COUNT="$(kubectl get pods -l actions.github.com/scale-set-name="${SCALE_SET_NAME}" -n "${ARC_NAMESPACE}" --field-selector=status.phase=Running -o=jsonpath='{.items}' | jq 'length')"
RUNNERS_COUNT="$(kubectl get pods -l app.kubernetes.io/component=runner -n "${SCALE_SET_NAMESPACE}" --field-selector=status.phase=Running -o=jsonpath='{.items}' | jq 'length')"
RESOURCES="$(kubectl get pods -A)"
if [ "${LISTENER_COUNT}" -eq 1 ]; then
echo "Listener is up!"
echo "${RESOURCES}"
return 0
fi
if [ "${count}" -ge 120 ]; then
echo "Timeout waiting for listener to be recreated"
echo "${RESOURCES}"
return 1
fi
echo "Waiting for listener to be recreated"
echo "Listener count: ${LISTENER_COUNT} target: 1 | Runners count: ${RUNNERS_COUNT} target: 0"
sleep 1
count=$((count + 1))
done
}
function main() {
local failed=()
build_image
create_cluster
install_arc
install_scale_set
WORKFLOW_FILE="${WORKFLOW_FILE}" SCALE_SET_NAME="${SCALE_SET_NAME}" run_workflow || failed+=("run_workflow")
upgrade_scale_set || failed+=("upgrade_scale_set")
assert_listener_deleted || failed+=("assert_listener_deleted")
assert_listener_recreated || failed+=("assert_listener_recreated")
INSTALLATION_NAME="${SCALE_SET_NAME}" NAMESPACE="${SCALE_SET_NAMESPACE}" cleanup_scale_set || failed+=("cleanup_scale_set")
NAMESPACE="${ARC_NAMESPACE}" log_arc || failed+=("log_arc")
delete_cluster
print_results "${failed[@]}"
}
main

View File

@@ -36,7 +36,7 @@ var (
testResultCMNamePrefix = "test-result-"
RunnerVersion = "2.330.0"
RunnerVersion = "2.329.0"
RunnerContainerHooksVersion = "0.8.0"
)
@@ -455,7 +455,7 @@ func buildVars(repo, ubuntuVer string) vars {
runnerRootlessDindImage = testing.Img(runnerRootlessDindImageRepo, runnerImageTag)
dindSidecarImageRepo = "docker"
dindSidecarImageTag = "28.0.4-dind"
dindSidecarImageTag = "24.0.7-dind"
dindSidecarImage = testing.Img(dindSidecarImageRepo, dindSidecarImageTag)
)