Compare commits


10 Commits

Author SHA1 Message Date
dependabot[bot]
419f87d7c7 Bump the gomod group with 2 updates
Bumps the gomod group with 2 updates: [k8s.io/apimachinery](https://github.com/kubernetes/apimachinery) and [k8s.io/client-go](https://github.com/kubernetes/client-go).


Updates `k8s.io/apimachinery` from 0.34.3 to 0.35.0
- [Commits](https://github.com/kubernetes/apimachinery/compare/v0.34.3...v0.35.0)

Updates `k8s.io/client-go` from 0.34.3 to 0.35.0
- [Changelog](https://github.com/kubernetes/client-go/blob/master/CHANGELOG.md)
- [Commits](https://github.com/kubernetes/client-go/compare/v0.34.3...v0.35.0)

---
updated-dependencies:
- dependency-name: k8s.io/apimachinery
  dependency-version: 0.35.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: gomod
- dependency-name: k8s.io/client-go
  dependency-version: 0.35.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: gomod
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-12-22 22:03:34 +00:00
Nikola Jokic
bfe78ccd5d Make restart pod more flexible to different failure scenarios (#4340) 2025-12-19 15:49:42 +01:00
dependabot[bot]
3fd1048576 Bump golangci/golangci-lint-action from 9.1.0 to 9.2.0 in the actions group (#4335)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-12-16 14:15:21 +01:00
dependabot[bot]
180e0dabb2 Bump the gomod group across 1 directory with 10 updates (#4338)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-12-16 14:05:44 +01:00
Nikola Jokic
50038fba61 Re-schedule if the failed reason starts with OutOf (#4336) 2025-12-16 13:26:44 +01:00
Nikola Jokic
82d5579696 Restart the listener if pod is evicted (#4332) 2025-12-09 17:55:09 +01:00
Nikola Jokic
540269880f Typo in test name caused test to not execute (#4330) 2025-11-27 15:31:57 +01:00
dependabot[bot]
9ebb97fe2e Bump the actions group with 3 updates (#4328)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-11-25 12:00:40 +01:00
Nikola Jokic
75c401f6c1 Remove old e2e tests (#4325) 2025-11-25 00:37:32 +01:00
dependabot[bot]
a9e371e083 Bump the actions group across 1 directory with 4 updates (#4309)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Nikola Jokic <jokicnikola07@gmail.com>
2025-11-21 19:23:19 +01:00
24 changed files with 603 additions and 1508 deletions

View File

@@ -1,215 +0,0 @@
name: 'Execute and Assert ARC E2E Test Action'
description: 'Queue an E2E test workflow and assert that the workflow run succeeds'
inputs:
auth-token:
description: 'GitHub access token to queue workflow run'
required: true
repo-owner:
description: "The repository owner name that has the test workflow file, ex: actions"
required: true
repo-name:
description: "The repository name that has the test workflow file, ex: test"
required: true
workflow-file:
description: 'The file name of the workflow yaml, ex: test.yml'
required: true
arc-name:
description: 'The name of the configured gha-runner-scale-set'
required: true
arc-namespace:
description: 'The namespace of the configured gha-runner-scale-set'
required: true
arc-controller-namespace:
description: 'The namespace of the configured gha-runner-scale-set-controller'
required: true
wait-to-finish:
description: 'Wait for the workflow run to finish'
required: true
default: "true"
wait-to-running:
description: 'Wait for the workflow run to start running'
required: true
default: "false"
runs:
using: "composite"
steps:
- name: Queue test workflow
shell: bash
id: queue_workflow
run: |
queue_time=`date +%FT%TZ`
echo "queue_time=$queue_time" >> $GITHUB_OUTPUT
curl -X POST https://api.github.com/repos/${{inputs.repo-owner}}/${{inputs.repo-name}}/actions/workflows/${{inputs.workflow-file}}/dispatches \
-H "Accept: application/vnd.github.v3+json" \
-H "Authorization: token ${{inputs.auth-token}}" \
-d '{"ref": "main", "inputs": { "arc_name": "${{inputs.arc-name}}" } }'
- name: Fetch workflow run & job ids
uses: actions/github-script@v7
id: query_workflow
with:
script: |
// Try to find the workflow run triggered by the previous step using the workflow_dispatch event.
// - Find recently created workflow runs in the test repository
// - For each workflow run, list its jobs and check whether a job's labels contain `inputs.arc-name`
// - Since inputs.arc-name should be unique per e2e workflow run, the job carrying that label identifies the workflow run we just triggered.
function sleep(ms) {
return new Promise(resolve => setTimeout(resolve, ms))
}
const owner = '${{inputs.repo-owner}}'
const repo = '${{inputs.repo-name}}'
const workflow_id = '${{inputs.workflow-file}}'
let workflow_run_id = 0
let workflow_job_id = 0
let workflow_run_html_url = ""
let count = 0
while (count++<12) {
await sleep(10 * 1000);
let listRunResponse = await github.rest.actions.listWorkflowRuns({
owner: owner,
repo: repo,
workflow_id: workflow_id,
created: '>${{steps.queue_workflow.outputs.queue_time}}'
})
if (listRunResponse.data.total_count > 0) {
console.log(`Found some new workflow runs for ${workflow_id}`)
for (let i = 0; i<listRunResponse.data.total_count; i++) {
let workflowRun = listRunResponse.data.workflow_runs[i]
console.log(`Check if workflow run ${workflowRun.id} was triggered by us.`)
let listJobResponse = await github.rest.actions.listJobsForWorkflowRun({
owner: owner,
repo: repo,
run_id: workflowRun.id
})
console.log(`Workflow run ${workflowRun.id} has ${listJobResponse.data.total_count} jobs.`)
if (listJobResponse.data.total_count > 0) {
for (let j = 0; j<listJobResponse.data.total_count; j++) {
let workflowJob = listJobResponse.data.jobs[j]
console.log(`Check if workflow job ${workflowJob.id} was triggered by us.`)
console.log(JSON.stringify(workflowJob.labels));
if (workflowJob.labels.includes('${{inputs.arc-name}}')) {
console.log(`Workflow job ${workflowJob.id} (Run id: ${workflowJob.run_id}) was triggered by us.`)
workflow_run_id = workflowJob.run_id
workflow_job_id = workflowJob.id
workflow_run_html_url = workflowRun.html_url
break
}
}
}
if (workflow_job_id > 0) {
break;
}
}
}
if (workflow_job_id > 0) {
break;
}
}
if (workflow_job_id == 0) {
core.setFailed(`Can't find a workflow run and job triggered to run on '${{inputs.arc-name}}'`)
} else {
core.setOutput('workflow_run', workflow_run_id);
core.setOutput('workflow_job', workflow_job_id);
core.setOutput('workflow_run_url', workflow_run_html_url);
}
- name: Generate summary about the triggered workflow run
shell: bash
run: |
cat <<-EOF > $GITHUB_STEP_SUMMARY
| **Triggered workflow run** |
|:--------------------------:|
| ${{steps.query_workflow.outputs.workflow_run_url}} |
EOF
- name: Wait for workflow to start running
if: inputs.wait-to-running == 'true' && inputs.wait-to-finish == 'false'
uses: actions/github-script@v7
with:
script: |
function sleep(ms) {
return new Promise(resolve => setTimeout(resolve, ms))
}
const owner = '${{inputs.repo-owner}}'
const repo = '${{inputs.repo-name}}'
const workflow_run_id = ${{steps.query_workflow.outputs.workflow_run}}
const workflow_job_id = ${{steps.query_workflow.outputs.workflow_job}}
let count = 0
while (count++<10) {
await sleep(30 * 1000);
let getRunResponse = await github.rest.actions.getWorkflowRun({
owner: owner,
repo: repo,
run_id: workflow_run_id
})
console.log(`${getRunResponse.data.html_url}: ${getRunResponse.data.status} (${getRunResponse.data.conclusion})`);
if (getRunResponse.data.status == 'in_progress') {
console.log(`Workflow run is in progress.`)
return
}
}
core.setFailed(`The triggered workflow run didn't start properly using ${{inputs.arc-name}}`)
- name: Wait for workflow to finish successfully
if: inputs.wait-to-finish == 'true'
uses: actions/github-script@v7
with:
script: |
// Wait 5 minutes and make sure the workflow run we triggered completed with result 'success'
function sleep(ms) {
return new Promise(resolve => setTimeout(resolve, ms))
}
const owner = '${{inputs.repo-owner}}'
const repo = '${{inputs.repo-name}}'
const workflow_run_id = ${{steps.query_workflow.outputs.workflow_run}}
const workflow_job_id = ${{steps.query_workflow.outputs.workflow_job}}
let count = 0
while (count++<10) {
await sleep(30 * 1000);
let getRunResponse = await github.rest.actions.getWorkflowRun({
owner: owner,
repo: repo,
run_id: workflow_run_id
})
console.log(`${getRunResponse.data.html_url}: ${getRunResponse.data.status} (${getRunResponse.data.conclusion})`);
if (getRunResponse.data.status == 'completed') {
if ( getRunResponse.data.conclusion == 'success') {
console.log(`Workflow run finished properly.`)
return
} else {
core.setFailed(`The triggered workflow run finished with result ${getRunResponse.data.conclusion}`)
return
}
}
}
core.setFailed(`The triggered workflow run didn't finish properly using ${{inputs.arc-name}}`)
- name: Gather listener logs
shell: bash
if: always()
run: |
LISTENER_POD="$(kubectl get autoscalinglisteners.actions.github.com -n arc-systems -o jsonpath='{.items[*].metadata.name}')"
kubectl logs $LISTENER_POD -n ${{inputs.arc-controller-namespace}}
- name: Gather coredns logs
shell: bash
if: always()
run: |
kubectl logs deployments/coredns -n kube-system
- name: cleanup
if: inputs.wait-to-finish == 'true'
shell: bash
run: |
helm uninstall ${{ inputs.arc-name }} --namespace ${{inputs.arc-namespace}} --debug
kubectl wait --timeout=30s --for=delete AutoScalingRunnerSet -n ${{inputs.arc-namespace}} -l app.kubernetes.io/instance=${{ inputs.arc-name }}
- name: Gather controller logs
shell: bash
if: always()
run: |
kubectl logs deployment/arc-gha-rs-controller -n ${{inputs.arc-controller-namespace}}

View File

@@ -1,65 +0,0 @@
name: "Setup ARC E2E Test Action"
description: "Build controller image, create kind cluster, load the image, and exchange ARC configure token."
inputs:
app-id:
description: "GitHub App Id for exchange access token"
required: true
app-pk:
description: "GitHub App private key for exchange access token"
required: true
image-name:
description: "Local docker image name for building"
required: true
image-tag:
description: "Tag of ARC Docker image for building"
required: true
target-org:
description: "The test organization for ARC e2e test"
required: true
outputs:
token:
description: "Token to use for configure ARC"
value: ${{steps.config-token.outputs.token}}
runs:
using: "composite"
steps:
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2
with:
# Pinning v0.9.1 for Buildx and BuildKit v0.10.6
# to avoid BuildKit v0.11, which has a bug causing
# intermittent failures when pushing images to GHCR
version: v0.9.1
driver-opts: image=moby/buildkit:v0.10.6
- name: Build controller image
# https://github.com/docker/build-push-action/releases/tag/v6.18.0
uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83
with:
file: Dockerfile
platforms: linux/amd64
load: true
build-args: |
DOCKER_IMAGE_NAME=${{inputs.image-name}}
VERSION=${{inputs.image-tag}}
tags: |
${{inputs.image-name}}:${{inputs.image-tag}}
no-cache: true
- name: Create minikube cluster and load image
shell: bash
run: |
minikube start
minikube image load ${{inputs.image-name}}:${{inputs.image-tag}}
- name: Get configure token
id: config-token
# https://github.com/peter-murray/workflow-application-token-action/releases/tag/v3.0.0
uses: peter-murray/workflow-application-token-action@dc0413987a085fa17d19df9e47d4677cf81ffef3
with:
application_id: ${{ inputs.app-id }}
application_private_key: ${{ inputs.app-pk }}
organization: ${{ inputs.target-org }}

View File

@@ -1,51 +0,0 @@
name: "Setup Docker"
inputs:
username:
description: "Username"
required: true
password:
description: "Password"
required: true
ghcr_username:
description: "GHCR username. Usually set from the github.actor variable"
required: true
ghcr_password:
description: "GHCR password. Usually set from the secrets.GITHUB_TOKEN variable"
required: true
runs:
using: "composite"
steps:
- name: Get Short SHA
id: vars
run: |
echo "sha_short=${GITHUB_SHA::7}" >> $GITHUB_ENV
shell: bash
- name: Set up QEMU
# https://github.com/docker/setup-qemu-action/releases/tag/v3.6.0
uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392
- name: Set up Docker Buildx
# https://github.com/docker/setup-buildx-action/releases/tag/v3.10.0
uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2
with:
version: latest
- name: Login to DockerHub
if: ${{ github.event_name == 'release' || github.event_name == 'push' && github.ref == 'refs/heads/master' && inputs.password != '' }}
# https://github.com/docker/login-action/releases/tag/v3.4.0
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772
with:
username: ${{ inputs.username }}
password: ${{ inputs.password }}
- name: Login to GitHub Container Registry
if: ${{ github.event_name == 'release' || github.event_name == 'push' && github.ref == 'refs/heads/master' && inputs.ghcr_password != '' }}
# https://github.com/docker/login-action/releases/tag/v3.4.0
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772
with:
registry: ghcr.io
username: ${{ inputs.ghcr_username }}
password: ${{ inputs.ghcr_password }}

View File

@@ -40,7 +40,7 @@ jobs:
publish-chart: ${{ steps.publish-chart-step.outputs.publish }}
steps:
- name: Checkout
uses: actions/checkout@v5
uses: actions/checkout@v6
with:
fetch-depth: 0
@@ -134,7 +134,7 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v5
uses: actions/checkout@v6
with:
fetch-depth: 0
@@ -184,7 +184,7 @@ jobs:
# this workaround is intended to move the index.yaml to the target repo
# where the github pages are hosted
- name: Checkout target repository
uses: actions/checkout@v5
uses: actions/checkout@v6
with:
repository: ${{ env.CHART_TARGET_ORG }}/${{ env.CHART_TARGET_REPO }}
path: ${{ env.CHART_TARGET_REPO }}

View File

@@ -39,7 +39,7 @@ jobs:
if: ${{ !startsWith(github.event.inputs.release_tag_name, 'gha-runner-scale-set-') }}
steps:
- name: Checkout
uses: actions/checkout@v5
uses: actions/checkout@v6
- uses: actions/setup-go@v6
with:

View File

@@ -30,7 +30,7 @@ jobs:
name: Trigger Build and Push of Runner Images
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v5
- uses: actions/checkout@v6
- name: Get runner version
id: versions
run: |

View File

@@ -24,7 +24,7 @@ jobs:
container_hooks_current_version: ${{ steps.container_hooks_versions.outputs.container_hooks_current_version }}
container_hooks_latest_version: ${{ steps.container_hooks_versions.outputs.container_hooks_latest_version }}
steps:
- uses: actions/checkout@v5
- uses: actions/checkout@v6
- name: Get runner current and latest versions
id: runner_versions
@@ -69,7 +69,7 @@ jobs:
echo "CONTAINER_HOOKS_CURRENT_VERSION=${{ needs.check_versions.outputs.container_hooks_current_version }}"
echo "CONTAINER_HOOKS_LATEST_VERSION=${{ needs.check_versions.outputs.container_hooks_latest_version }}"
- uses: actions/checkout@v5
- uses: actions/checkout@v6
- name: PR Name
id: pr_name
@@ -124,7 +124,7 @@ jobs:
PR_NAME: ${{ needs.check_pr.outputs.pr_name }}
steps:
- uses: actions/checkout@v5
- uses: actions/checkout@v6
- name: New branch
run: git checkout -b update-runner-"$(date +%Y-%m-%d)"

View File

@@ -40,7 +40,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v5
uses: actions/checkout@v6
with:
fetch-depth: 0

View File

@@ -24,7 +24,7 @@ jobs:
name: runner / shellcheck
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v5
- uses: actions/checkout@v6
- name: "Run shellcheck"
run: make shellcheck
@@ -33,7 +33,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v5
uses: actions/checkout@v6
- name: Run tests
run: |

File diff suppressed because it is too large

View File

@@ -45,7 +45,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v5
uses: actions/checkout@v6
with:
# If inputs.ref is empty, it'll resolve to the default branch
ref: ${{ inputs.ref }}
@@ -119,7 +119,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v5
uses: actions/checkout@v6
with:
# If inputs.ref is empty, it'll resolve to the default branch
ref: ${{ inputs.ref }}
@@ -166,7 +166,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v5
uses: actions/checkout@v6
with:
# If inputs.ref is empty, it'll resolve to the default branch
ref: ${{ inputs.ref }}

View File

@@ -36,7 +36,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v5
uses: actions/checkout@v6
with:
fetch-depth: 0
@@ -111,7 +111,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v5
uses: actions/checkout@v6
- uses: actions/setup-go@v6
with:
go-version-file: "go.mod"

View File

@@ -55,7 +55,7 @@ jobs:
TARGET_REPO: actions-runner-controller
steps:
- name: Checkout
uses: actions/checkout@v5
uses: actions/checkout@v6
- name: Get Token
id: get_workflow_token
@@ -90,7 +90,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v5
uses: actions/checkout@v6
- name: Login to GitHub Container Registry
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef

View File

@@ -25,7 +25,7 @@ jobs:
security-events: write
steps:
- name: Checkout repository
uses: actions/checkout@v5
uses: actions/checkout@v6
- name: Install Go
uses: actions/setup-go@v6

View File

@@ -16,7 +16,7 @@ jobs:
check_for_first_interaction:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v5
- uses: actions/checkout@v6
- uses: actions/first-interaction@v3
with:
repo_token: ${{ secrets.GITHUB_TOKEN }}

View File

@@ -29,7 +29,7 @@ jobs:
fmt:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v5
- uses: actions/checkout@v6
- uses: actions/setup-go@v6
with:
go-version-file: "go.mod"
@@ -42,13 +42,13 @@ jobs:
lint:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v5
- uses: actions/checkout@v6
- uses: actions/setup-go@v6
with:
go-version-file: "go.mod"
cache: false
- name: golangci-lint
uses: golangci/golangci-lint-action@0a35821d5c230e903fcfe077583637dea1b27b47
uses: golangci/golangci-lint-action@1e7e51e771db61008b38414a730f564565cf7c20
with:
only-new-issues: true
version: v2.5.0
@@ -56,7 +56,7 @@ jobs:
generate:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v5
- uses: actions/checkout@v6
- uses: actions/setup-go@v6
with:
go-version-file: "go.mod"
@@ -69,7 +69,7 @@ jobs:
test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v5
- uses: actions/checkout@v6
- uses: actions/setup-go@v6
with:
go-version-file: "go.mod"

View File

@@ -211,7 +211,14 @@ func (r *AutoscalingListenerReconciler) Reconcile(ctx context.Context, req ctrl.
// TODO: make sure the role binding has the up-to-date role and service account
listenerPod := new(corev1.Pod)
if err := r.Get(ctx, client.ObjectKey{Namespace: autoscalingListener.Namespace, Name: autoscalingListener.Name}, listenerPod); err != nil {
if err := r.Get(
ctx,
client.ObjectKey{
Namespace: autoscalingListener.Namespace,
Name: autoscalingListener.Name,
},
listenerPod,
); err != nil {
if !kerrors.IsNotFound(err) {
log.Error(err, "Unable to get listener pod", "namespace", autoscalingListener.Namespace, "name", autoscalingListener.Name)
return ctrl.Result{}, err
@@ -229,37 +236,30 @@ func (r *AutoscalingListenerReconciler) Reconcile(ctx context.Context, req ctrl.
cs := listenerContainerStatus(listenerPod)
switch {
case listenerPod.Status.Reason == "Evicted":
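// Evicted pods report phase Failed with reason "Evicted" and may never set a
// terminated container state, so handle eviction before inspecting container status.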
log.Info(
"Listener pod is evicted",
"phase", listenerPod.Status.Phase,
"reason", listenerPod.Status.Reason,
"message", listenerPod.Status.Message,
)
return ctrl.Result{}, r.deleteListenerPod(ctx, autoscalingListener, listenerPod, log)
case cs == nil:
log.Info("Listener pod is not ready", "namespace", listenerPod.Namespace, "name", listenerPod.Name)
return ctrl.Result{}, nil
case cs.State.Terminated != nil:
log.Info("Listener pod is terminated", "namespace", listenerPod.Namespace, "name", listenerPod.Name, "reason", cs.State.Terminated.Reason, "message", cs.State.Terminated.Message)
log.Info(
"Listener pod is terminated",
"namespace", listenerPod.Namespace,
"name", listenerPod.Name,
"reason", cs.State.Terminated.Reason,
"message", cs.State.Terminated.Message,
)
if err := r.publishRunningListener(autoscalingListener, false); err != nil {
log.Error(err, "Unable to publish runner listener down metric", "namespace", listenerPod.Namespace, "name", listenerPod.Name)
}
return ctrl.Result{}, r.deleteListenerPod(ctx, autoscalingListener, listenerPod, log)
if listenerPod.DeletionTimestamp.IsZero() {
log.Info("Deleting the listener pod", "namespace", listenerPod.Namespace, "name", listenerPod.Name)
if err := r.Delete(ctx, listenerPod); err != nil && !kerrors.IsNotFound(err) {
log.Error(err, "Unable to delete the listener pod", "namespace", listenerPod.Namespace, "name", listenerPod.Name)
return ctrl.Result{}, err
}
// Delete the listener config secret as well, so that it is recreated with any updated data when the listener pod comes back.
var configSecret corev1.Secret
err := r.Get(ctx, types.NamespacedName{Namespace: autoscalingListener.Namespace, Name: scaleSetListenerConfigName(autoscalingListener)}, &configSecret)
switch {
case err == nil && configSecret.DeletionTimestamp.IsZero():
log.Info("Deleting the listener config secret")
if err := r.Delete(ctx, &configSecret); err != nil {
return ctrl.Result{}, fmt.Errorf("failed to delete listener config secret: %w", err)
}
case !kerrors.IsNotFound(err):
return ctrl.Result{}, fmt.Errorf("failed to get the listener config secret: %w", err)
}
}
return ctrl.Result{}, nil
case cs.State.Running != nil:
if err := r.publishRunningListener(autoscalingListener, true); err != nil {
log.Error(err, "Unable to publish running listener", "namespace", listenerPod.Namespace, "name", listenerPod.Name)
@@ -269,10 +269,39 @@ func (r *AutoscalingListenerReconciler) Reconcile(ctx context.Context, req ctrl.
return ctrl.Result{}, nil
}
return ctrl.Result{}, nil
}
return ctrl.Result{}, nil
}
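// deleteListenerPod publishes the listener-down metric, deletes the listener pod,
// and removes the listener config secret so that both are recreated on the next
// reconciliation.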
func (r *AutoscalingListenerReconciler) deleteListenerPod(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, listenerPod *corev1.Pod, log logr.Logger) error {
if err := r.publishRunningListener(autoscalingListener, false); err != nil {
log.Error(err, "Unable to publish runner listener down metric", "namespace", listenerPod.Namespace, "name", listenerPod.Name)
}
if listenerPod.DeletionTimestamp.IsZero() {
log.Info("Deleting the listener pod", "namespace", listenerPod.Namespace, "name", listenerPod.Name)
if err := r.Delete(ctx, listenerPod); err != nil && !kerrors.IsNotFound(err) {
log.Error(err, "Unable to delete the listener pod", "namespace", listenerPod.Namespace, "name", listenerPod.Name)
return err
}
// Delete the listener config secret as well, so that it is recreated with any updated data when the listener pod comes back.
var configSecret corev1.Secret
err := r.Get(ctx, types.NamespacedName{Namespace: autoscalingListener.Namespace, Name: scaleSetListenerConfigName(autoscalingListener)}, &configSecret)
switch {
case err == nil && configSecret.DeletionTimestamp.IsZero():
log.Info("Deleting the listener config secret")
if err := r.Delete(ctx, &configSecret); err != nil {
return fmt.Errorf("failed to delete listener config secret: %w", err)
}
case !kerrors.IsNotFound(err):
return fmt.Errorf("failed to get the listener config secret: %w", err)
}
}
return nil
}
func (r *AutoscalingListenerReconciler) cleanupResources(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, logger logr.Logger) (requeue bool, err error) {
logger.Info("Cleaning up the listener pod")
listenerPod := new(corev1.Pod)

View File

@@ -671,6 +671,55 @@ var _ = Describe("Test AutoScalingListener customization", func() {
autoscalingListenerTestInterval,
).ShouldNot(BeEquivalentTo(oldPodUID), "Pod should be created")
})
It("Should re-create pod when the listener pod is evicted", func() {
pod := new(corev1.Pod)
Eventually(
func() (string, error) {
err := k8sClient.Get(
ctx,
client.ObjectKey{
Name: autoscalingListener.Name,
Namespace: autoscalingListener.Namespace,
},
pod,
)
if err != nil {
return "", err
}
return pod.Name, nil
},
autoscalingListenerTestTimeout,
autoscalingListenerTestInterval,
).Should(
BeEquivalentTo(autoscalingListener.Name),
"Pod should be created",
)
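// Simulate an eviction by updating the pod status to the reason the controller
// watches for.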
updated := pod.DeepCopy()
oldPodUID := string(pod.UID)
updated.Status.Reason = "Evicted"
err := k8sClient.Status().Update(ctx, updated)
Expect(err).NotTo(HaveOccurred(), "failed to update pod status")
pod = new(corev1.Pod)
Eventually(
func() (string, error) {
err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingListener.Name, Namespace: autoscalingListener.Namespace}, pod)
if err != nil {
return "", err
}
return string(pod.UID), nil
},
autoscalingListenerTestTimeout,
autoscalingListenerTestInterval,
).ShouldNot(
BeEquivalentTo(oldPodUID),
"Pod should be created",
)
})
})
})

View File

@@ -312,26 +312,46 @@ func (r *EphemeralRunnerReconciler) Reconcile(ctx context.Context, req ctrl.Requ
cs := runnerContainerStatus(pod)
switch {
case cs == nil:
// starting, no container state yet
log.Info("Waiting for runner container status to be available")
return ctrl.Result{}, nil
case cs.State.Terminated == nil: // still running or evicted
if pod.Status.Phase == corev1.PodFailed && pod.Status.Reason == "Evicted" {
log.Info("Pod set the termination phase, but container state is not terminated. Deleting pod",
"PodPhase", pod.Status.Phase,
"PodReason", pod.Status.Reason,
"PodMessage", pod.Status.Message,
)
case pod.Status.Phase == corev1.PodFailed: // All containers are stopped
log.Info("Pod is in failed phase, inspecting runner container status",
"podReason", pod.Status.Reason,
"podMessage", pod.Status.Message,
"podConditions", pod.Status.Conditions,
)
// If the runner pod did not have a chance to start, the terminated state may not be set.
// Therefore, we should try to restart it.
if cs == nil || cs.State.Terminated == nil {
log.Info("Runner container does not have state set, deleting pod as failed so it can be restarted")
return ctrl.Result{}, r.deleteEphemeralRunnerOrPod(ctx, ephemeralRunner, pod, log)
}
if err := r.deletePodAsFailed(ctx, ephemeralRunner, pod, log); err != nil {
log.Error(err, "failed to delete pod as failed on pod.Status.Phase: Failed")
if cs.State.Terminated.ExitCode == 0 {
log.Info("Runner container has succeeded but pod is in failed phase; Assume successful exit")
// If the pod is in a failed state, that means that at least one container exited with non-zero exit code.
// If the runner container exits with 0, we assume that the runner has finished successfully.
// If side-car container exits with non-zero, it shouldn't affect the runner. Runner exit code
// drives the controller's inference of whether the job has succeeded or failed.
if err := r.Delete(ctx, ephemeralRunner); err != nil {
log.Error(err, "Failed to delete ephemeral runner after successful completion")
return ctrl.Result{}, err
}
return ctrl.Result{}, nil
}
log.Info("Ephemeral runner container is still running")
log.Error(
errors.New("ephemeral runner container has failed, with runner container exit code non-zero"),
"Ephemeral runner container has failed, and runner container termination exit code is non-zero",
"containerTerminatedState", cs.State.Terminated,
)
return ctrl.Result{}, r.deleteEphemeralRunnerOrPod(ctx, ephemeralRunner, pod, log)
case cs == nil:
// starting, no container state yet
log.Info("Waiting for runner container status to be available")
return ctrl.Result{}, nil
case cs.State.Terminated == nil: // container is not terminated and pod phase is not failed, so runner is still running
log.Info("Runner container is still running; updating ephemeral runner status")
if err := r.updateRunStatusFromPod(ctx, ephemeralRunner, pod, log); err != nil {
log.Info("Failed to update ephemeral runner status. Requeue to not miss this event")
return ctrl.Result{}, err
@@ -340,36 +360,7 @@ func (r *EphemeralRunnerReconciler) Reconcile(ctx context.Context, req ctrl.Requ
case cs.State.Terminated.ExitCode != 0: // failed
log.Info("Ephemeral runner container failed", "exitCode", cs.State.Terminated.ExitCode)
if ephemeralRunner.HasJob() {
log.Error(
errors.New("ephemeral runner has a job assigned, but the pod has failed"),
"Ephemeral runner either has faulty entrypoint or something external killing the runner",
)
log.Info("Deleting the ephemeral runner that has a job assigned but the pod has failed")
if err := r.Delete(ctx, ephemeralRunner); err != nil {
log.Error(err, "Failed to delete the ephemeral runner that has a job assigned but the pod has failed")
return ctrl.Result{}, err
}
log.Info("Deleted the ephemeral runner that has a job assigned but the pod has failed")
log.Info("Trying to remove the runner from the service")
actionsClient, err := r.GetActionsService(ctx, ephemeralRunner)
if err != nil {
log.Error(err, "Failed to get actions client for removing the runner from the service")
return ctrl.Result{}, nil
}
if err := actionsClient.RemoveRunner(ctx, int64(ephemeralRunner.Status.RunnerId)); err != nil {
log.Error(err, "Failed to remove the runner from the service")
return ctrl.Result{}, nil
}
log.Info("Removed the runner from the service")
return ctrl.Result{}, nil
}
if err := r.deletePodAsFailed(ctx, ephemeralRunner, pod, log); err != nil {
log.Error(err, "Failed to delete runner pod on failure")
return ctrl.Result{}, err
}
return ctrl.Result{}, nil
return ctrl.Result{}, r.deleteEphemeralRunnerOrPod(ctx, ephemeralRunner, pod, log)
default: // succeeded
log.Info("Ephemeral runner has finished successfully, deleting ephemeral runner", "exitCode", cs.State.Terminated.ExitCode)
@@ -381,6 +372,41 @@ func (r *EphemeralRunnerReconciler) Reconcile(ctx context.Context, req ctrl.Requ
}
}
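// deleteEphemeralRunnerOrPod deletes the whole EphemeralRunner (and removes the
// runner from the Actions service) when a job is already assigned; otherwise it
// deletes only the pod via deletePodAsFailed so that a fresh pod is created.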
func (r *EphemeralRunnerReconciler) deleteEphemeralRunnerOrPod(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, pod *corev1.Pod, log logr.Logger) error {
if ephemeralRunner.HasJob() {
log.Error(
errors.New("ephemeral runner has a job assigned, but the pod has failed"),
"Ephemeral runner either has faulty entrypoint or something external killing the runner",
)
log.Info("Deleting the ephemeral runner that has a job assigned but the pod has failed")
if err := r.Delete(ctx, ephemeralRunner); err != nil {
log.Error(err, "Failed to delete the ephemeral runner that has a job assigned but the pod has failed")
return err
}
log.Info("Deleted the ephemeral runner that has a job assigned but the pod has failed")
log.Info("Trying to remove the runner from the service")
actionsClient, err := r.GetActionsService(ctx, ephemeralRunner)
if err != nil {
log.Error(err, "Failed to get actions client for removing the runner from the service")
return nil
}
if err := actionsClient.RemoveRunner(ctx, int64(ephemeralRunner.Status.RunnerId)); err != nil {
log.Error(err, "Failed to remove the runner from the service")
return nil
}
log.Info("Removed the runner from the service")
return nil
}
if err := r.deletePodAsFailed(ctx, ephemeralRunner, pod, log); err != nil {
log.Error(err, "Failed to delete runner pod on failure")
return err
}
return nil
}
func (r *EphemeralRunnerReconciler) cleanupRunnerFromService(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) (ok bool, err error) {
if err := r.deleteRunnerFromService(ctx, ephemeralRunner, log); err != nil {
actionsError := &actions.ActionsError{}
@@ -546,6 +572,8 @@ func (r *EphemeralRunnerReconciler) markAsFailed(ctx context.Context, ephemeralR
// deletePodAsFailed is responsible for deleting the pod and updating the .Status.Failures for tracking failure count.
// It should not be responsible for setting the status to Failed.
//
// It should be called by deleteEphemeralRunnerOrPod which is responsible for deciding whether to delete the EphemeralRunner or just the Pod.
func (r *EphemeralRunnerReconciler) deletePodAsFailed(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, pod *corev1.Pod, log logr.Logger) error {
if pod.DeletionTimestamp.IsZero() {
log.Info("Deleting the ephemeral runner pod", "podId", pod.UID)

View File

@@ -261,6 +261,238 @@ var _ = Describe("EphemeralRunner", func() {
).Should(BeTrue(), "Ephemeral runner should eventually be deleted")
})
It("It should delete ephemeral runner when pod failed before runner state is recorded and job assigned", func() {
er := new(v1alpha1.EphemeralRunner)
Eventually(func() error {
return k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, er)
}, ephemeralRunnerTimeout, ephemeralRunnerInterval).Should(Succeed(), "failed to get ephemeral runner")
er.Status.JobID = "1"
err := k8sClient.Status().Update(ctx, er)
Expect(err).To(BeNil(), "failed to update ephemeral runner status")
Eventually(func() (string, error) {
current := new(v1alpha1.EphemeralRunner)
if err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, current); err != nil {
return "", err
}
return current.Status.JobID, nil
}, ephemeralRunnerTimeout, ephemeralRunnerInterval).Should(BeEquivalentTo("1"))
pod := new(corev1.Pod)
Eventually(func() (bool, error) {
if err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, pod); err != nil {
return false, err
}
return true, nil
}, ephemeralRunnerTimeout, ephemeralRunnerInterval).Should(BeEquivalentTo(true))
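// Fail the pod with no container statuses, simulating a pod that died before
// the runner container could report any state.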
pod.Status.Phase = corev1.PodFailed
pod.Status.ContainerStatuses = nil
err = k8sClient.Status().Update(ctx, pod)
Expect(err).To(BeNil(), "Failed to update pod status")
Eventually(func() bool {
check := new(v1alpha1.EphemeralRunner)
err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, check)
return kerrors.IsNotFound(err)
}, ephemeralRunnerTimeout, ephemeralRunnerInterval).Should(BeTrue(), "Ephemeral runner should eventually be deleted")
})
It("It should delete ephemeral runner when pod failed before runner state is recorded and job not assigned", func() {
pod := new(corev1.Pod)
Eventually(func() (bool, error) {
if err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, pod); err != nil {
return false, err
}
return true, nil
}, ephemeralRunnerTimeout, ephemeralRunnerInterval).Should(BeEquivalentTo(true))
oldPodUID := pod.UID
pod.Status.Phase = corev1.PodFailed
pod.Status.ContainerStatuses = nil
err := k8sClient.Status().Update(ctx, pod)
Expect(err).To(BeNil(), "Failed to update pod status")
Eventually(
func() (int, error) {
updated := new(v1alpha1.EphemeralRunner)
err := k8sClient.Get(
ctx,
client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace},
updated,
)
if err != nil {
return 0, err
}
return len(updated.Status.Failures), nil
},
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
).Should(BeEquivalentTo(1))
Eventually(
func() (bool, error) {
newPod := new(corev1.Pod)
err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, newPod)
if err != nil {
return false, err
}
return newPod.UID != oldPodUID, nil
},
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
).Should(BeTrue(), "Pod should be re-created")
})
It("It should treat pod failed with runner container exit 0 as success with job id", func() {
er := new(v1alpha1.EphemeralRunner)
Eventually(func() error {
return k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, er)
}, ephemeralRunnerTimeout, ephemeralRunnerInterval).Should(Succeed(), "failed to get ephemeral runner")
er.Status.JobID = "1"
err := k8sClient.Status().Update(ctx, er)
Expect(err).To(BeNil(), "failed to update ephemeral runner status")
pod := new(corev1.Pod)
Eventually(
func() error {
if err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, pod); err != nil {
return err
}
return nil
},
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
).Should(Succeed(), "failed to get pod")
pod.Status.Phase = corev1.PodFailed
pod.Status.ContainerStatuses = append(pod.Status.ContainerStatuses, corev1.ContainerStatus{
Name: v1alpha1.EphemeralRunnerContainerName,
State: corev1.ContainerState{
Terminated: &corev1.ContainerStateTerminated{
ExitCode: 0,
},
},
})
err = k8sClient.Status().Update(ctx, pod)
Expect(err).To(BeNil(), "Failed to update pod status")
Eventually(
func() bool {
check := new(v1alpha1.EphemeralRunner)
err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, check)
return kerrors.IsNotFound(err)
},
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
).Should(BeTrue(), "Ephemeral runner should eventually be deleted")
})
It("It should treat pod failed with runner container exit 0 as success with no job id", func() {
pod := new(corev1.Pod)
Eventually(
func() error {
if err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, pod); err != nil {
return err
}
return nil
},
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
).Should(Succeed(), "failed to get pod")
pod.Status.Phase = corev1.PodFailed
pod.Status.ContainerStatuses = append(pod.Status.ContainerStatuses, corev1.ContainerStatus{
Name: v1alpha1.EphemeralRunnerContainerName,
State: corev1.ContainerState{
Terminated: &corev1.ContainerStateTerminated{
ExitCode: 0,
},
},
})
err := k8sClient.Status().Update(ctx, pod)
Expect(err).To(BeNil(), "Failed to update pod status")
Eventually(
func() bool {
check := new(v1alpha1.EphemeralRunner)
err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, check)
return kerrors.IsNotFound(err)
},
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
).Should(BeTrue(), "Ephemeral runner should eventually be deleted")
})
It("It should mark as failed when job is not assigned and pod is failed", func() {
er := new(v1alpha1.EphemeralRunner)
Eventually(func() error {
return k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, er)
},
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
).Should(Succeed(), "failed to get ephemeral runner")
pod := new(corev1.Pod)
Eventually(
func() error {
if err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, pod); err != nil {
return err
}
return nil
},
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
).Should(Succeed(), "failed to get pod")
pod.Status.Phase = corev1.PodFailed
oldPodUID := pod.UID
pod.Status.ContainerStatuses = append(pod.Status.ContainerStatuses, corev1.ContainerStatus{
Name: v1alpha1.EphemeralRunnerContainerName,
State: corev1.ContainerState{
Terminated: &corev1.ContainerStateTerminated{
ExitCode: 1,
},
},
})
err := k8sClient.Status().Update(ctx, pod)
Expect(err).To(BeNil(), "Failed to update pod status")
Eventually(
func() (int, error) {
updated := new(v1alpha1.EphemeralRunner)
err := k8sClient.Get(
ctx,
client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace},
updated,
)
if err != nil {
return 0, err
}
return len(updated.Status.Failures), nil
},
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
).Should(BeEquivalentTo(1))
Eventually(
func() (bool, error) {
newPod := new(corev1.Pod)
err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, newPod)
if err != nil {
return false, err
}
return newPod.UID != oldPodUID, nil
},
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
).Should(BeTrue(), "Pod should be re-created")
})
It("It should failed if a pod template is invalid", func() {
invalideEphemeralRunner := newExampleRunner("invalid-ephemeral-runner", autoscalingNS.Name, configSecret.Name)
invalideEphemeralRunner.Spec.Spec.PriorityClassName = "notexist"
@@ -745,6 +977,52 @@ var _ = Describe("EphemeralRunner", func() {
).Should(BeEquivalentTo(true))
})
It("It should re-create pod on reason starting with OutOf", func() {
pod := new(corev1.Pod)
Eventually(
func() (bool, error) {
err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, pod)
if err != nil {
return false, err
}
return true, nil
},
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
).Should(BeEquivalentTo(true))
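// Kubelet admission failures such as "OutOfpods" or "OutOfcpu" set a pod reason
// starting with "OutOf" without a container termination state.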
pod.Status.Phase = corev1.PodFailed
pod.Status.Reason = "OutOfpods"
pod.Status.ContainerStatuses = append(pod.Status.ContainerStatuses, corev1.ContainerStatus{
Name: v1alpha1.EphemeralRunnerContainerName,
State: corev1.ContainerState{},
})
err := k8sClient.Status().Update(ctx, pod)
Expect(err).To(BeNil(), "failed to patch pod status")
updated := new(v1alpha1.EphemeralRunner)
Eventually(func() (bool, error) {
err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, updated)
if err != nil {
return false, err
}
return len(updated.Status.Failures) == 1, nil
}, ephemeralRunnerTimeout, ephemeralRunnerInterval).Should(BeEquivalentTo(true))
// should re-create after failure
Eventually(
func() (bool, error) {
pod := new(corev1.Pod)
if err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, pod); err != nil {
return false, err
}
return true, nil
},
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
).Should(BeEquivalentTo(true))
})
It("It should not set the phase to succeeded without pod termination status", func() {
pod := new(corev1.Pod)
Eventually(

View File

@@ -15,10 +15,12 @@ import (
func TestGitHubConfig(t *testing.T) {
t.Run("when given a valid URL", func(t *testing.T) {
tests := []struct {
name string
configURL string
expected *actions.GitHubConfig
}{
{
name: "repository URL",
configURL: "https://github.com/org/repo",
expected: &actions.GitHubConfig{
Scope: actions.GitHubScopeRepository,
@@ -29,6 +31,7 @@ func TestGitHubConfig(t *testing.T) {
},
},
{
name: "repository URL with trailing slash",
configURL: "https://github.com/org/repo/",
expected: &actions.GitHubConfig{
Scope: actions.GitHubScopeRepository,
@@ -39,6 +42,7 @@ func TestGitHubConfig(t *testing.T) {
},
},
{
name: "organization URL",
configURL: "https://github.com/org",
expected: &actions.GitHubConfig{
Scope: actions.GitHubScopeOrganization,
@@ -49,6 +53,7 @@ func TestGitHubConfig(t *testing.T) {
},
},
{
name: "enterprise URL",
configURL: "https://github.com/enterprises/my-enterprise",
expected: &actions.GitHubConfig{
Scope: actions.GitHubScopeEnterprise,
@@ -59,6 +64,7 @@ func TestGitHubConfig(t *testing.T) {
},
},
{
name: "enterprise URL with trailing slash",
configURL: "https://github.com/enterprises/my-enterprise/",
expected: &actions.GitHubConfig{
Scope: actions.GitHubScopeEnterprise,
@@ -69,6 +75,7 @@ func TestGitHubConfig(t *testing.T) {
},
},
{
name: "organization URL with www",
configURL: "https://www.github.com/org",
expected: &actions.GitHubConfig{
Scope: actions.GitHubScopeOrganization,
@@ -79,6 +86,7 @@ func TestGitHubConfig(t *testing.T) {
},
},
{
name: "organization URL with www and trailing slash",
configURL: "https://www.github.com/org/",
expected: &actions.GitHubConfig{
Scope: actions.GitHubScopeOrganization,
@@ -89,6 +97,7 @@ func TestGitHubConfig(t *testing.T) {
},
},
{
name: "github local URL",
configURL: "https://github.localhost/org",
expected: &actions.GitHubConfig{
Scope: actions.GitHubScopeOrganization,
@@ -99,6 +108,7 @@ func TestGitHubConfig(t *testing.T) {
},
},
{
name: "github local org URL",
configURL: "https://my-ghes.com/org",
expected: &actions.GitHubConfig{
Scope: actions.GitHubScopeOrganization,
@@ -109,6 +119,7 @@ func TestGitHubConfig(t *testing.T) {
},
},
{
name: "github local URL with trailing slash",
configURL: "https://my-ghes.com/org/",
expected: &actions.GitHubConfig{
Scope: actions.GitHubScopeOrganization,
@@ -119,6 +130,7 @@ func TestGitHubConfig(t *testing.T) {
},
},
{
name: "github local URL with ghe.com",
configURL: "https://my-ghes.ghe.com/org/",
expected: &actions.GitHubConfig{
Scope: actions.GitHubScopeOrganization,
@@ -131,7 +143,7 @@ func TestGitHubConfig(t *testing.T) {
}
for _, test := range tests {
t.Run(test.configURL, func(t *testing.T) {
t.Run(test.name, func(t *testing.T) {
parsedURL, err := url.Parse(strings.Trim(test.configURL, "/"))
require.NoError(t, err)
test.expected.ConfigURL = parsedURL

View File

@@ -119,88 +119,84 @@ func TestGitHubAPIError(t *testing.T) {
})
}
func ParseActionsErrorFromResponse(t *testing.T) {
func TestParseActionsErrorFromResponse(t *testing.T) {
t.Run("empty content length", func(t *testing.T) {
response := &http.Response{
ContentLength: 0,
Header: http.Header{
actions.HeaderActionsActivityID: []string{"activity-id"},
},
StatusCode: 404,
Header: http.Header{},
StatusCode: 404,
}
response.Header.Add(actions.HeaderActionsActivityID, "activity-id")
err := actions.ParseActionsErrorFromResponse(response)
require.Error(t, err)
assert.Equal(t, err.(*actions.ActionsError).ActivityID, "activity-id")
assert.Equal(t, err.(*actions.ActionsError).StatusCode, 404)
assert.Equal(t, err.(*actions.ActionsError).Err.Error(), "unknown exception")
assert.Equal(t, "activity-id", err.(*actions.ActionsError).ActivityID)
assert.Equal(t, 404, err.(*actions.ActionsError).StatusCode)
assert.Equal(t, "unknown exception", err.(*actions.ActionsError).Err.Error())
})
t.Run("contains text plain error", func(t *testing.T) {
errorMessage := "example error message"
response := &http.Response{
ContentLength: int64(len(errorMessage)),
Header: http.Header{
actions.HeaderActionsActivityID: []string{"activity-id"},
"Content-Type": []string{"text/plain"},
},
StatusCode: 404,
Body: io.NopCloser(strings.NewReader(errorMessage)),
StatusCode: 404,
Header: http.Header{},
Body: io.NopCloser(strings.NewReader(errorMessage)),
}
response.Header.Add(actions.HeaderActionsActivityID, "activity-id")
response.Header.Add("Content-Type", "text/plain")
err := actions.ParseActionsErrorFromResponse(response)
require.Error(t, err)
var actionsError *actions.ActionsError
assert.ErrorAs(t, err, &actionsError)
assert.Equal(t, actionsError.ActivityID, "activity-id")
assert.Equal(t, actionsError.StatusCode, 404)
assert.Equal(t, actionsError.Err.Error(), errorMessage)
require.ErrorAs(t, err, &actionsError)
assert.Equal(t, "activity-id", actionsError.ActivityID)
assert.Equal(t, 404, actionsError.StatusCode)
assert.Equal(t, errorMessage, actionsError.Err.Error())
})
t.Run("contains json error", func(t *testing.T) {
errorMessage := `{"typeName":"exception-name","message":"example error message"}`
response := &http.Response{
ContentLength: int64(len(errorMessage)),
Header: http.Header{
actions.HeaderActionsActivityID: []string{"activity-id"},
"Content-Type": []string{"application/json"},
},
StatusCode: 404,
Body: io.NopCloser(strings.NewReader(errorMessage)),
Header: http.Header{},
StatusCode: 404,
Body: io.NopCloser(strings.NewReader(errorMessage)),
}
response.Header.Add(actions.HeaderActionsActivityID, "activity-id")
response.Header.Add("Content-Type", "application/json")
err := actions.ParseActionsErrorFromResponse(response)
require.Error(t, err)
var actionsError *actions.ActionsError
assert.ErrorAs(t, err, &actionsError)
assert.Equal(t, actionsError.ActivityID, "activity-id")
assert.Equal(t, actionsError.StatusCode, 404)
require.ErrorAs(t, err, &actionsError)
assert.Equal(t, "activity-id", actionsError.ActivityID)
assert.Equal(t, 404, actionsError.StatusCode)
inner, ok := actionsError.Err.(*actions.ActionsExceptionError)
require.True(t, ok)
assert.Equal(t, inner.ExceptionName, "exception-name")
assert.Equal(t, inner.Message, "example error message")
assert.Equal(t, "exception-name", inner.ExceptionName)
assert.Equal(t, "example error message", inner.Message)
})
t.Run("wrapped exception error", func(t *testing.T) {
errorMessage := `{"typeName":"exception-name","message":"example error message"}`
response := &http.Response{
ContentLength: int64(len(errorMessage)),
Header: http.Header{
actions.HeaderActionsActivityID: []string{"activity-id"},
"Content-Type": []string{"application/json"},
},
StatusCode: 404,
Body: io.NopCloser(strings.NewReader(errorMessage)),
Header: http.Header{},
StatusCode: 404,
Body: io.NopCloser(strings.NewReader(errorMessage)),
}
response.Header.Add(actions.HeaderActionsActivityID, "activity-id")
response.Header.Add("Content-Type", "application/json")
err := actions.ParseActionsErrorFromResponse(response)
require.Error(t, err)
var actionsExceptionError *actions.ActionsExceptionError
assert.ErrorAs(t, err, &actionsExceptionError)
require.ErrorAs(t, err, &actionsExceptionError)
assert.Equal(t, actionsExceptionError.ExceptionName, "exception-name")
assert.Equal(t, actionsExceptionError.Message, "example error message")
assert.Equal(t, "exception-name", actionsExceptionError.ExceptionName)
assert.Equal(t, "example error message", actionsExceptionError.Message)
})
}

go.mod (32 changed lines)
View File

@@ -16,25 +16,25 @@ require (
github.com/google/uuid v1.6.0
github.com/gorilla/mux v1.8.1
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79
github.com/gruntwork-io/terratest v0.53.0
github.com/gruntwork-io/terratest v0.54.0
github.com/hashicorp/go-retryablehttp v0.7.8
github.com/kelseyhightower/envconfig v1.4.0
github.com/onsi/ginkgo v1.16.5
github.com/onsi/ginkgo/v2 v2.27.2
github.com/onsi/gomega v1.38.2
github.com/onsi/ginkgo/v2 v2.27.3
github.com/onsi/gomega v1.38.3
github.com/prometheus/client_golang v1.23.2
github.com/stretchr/testify v1.11.1
github.com/teambition/rrule-go v1.8.2
go.uber.org/multierr v1.11.0
go.uber.org/zap v1.27.0
golang.org/x/net v0.47.0
golang.org/x/oauth2 v0.33.0
golang.org/x/sync v0.18.0
go.uber.org/zap v1.27.1
golang.org/x/net v0.48.0
golang.org/x/oauth2 v0.34.0
golang.org/x/sync v0.19.0
gomodules.xyz/jsonpatch/v2 v2.5.0
gopkg.in/yaml.v2 v2.4.0
k8s.io/api v0.34.2
k8s.io/apimachinery v0.34.2
k8s.io/client-go v0.34.2
k8s.io/api v0.35.0
k8s.io/apimachinery v0.35.0
k8s.io/client-go v0.35.0
k8s.io/utils v0.0.0-20251002143259-bc988d571ff4
sigs.k8s.io/controller-runtime v0.22.4
sigs.k8s.io/yaml v1.6.0
@@ -165,14 +165,14 @@ require (
github.com/xrash/smetrics v0.0.0-20250705151800-55b8f293f342 // indirect
go.yaml.in/yaml/v2 v2.4.3 // indirect
go.yaml.in/yaml/v3 v3.0.4 // indirect
golang.org/x/crypto v0.45.0 // indirect
golang.org/x/crypto v0.46.0 // indirect
golang.org/x/exp v0.0.0-20251002181428-27f1f14c8bb9 // indirect
golang.org/x/mod v0.29.0 // indirect
golang.org/x/sys v0.38.0 // indirect
golang.org/x/term v0.37.0 // indirect
golang.org/x/text v0.31.0 // indirect
golang.org/x/mod v0.30.0 // indirect
golang.org/x/sys v0.39.0 // indirect
golang.org/x/term v0.38.0 // indirect
golang.org/x/text v0.32.0 // indirect
golang.org/x/time v0.13.0 // indirect
golang.org/x/tools v0.38.0 // indirect
golang.org/x/tools v0.39.0 // indirect
google.golang.org/protobuf v1.36.10 // indirect
gopkg.in/evanphx/json-patch.v4 v4.13.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect

go.sum (68 changed lines)
View File

@@ -233,8 +233,8 @@ github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5T
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA=
github.com/gruntwork-io/go-commons v0.17.2 h1:14dsCJ7M5Vv2X3BIPKeG9Kdy6vTMGhM8L4WZazxfTuY=
github.com/gruntwork-io/go-commons v0.17.2/go.mod h1:zs7Q2AbUKuTarBPy19CIxJVUX/rBamfW8IwuWKniWkE=
github.com/gruntwork-io/terratest v0.53.0 h1:r5U3nfrQCTGvnlJIIh6R5g8z8dwRcjNESYO/wYyOXsI=
github.com/gruntwork-io/terratest v0.53.0/go.mod h1:y2Evi+Ac04QpzF3mbRPqrBjipDN7gjqlw6+OZoy2vX4=
github.com/gruntwork-io/terratest v0.54.0 h1:JOVATYDpU0NAPbEkgYUP50BR2m45UGiR4dbs20sKzck=
github.com/gruntwork-io/terratest v0.54.0/go.mod h1:QvwQWZMTJmJB4E0d1Uc18quQm7+X53liKKp+fJSuaKA=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
@@ -316,12 +316,12 @@ github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
github.com/onsi/ginkgo/v2 v2.27.2 h1:LzwLj0b89qtIy6SSASkzlNvX6WktqurSHwkk2ipF/Ns=
github.com/onsi/ginkgo/v2 v2.27.2/go.mod h1:ArE1D/XhNXBXCBkKOLkbsb2c81dQHCRcF5zwn/ykDRo=
github.com/onsi/ginkgo/v2 v2.27.3 h1:ICsZJ8JoYafeXFFlFAG75a7CxMsJHwgKwtO+82SE9L8=
github.com/onsi/ginkgo/v2 v2.27.3/go.mod h1:ArE1D/XhNXBXCBkKOLkbsb2c81dQHCRcF5zwn/ykDRo=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.38.2 h1:eZCjf2xjZAqe+LeWvKb5weQ+NcPwX84kqJ0cZNxok2A=
github.com/onsi/gomega v1.38.2/go.mod h1:W2MJcYxRGV63b418Ai34Ud0hEdTVXq9NW9+Sx6uXf3k=
github.com/onsi/gomega v1.38.3 h1:eTX+W6dobAYfFeGC2PV6RwXRu/MyT+cQguijutvkpSM=
github.com/onsi/gomega v1.38.3/go.mod h1:ZCU1pkQcXDO5Sl9/VVEGlDyp+zm0m1cmeG5TOzLgdh4=
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ=
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
@@ -339,8 +339,8 @@ github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9Z
github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA=
github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0=
github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw=
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/sergi/go-diff v1.4.0 h1:n/SP9D5ad1fORl+llWyN+D6qoUETXNZARKjyY2/KVCw=
@@ -382,8 +382,8 @@ go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
go.uber.org/zap v1.27.1 h1:08RqriUEv8+ArZRYSTXy1LeBScaMpVSTBhCeaZYfMYc=
go.uber.org/zap v1.27.1/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0=
go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8=
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
@@ -391,30 +391,30 @@ go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q=
golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4=
golang.org/x/crypto v0.46.0 h1:cKRW/pmt1pKAfetfu+RCEvjvZkA9RimPbh7bhFjGVBU=
golang.org/x/crypto v0.46.0/go.mod h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU0=
golang.org/x/exp v0.0.0-20251002181428-27f1f14c8bb9 h1:TQwNpfvNkxAVlItJf6Cr5JTsVZoC/Sj7K3OZv2Pc14A=
golang.org/x/exp v0.0.0-20251002181428-27f1f14c8bb9/go.mod h1:TwQYMMnGpvZyc+JpB/UAuTNIsVJifOlSkrZkhcvpVUk=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA=
golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w=
golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk=
golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
golang.org/x/oauth2 v0.33.0 h1:4Q+qn+E5z8gPRJfmRy7C2gGG3T4jIprK6aSYgTXGRpo=
golang.org/x/oauth2 v0.33.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU=
golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY=
golang.org/x/oauth2 v0.34.0 h1:hqK/t4AKgbqWkdkcAeI8XLmbK+4m4G5YeQRrmiotGlw=
golang.org/x/oauth2 v0.34.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -426,14 +426,14 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU=
golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254=
golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk=
golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/term v0.38.0 h1:PQ5pkm/rLO6HnxFR7N2lJHOZX6Kez5Y1gDSJla6jo7Q=
golang.org/x/term v0.38.0/go.mod h1:bSEAKrOT1W+VSu9TSCMtoGEOUcKxOKgl3LE5QEF/xVg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU=
golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY=
golang.org/x/time v0.13.0 h1:eUlYslOIt32DgYD6utsuUeHs4d7AsEYLuIAdg7FlYgI=
golang.org/x/time v0.13.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -441,8 +441,8 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ=
golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs=
golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ=
golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -475,14 +475,14 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
k8s.io/api v0.34.2 h1:fsSUNZhV+bnL6Aqrp6O7lMTy6o5x2C4XLjnh//8SLYY=
k8s.io/api v0.34.2/go.mod h1:MMBPaWlED2a8w4RSeanD76f7opUoypY8TFYkSM+3XHw=
k8s.io/api v0.35.0 h1:iBAU5LTyBI9vw3L5glmat1njFK34srdLmktWwLTprlY=
k8s.io/api v0.35.0/go.mod h1:AQ0SNTzm4ZAczM03QH42c7l3bih1TbAXYo0DkF8ktnA=
k8s.io/apiextensions-apiserver v0.34.1 h1:NNPBva8FNAPt1iSVwIE0FsdrVriRXMsaWFMqJbII2CI=
k8s.io/apiextensions-apiserver v0.34.1/go.mod h1:hP9Rld3zF5Ay2Of3BeEpLAToP+l4s5UlxiHfqRaRcMc=
k8s.io/apimachinery v0.34.2 h1:zQ12Uk3eMHPxrsbUJgNF8bTauTVR2WgqJsTmwTE/NW4=
k8s.io/apimachinery v0.34.2/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw=
k8s.io/client-go v0.34.2 h1:Co6XiknN+uUZqiddlfAjT68184/37PS4QAzYvQvDR8M=
k8s.io/client-go v0.34.2/go.mod h1:2VYDl1XXJsdcAxw7BenFslRQX28Dxz91U9MWKjX97fE=
k8s.io/apimachinery v0.35.0 h1:Z2L3IHvPVv/MJ7xRxHEtk6GoJElaAqDCCU0S6ncYok8=
k8s.io/apimachinery v0.35.0/go.mod h1:jQCgFZFR1F4Ik7hvr2g84RTJSZegBc8yHgFWKn//hns=
k8s.io/client-go v0.35.0 h1:IAW0ifFbfQQwQmga0UdoH0yvdqrbwMdq9vIFEhRpxBE=
k8s.io/client-go v0.35.0/go.mod h1:q2E5AAyqcbeLGPdoRB+Nxe3KYTfPce1Dnu1myQdqz9o=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 h1:Y3gxNAuB0OBLImH611+UDZcmKS3g6CthxToOb37KgwE=