Mirror of https://github.com/actions/actions-runner-controller.git (synced 2025-12-24 10:37:32 +08:00)

Compare commits — 2 commits on gha-runner: cf05042cfb, 51023ade49
.github/actions/execute-assert-arc-e2e/action.yaml (new file, 215 lines)
@@ -0,0 +1,215 @@
name: 'Execute and Assert ARC E2E Test Action'
description: 'Queue the E2E test workflow and assert that the workflow run succeeds'

inputs:
  auth-token:
    description: 'GitHub access token to queue workflow run'
    required: true
  repo-owner:
    description: "The repository owner name that has the test workflow file, ex: actions"
    required: true
  repo-name:
    description: "The repository name that has the test workflow file, ex: test"
    required: true
  workflow-file:
    description: 'The file name of the workflow yaml, ex: test.yml'
    required: true
  arc-name:
    description: 'The name of the configured gha-runner-scale-set'
    required: true
  arc-namespace:
    description: 'The namespace of the configured gha-runner-scale-set'
    required: true
  arc-controller-namespace:
    description: 'The namespace of the configured gha-runner-scale-set-controller'
    required: true
  wait-to-finish:
    description: 'Wait for the workflow run to finish'
    required: true
    default: "true"
  wait-to-running:
    description: 'Wait for the workflow run to start running'
    required: true
    default: "false"

runs:
  using: "composite"
  steps:
    - name: Queue test workflow
      shell: bash
      id: queue_workflow
      run: |
        queue_time=`date +%FT%TZ`
        echo "queue_time=$queue_time" >> $GITHUB_OUTPUT
        curl -X POST https://api.github.com/repos/${{inputs.repo-owner}}/${{inputs.repo-name}}/actions/workflows/${{inputs.workflow-file}}/dispatches \
          -H "Accept: application/vnd.github.v3+json" \
          -H "Authorization: token ${{inputs.auth-token}}" \
          -d '{"ref": "main", "inputs": { "arc_name": "${{inputs.arc-name}}" } }'

    - name: Fetch workflow run & job ids
      uses: actions/github-script@v7
      id: query_workflow
      with:
        script: |
          // Try to find the workflow run triggered by the previous step using the workflow_dispatch event:
          // - Find recently created workflow runs in the test repository.
          // - For each workflow run, list its jobs and check whether a job's labels contain `inputs.arc-name`.
          // - Since inputs.arc-name should be unique per e2e workflow run, the first job carrying that label identifies the run we just triggered.
          function sleep(ms) {
            return new Promise(resolve => setTimeout(resolve, ms))
          }
          const owner = '${{inputs.repo-owner}}'
          const repo = '${{inputs.repo-name}}'
          const workflow_id = '${{inputs.workflow-file}}'
          let workflow_run_id = 0
          let workflow_job_id = 0
          let workflow_run_html_url = ""
          let count = 0
          while (count++ < 12) {
            await sleep(10 * 1000);
            let listRunResponse = await github.rest.actions.listWorkflowRuns({
              owner: owner,
              repo: repo,
              workflow_id: workflow_id,
              created: '>${{steps.queue_workflow.outputs.queue_time}}'
            })
            if (listRunResponse.data.total_count > 0) {
              console.log(`Found some new workflow runs for ${workflow_id}`)
              for (let i = 0; i < listRunResponse.data.total_count; i++) {
                let workflowRun = listRunResponse.data.workflow_runs[i]
                console.log(`Check if workflow run ${workflowRun.id} is triggered by us.`)
                let listJobResponse = await github.rest.actions.listJobsForWorkflowRun({
                  owner: owner,
                  repo: repo,
                  run_id: workflowRun.id
                })
                console.log(`Workflow run ${workflowRun.id} has ${listJobResponse.data.total_count} jobs.`)
                if (listJobResponse.data.total_count > 0) {
                  for (let j = 0; j < listJobResponse.data.total_count; j++) {
                    let workflowJob = listJobResponse.data.jobs[j]
                    console.log(`Check if workflow job ${workflowJob.id} is triggered by us.`)
                    console.log(JSON.stringify(workflowJob.labels));
                    if (workflowJob.labels.includes('${{inputs.arc-name}}')) {
                      console.log(`Workflow job ${workflowJob.id} (Run id: ${workflowJob.run_id}) is triggered by us.`)
                      workflow_run_id = workflowJob.run_id
                      workflow_job_id = workflowJob.id
                      workflow_run_html_url = workflowRun.html_url
                      break
                    }
                  }
                }
                if (workflow_job_id > 0) {
                  break;
                }
              }
            }
            if (workflow_job_id > 0) {
              break;
            }
          }
          if (workflow_job_id == 0) {
            core.setFailed(`Can't find workflow run and workflow job triggered to 'runs-on ${{inputs.arc-name}}'`)
          } else {
            core.setOutput('workflow_run', workflow_run_id);
            core.setOutput('workflow_job', workflow_job_id);
            core.setOutput('workflow_run_url', workflow_run_html_url);
          }

    - name: Generate summary about the triggered workflow run
      shell: bash
      run: |
        cat <<-EOF > $GITHUB_STEP_SUMMARY
        | **Triggered workflow run** |
        |:--------------------------:|
        | ${{steps.query_workflow.outputs.workflow_run_url}} |
        EOF

    - name: Wait for workflow to start running
      if: inputs.wait-to-running == 'true' && inputs.wait-to-finish == 'false'
      uses: actions/github-script@v7
      with:
        script: |
          function sleep(ms) {
            return new Promise(resolve => setTimeout(resolve, ms))
          }
          const owner = '${{inputs.repo-owner}}'
          const repo = '${{inputs.repo-name}}'
          const workflow_run_id = ${{steps.query_workflow.outputs.workflow_run}}
          const workflow_job_id = ${{steps.query_workflow.outputs.workflow_job}}
          let count = 0
          while (count++ < 10) {
            await sleep(30 * 1000);
            let getRunResponse = await github.rest.actions.getWorkflowRun({
              owner: owner,
              repo: repo,
              run_id: workflow_run_id
            })
            console.log(`${getRunResponse.data.html_url}: ${getRunResponse.data.status} (${getRunResponse.data.conclusion})`);
            if (getRunResponse.data.status == 'in_progress') {
              console.log(`Workflow run is in progress.`)
              return
            }
          }
          core.setFailed(`The triggered workflow run didn't start properly using ${{inputs.arc-name}}`)

    - name: Wait for workflow to finish successfully
      if: inputs.wait-to-finish == 'true'
      uses: actions/github-script@v7
      with:
        script: |
          // Wait up to 5 minutes and make sure the workflow run we triggered completed with result 'success'.
          function sleep(ms) {
            return new Promise(resolve => setTimeout(resolve, ms))
          }
          const owner = '${{inputs.repo-owner}}'
          const repo = '${{inputs.repo-name}}'
          const workflow_run_id = ${{steps.query_workflow.outputs.workflow_run}}
          const workflow_job_id = ${{steps.query_workflow.outputs.workflow_job}}
          let count = 0
          while (count++ < 10) {
            await sleep(30 * 1000);
            let getRunResponse = await github.rest.actions.getWorkflowRun({
              owner: owner,
              repo: repo,
              run_id: workflow_run_id
            })
            console.log(`${getRunResponse.data.html_url}: ${getRunResponse.data.status} (${getRunResponse.data.conclusion})`);
            if (getRunResponse.data.status == 'completed') {
              if (getRunResponse.data.conclusion == 'success') {
                console.log(`Workflow run finished properly.`)
                return
              } else {
                core.setFailed(`The triggered workflow run finished with result ${getRunResponse.data.conclusion}`)
                return
              }
            }
          }
          core.setFailed(`The triggered workflow run didn't finish properly using ${{inputs.arc-name}}`)

    - name: Gather listener logs
      shell: bash
      if: always()
      run: |
        LISTENER_POD="$(kubectl get autoscalinglisteners.actions.github.com -n arc-systems -o jsonpath='{.items[*].metadata.name}')"
        kubectl logs $LISTENER_POD -n ${{inputs.arc-controller-namespace}}

    - name: Gather coredns logs
      shell: bash
      if: always()
      run: |
        kubectl logs deployments/coredns -n kube-system

    - name: cleanup
      if: inputs.wait-to-finish == 'true'
      shell: bash
      run: |
        helm uninstall ${{ inputs.arc-name }} --namespace ${{inputs.arc-namespace}} --debug
        kubectl wait --timeout=30s --for=delete AutoScalingRunnerSet -n ${{inputs.arc-namespace}} -l app.kubernetes.io/instance=${{ inputs.arc-name }}

    - name: Gather controller logs
      shell: bash
      if: always()
      run: |
        kubectl logs deployment/arc-gha-rs-controller -n ${{inputs.arc-controller-namespace}}
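A typical invocation of this composite action from an e2e workflow might look like the following sketch; the job layout, secret name, and input values are illustrative assumptions, not taken from this diff:

# Hypothetical caller (job name, secret name, and values are assumptions):
jobs:
  arc-e2e:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v5
      - name: Run and assert the E2E workflow
        uses: ./.github/actions/execute-assert-arc-e2e
        with:
          auth-token: ${{ secrets.E2E_ACCESS_TOKEN }}  # assumed secret name
          repo-owner: actions
          repo-name: test
          workflow-file: test.yml
          arc-name: my-unique-scale-set                # assumed; should be unique per run
          arc-namespace: arc-runners
          arc-controller-namespace: arc-systems
          wait-to-finish: "true"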
.github/actions/setup-arc-e2e/action.yaml (new file, 65 lines)
@@ -0,0 +1,65 @@
name: "Setup ARC E2E Test Action"
description: "Build the controller image, create a minikube cluster, load the image, and exchange the ARC configuration token."

inputs:
  app-id:
    description: "GitHub App ID used to exchange the access token"
    required: true
  app-pk:
    description: "GitHub App private key used to exchange the access token"
    required: true
  image-name:
    description: "Local docker image name for building"
    required: true
  image-tag:
    description: "Tag of ARC Docker image for building"
    required: true
  target-org:
    description: "The test organization for ARC e2e test"
    required: true

outputs:
  token:
    description: "Token to use for configuring ARC"
    value: ${{steps.config-token.outputs.token}}

runs:
  using: "composite"
  steps:
    - name: Set up Docker Buildx
      uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2
      with:
        # Pinning v0.9.1 for Buildx and BuildKit v0.10.6;
        # BuildKit v0.11 has a bug causing intermittent
        # failures pushing images to GHCR.
        version: v0.9.1
        driver-opts: image=moby/buildkit:v0.10.6

    - name: Build controller image
      # https://github.com/docker/build-push-action/releases/tag/v6.18.0
      uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83
      with:
        file: Dockerfile
        platforms: linux/amd64
        load: true
        build-args: |
          DOCKER_IMAGE_NAME=${{inputs.image-name}}
          VERSION=${{inputs.image-tag}}
        tags: |
          ${{inputs.image-name}}:${{inputs.image-tag}}
        no-cache: true

    - name: Create minikube cluster and load image
      shell: bash
      run: |
        minikube start
        minikube image load ${{inputs.image-name}}:${{inputs.image-tag}}

    - name: Get configure token
      id: config-token
      # https://github.com/peter-murray/workflow-application-token-action/releases/tag/v3.0.0
      uses: peter-murray/workflow-application-token-action@dc0413987a085fa17d19df9e47d4677cf81ffef3
      with:
        application_id: ${{ inputs.app-id }}
        application_private_key: ${{ inputs.app-pk }}
        organization: ${{ inputs.target-org}}
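A caller would typically run this setup action once per test job and then consume its `token` output; the step below is a sketch, with the secret names and organization being assumptions:

# Hypothetical caller (secret names and org are assumptions):
- name: Setup ARC e2e environment
  id: setup
  uses: ./.github/actions/setup-arc-e2e
  with:
    app-id: ${{ secrets.E2E_APP_ID }}        # assumed secret name
    app-pk: ${{ secrets.E2E_APP_PK }}        # assumed secret name
    image-name: gha-runner-scale-set-controller
    image-tag: e2e-${{ github.sha }}
    target-org: my-test-org                  # assumed organization
# The exchanged token is then available as ${{ steps.setup.outputs.token }}.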
.github/actions/setup-docker-environment/action.yaml (new file, 51 lines)
@@ -0,0 +1,51 @@
name: "Setup Docker"

inputs:
  username:
    description: "DockerHub username"
    required: true
  password:
    description: "DockerHub password"
    required: true
  ghcr_username:
    description: "GHCR username. Usually set from the github.actor variable"
    required: true
  ghcr_password:
    description: "GHCR password. Usually set from the secrets.GITHUB_TOKEN variable"
    required: true

runs:
  using: "composite"
  steps:
    - name: Get Short SHA
      id: vars
      run: |
        echo "sha_short=${GITHUB_SHA::7}" >> $GITHUB_ENV
      shell: bash

    - name: Set up QEMU
      # https://github.com/docker/setup-qemu-action/releases/tag/v3.6.0
      uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392

    - name: Set up Docker Buildx
      # https://github.com/docker/setup-buildx-action/releases/tag/v3.10.0
      uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2
      with:
        version: latest

    - name: Login to DockerHub
      if: ${{ github.event_name == 'release' || github.event_name == 'push' && github.ref == 'refs/heads/master' && inputs.password != '' }}
      # https://github.com/docker/login-action/releases/tag/v3.4.0
      uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772
      with:
        username: ${{ inputs.username }}
        password: ${{ inputs.password }}

    - name: Login to GitHub Container Registry
      if: ${{ github.event_name == 'release' || github.event_name == 'push' && github.ref == 'refs/heads/master' && inputs.ghcr_password != '' }}
      # https://github.com/docker/login-action/releases/tag/v3.4.0
      uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772
      with:
        registry: ghcr.io
        username: ${{ inputs.ghcr_username }}
        password: ${{ inputs.ghcr_password }}
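Note that in the two `if:` expressions, `&&` binds tighter than `||`, so each login runs for any release event, or for pushes to master when the corresponding credential is non-empty. A caller sketch follows; the DockerHub secret names are assumptions, while the GHCR values follow the input descriptions above:

# Hypothetical caller (DockerHub secret names are assumptions):
- name: Setup Docker
  uses: ./.github/actions/setup-docker-environment
  with:
    username: ${{ secrets.DOCKERHUB_USERNAME }}  # assumed secret name
    password: ${{ secrets.DOCKERHUB_PASSWORD }}  # assumed secret name
    ghcr_username: ${{ github.actor }}
    ghcr_password: ${{ secrets.GITHUB_TOKEN }}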
.github/workflows/arc-publish-chart.yaml (6 changed lines)
@@ -40,7 +40,7 @@ jobs:
       publish-chart: ${{ steps.publish-chart-step.outputs.publish }}
     steps:
       - name: Checkout
-        uses: actions/checkout@v6
+        uses: actions/checkout@v5
         with:
           fetch-depth: 0

@@ -134,7 +134,7 @@ jobs:
     steps:
       - name: Checkout
-        uses: actions/checkout@v6
+        uses: actions/checkout@v5
         with:
           fetch-depth: 0

@@ -184,7 +184,7 @@
       # this workaround is intended to move the index.yaml to the target repo
       # where the github pages are hosted
       - name: Checkout target repository
-        uses: actions/checkout@v6
+        uses: actions/checkout@v5
         with:
           repository: ${{ env.CHART_TARGET_ORG }}/${{ env.CHART_TARGET_REPO }}
           path: ${{ env.CHART_TARGET_REPO }}
.github/workflows/arc-publish.yaml (2 changed lines)
@@ -39,7 +39,7 @@ jobs:
     if: ${{ !startsWith(github.event.inputs.release_tag_name, 'gha-runner-scale-set-') }}
     steps:
       - name: Checkout
-        uses: actions/checkout@v6
+        uses: actions/checkout@v5

       - uses: actions/setup-go@v6
         with:
.github/workflows/arc-release-runners.yaml (2 changed lines)
@@ -30,7 +30,7 @@ jobs:
   name: Trigger Build and Push of Runner Images
   runs-on: ubuntu-latest
   steps:
-    - uses: actions/checkout@v6
+    - uses: actions/checkout@v5
     - name: Get runner version
       id: versions
       run: |

(The following hunks reference jobs from a second workflow file; its file header was lost in the rendered diff.)

@@ -24,7 +24,7 @@ jobs:
   container_hooks_current_version: ${{ steps.container_hooks_versions.outputs.container_hooks_current_version }}
   container_hooks_latest_version: ${{ steps.container_hooks_versions.outputs.container_hooks_latest_version }}
   steps:
-    - uses: actions/checkout@v6
+    - uses: actions/checkout@v5

     - name: Get runner current and latest versions
       id: runner_versions

@@ -69,7 +69,7 @@ jobs:
       echo "CONTAINER_HOOKS_CURRENT_VERSION=${{ needs.check_versions.outputs.container_hooks_current_version }}"
       echo "CONTAINER_HOOKS_LATEST_VERSION=${{ needs.check_versions.outputs.container_hooks_latest_version }}"

-    - uses: actions/checkout@v6
+    - uses: actions/checkout@v5

    - name: PR Name
      id: pr_name

@@ -124,7 +124,7 @@ jobs:
   PR_NAME: ${{ needs.check_pr.outputs.pr_name }}

   steps:
-    - uses: actions/checkout@v6
+    - uses: actions/checkout@v5

    - name: New branch
      run: git checkout -b update-runner-"$(date +%Y-%m-%d)"
.github/workflows/arc-validate-chart.yaml (2 changed lines)
@@ -40,7 +40,7 @@ jobs:
   runs-on: ubuntu-latest
   steps:
     - name: Checkout
-      uses: actions/checkout@v6
+      uses: actions/checkout@v5
      with:
        fetch-depth: 0
.github/workflows/arc-validate-runners.yaml (4 changed lines)
@@ -24,7 +24,7 @@ jobs:
   name: runner / shellcheck
   runs-on: ubuntu-latest
   steps:
-    - uses: actions/checkout@v6
+    - uses: actions/checkout@v5
     - name: "Run shellcheck"
       run: make shellcheck

@@ -33,7 +33,7 @@ jobs:
   runs-on: ubuntu-latest
   steps:
     - name: Checkout
-      uses: actions/checkout@v6
+      uses: actions/checkout@v5

    - name: Run tests
      run: |
.github/workflows/gha-e2e-tests.yaml (1022 changed lines)
(File diff suppressed because it is too large.)
.github/workflows/gha-publish-chart.yaml (6 changed lines)
@@ -45,7 +45,7 @@ jobs:
   runs-on: ubuntu-latest
   steps:
     - name: Checkout
-      uses: actions/checkout@v6
+      uses: actions/checkout@v5
      with:
        # If inputs.ref is empty, it'll resolve to the default branch
        ref: ${{ inputs.ref }}

@@ -119,7 +119,7 @@ jobs:
   runs-on: ubuntu-latest
   steps:
     - name: Checkout
-      uses: actions/checkout@v6
+      uses: actions/checkout@v5
      with:
        # If inputs.ref is empty, it'll resolve to the default branch
        ref: ${{ inputs.ref }}

@@ -166,7 +166,7 @@ jobs:
   runs-on: ubuntu-latest
   steps:
     - name: Checkout
-      uses: actions/checkout@v6
+      uses: actions/checkout@v5
      with:
        # If inputs.ref is empty, it'll resolve to the default branch
        ref: ${{ inputs.ref }}
.github/workflows/gha-validate-chart.yaml (4 changed lines)
@@ -36,7 +36,7 @@ jobs:
   runs-on: ubuntu-latest
   steps:
     - name: Checkout
-      uses: actions/checkout@v6
+      uses: actions/checkout@v5
      with:
        fetch-depth: 0

@@ -111,7 +111,7 @@ jobs:
   runs-on: ubuntu-latest
   steps:
     - name: Checkout
-      uses: actions/checkout@v6
+      uses: actions/checkout@v5
     - uses: actions/setup-go@v6
       with:
         go-version-file: "go.mod"
.github/workflows/global-publish-canary.yaml (4 changed lines)
@@ -55,7 +55,7 @@ jobs:
   TARGET_REPO: actions-runner-controller
   steps:
     - name: Checkout
-      uses: actions/checkout@v6
+      uses: actions/checkout@v5

    - name: Get Token
      id: get_workflow_token

@@ -90,7 +90,7 @@ jobs:
   runs-on: ubuntu-latest
   steps:
     - name: Checkout
-      uses: actions/checkout@v6
+      uses: actions/checkout@v5

    - name: Login to GitHub Container Registry
      uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef
.github/workflows/global-run-codeql.yaml (2 changed lines)
@@ -25,7 +25,7 @@ jobs:
   security-events: write
   steps:
     - name: Checkout repository
-      uses: actions/checkout@v6
+      uses: actions/checkout@v5

    - name: Install Go
      uses: actions/setup-go@v6

(The following hunk belongs to the first-interaction workflow file; its header was lost in the rendered diff.)

@@ -16,7 +16,7 @@ jobs:
 check_for_first_interaction:
   runs-on: ubuntu-latest
   steps:
-    - uses: actions/checkout@v6
+    - uses: actions/checkout@v5
     - uses: actions/first-interaction@v3
       with:
         repo_token: ${{ secrets.GITHUB_TOKEN }}
.github/workflows/go.yaml (10 changed lines)
@@ -29,7 +29,7 @@ jobs:
 fmt:
   runs-on: ubuntu-latest
   steps:
-    - uses: actions/checkout@v6
+    - uses: actions/checkout@v5
     - uses: actions/setup-go@v6
       with:
         go-version-file: "go.mod"

@@ -42,13 +42,13 @@ jobs:
 lint:
   runs-on: ubuntu-latest
   steps:
-    - uses: actions/checkout@v6
+    - uses: actions/checkout@v5
     - uses: actions/setup-go@v6
       with:
         go-version-file: "go.mod"
         cache: false
     - name: golangci-lint
-      uses: golangci/golangci-lint-action@1e7e51e771db61008b38414a730f564565cf7c20
+      uses: golangci/golangci-lint-action@0a35821d5c230e903fcfe077583637dea1b27b47
       with:
         only-new-issues: true
         version: v2.5.0

@@ -56,7 +56,7 @@ jobs:
 generate:
   runs-on: ubuntu-latest
   steps:
-    - uses: actions/checkout@v6
+    - uses: actions/checkout@v5
     - uses: actions/setup-go@v6
       with:
         go-version-file: "go.mod"

@@ -69,7 +69,7 @@ jobs:
 test:
   runs-on: ubuntu-latest
   steps:
-    - uses: actions/checkout@v6
+    - uses: actions/checkout@v5
     - uses: actions/setup-go@v6
       with:
         go-version-file: "go.mod"
(The next two identical hunks are from the two Helm charts' Chart.yaml files; their file headers were lost in the rendered diff.)

@@ -15,13 +15,13 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.13.1
+version: 0.13.0

 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to
 # follow Semantic Versioning. They should reflect the version the application is using.
 # It is recommended to use it with quotes.
-appVersion: "0.13.1"
+appVersion: "0.13.0"

 home: https://github.com/actions/actions-runner-controller

@@ -15,13 +15,13 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.13.1
+version: 0.13.0

 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to
 # follow Semantic Versioning. They should reflect the version the application is using.
 # It is recommended to use it with quotes.
-appVersion: "0.13.1"
+appVersion: "0.13.0"

 home: https://github.com/actions/actions-runner-controller
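The two hunks above change the chart and app versions in lockstep, which is what pins a deployment to a specific release. For orientation, installing a specific chart version looks like the sketch below; the release name and namespace are assumptions, and the OCI location follows the project's published install instructions:

# A minimal sketch (release name and namespace are assumptions):
helm install arc \
  --namespace arc-systems --create-namespace \
  --version 0.13.0 \
  oci://ghcr.io/actions/actions-runner-controller-charts/gha-runner-scale-set-controller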
(The following hunks are from the AutoscalingListener controller Go source and its test file; the file headers were lost in the rendered diff.)

@@ -211,14 +211,7 @@ func (r *AutoscalingListenerReconciler) Reconcile(ctx context.Context, req ctrl.
 	// TODO: make sure the role binding has the up-to-date role and service account

 	listenerPod := new(corev1.Pod)
-	if err := r.Get(
-		ctx,
-		client.ObjectKey{
-			Namespace: autoscalingListener.Namespace,
-			Name:      autoscalingListener.Name,
-		},
-		listenerPod,
-	); err != nil {
+	if err := r.Get(ctx, client.ObjectKey{Namespace: autoscalingListener.Namespace, Name: autoscalingListener.Name}, listenerPod); err != nil {
 		if !kerrors.IsNotFound(err) {
 			log.Error(err, "Unable to get listener pod", "namespace", autoscalingListener.Namespace, "name", autoscalingListener.Name)
 			return ctrl.Result{}, err

@@ -236,30 +229,37 @@ func (r *AutoscalingListenerReconciler) Reconcile(ctx context.Context, req ctrl.
 	cs := listenerContainerStatus(listenerPod)
 	switch {
-	case listenerPod.Status.Reason == "Evicted":
-		log.Info(
-			"Listener pod is evicted",
-			"phase", listenerPod.Status.Phase,
-			"reason", listenerPod.Status.Reason,
-			"message", listenerPod.Status.Message,
-		)
-
-		return ctrl.Result{}, r.deleteListenerPod(ctx, autoscalingListener, listenerPod, log)
-
 	case cs == nil:
 		log.Info("Listener pod is not ready", "namespace", listenerPod.Namespace, "name", listenerPod.Name)
 		return ctrl.Result{}, nil
 	case cs.State.Terminated != nil:
-		log.Info(
-			"Listener pod is terminated",
-			"namespace", listenerPod.Namespace,
-			"name", listenerPod.Name,
-			"reason", cs.State.Terminated.Reason,
-			"message", cs.State.Terminated.Message,
-		)
-
-		return ctrl.Result{}, r.deleteListenerPod(ctx, autoscalingListener, listenerPod, log)
+		log.Info("Listener pod is terminated", "namespace", listenerPod.Namespace, "name", listenerPod.Name, "reason", cs.State.Terminated.Reason, "message", cs.State.Terminated.Message)
+
+		if err := r.publishRunningListener(autoscalingListener, false); err != nil {
+			log.Error(err, "Unable to publish runner listener down metric", "namespace", listenerPod.Namespace, "name", listenerPod.Name)
+		}
+
+		if listenerPod.DeletionTimestamp.IsZero() {
+			log.Info("Deleting the listener pod", "namespace", listenerPod.Namespace, "name", listenerPod.Name)
+			if err := r.Delete(ctx, listenerPod); err != nil && !kerrors.IsNotFound(err) {
+				log.Error(err, "Unable to delete the listener pod", "namespace", listenerPod.Namespace, "name", listenerPod.Name)
+				return ctrl.Result{}, err
+			}
+
+			// delete the listener config secret as well, so it gets recreated when the listener pod is recreated, with any new data if it exists
+			var configSecret corev1.Secret
+			err := r.Get(ctx, types.NamespacedName{Namespace: autoscalingListener.Namespace, Name: scaleSetListenerConfigName(autoscalingListener)}, &configSecret)
+			switch {
+			case err == nil && configSecret.DeletionTimestamp.IsZero():
+				log.Info("Deleting the listener config secret")
+				if err := r.Delete(ctx, &configSecret); err != nil {
+					return ctrl.Result{}, fmt.Errorf("failed to delete listener config secret: %w", err)
+				}
+			case !kerrors.IsNotFound(err):
+				return ctrl.Result{}, fmt.Errorf("failed to get the listener config secret: %w", err)
+			}
+		}
+		return ctrl.Result{}, nil
 	case cs.State.Running != nil:
 		if err := r.publishRunningListener(autoscalingListener, true); err != nil {
 			log.Error(err, "Unable to publish running listener", "namespace", listenerPod.Namespace, "name", listenerPod.Name)

@@ -269,39 +269,10 @@ func (r *AutoscalingListenerReconciler) Reconcile(ctx context.Context, req ctrl.
 		return ctrl.Result{}, nil
 	}
 	return ctrl.Result{}, nil
 }

-func (r *AutoscalingListenerReconciler) deleteListenerPod(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, listenerPod *corev1.Pod, log logr.Logger) error {
-	if err := r.publishRunningListener(autoscalingListener, false); err != nil {
-		log.Error(err, "Unable to publish runner listener down metric", "namespace", listenerPod.Namespace, "name", listenerPod.Name)
-	}
-
-	if listenerPod.DeletionTimestamp.IsZero() {
-		log.Info("Deleting the listener pod", "namespace", listenerPod.Namespace, "name", listenerPod.Name)
-		if err := r.Delete(ctx, listenerPod); err != nil && !kerrors.IsNotFound(err) {
-			log.Error(err, "Unable to delete the listener pod", "namespace", listenerPod.Namespace, "name", listenerPod.Name)
-			return err
-		}
-
-		// delete the listener config secret as well, so it gets recreated when the listener pod is recreated, with any new data if it exists
-		var configSecret corev1.Secret
-		err := r.Get(ctx, types.NamespacedName{Namespace: autoscalingListener.Namespace, Name: scaleSetListenerConfigName(autoscalingListener)}, &configSecret)
-		switch {
-		case err == nil && configSecret.DeletionTimestamp.IsZero():
-			log.Info("Deleting the listener config secret")
-			if err := r.Delete(ctx, &configSecret); err != nil {
-				return fmt.Errorf("failed to delete listener config secret: %w", err)
-			}
-		case !kerrors.IsNotFound(err):
-			return fmt.Errorf("failed to get the listener config secret: %w", err)
-		}
-	}
-	return nil
-}
-
 func (r *AutoscalingListenerReconciler) cleanupResources(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, logger logr.Logger) (requeue bool, err error) {
 	logger.Info("Cleaning up the listener pod")
 	listenerPod := new(corev1.Pod)

@@ -671,55 +671,6 @@ var _ = Describe("Test AutoScalingListener customization", func() {
 			autoscalingListenerTestInterval,
 		).ShouldNot(BeEquivalentTo(oldPodUID), "Pod should be created")
 	})
-
-	It("Should re-create pod when the listener pod is evicted", func() {
-		pod := new(corev1.Pod)
-		Eventually(
-			func() (string, error) {
-				err := k8sClient.Get(
-					ctx,
-					client.ObjectKey{
-						Name:      autoscalingListener.Name,
-						Namespace: autoscalingListener.Namespace,
-					},
-					pod,
-				)
-				if err != nil {
-					return "", err
-				}
-
-				return pod.Name, nil
-			},
-			autoscalingListenerTestTimeout,
-			autoscalingListenerTestInterval,
-		).Should(
-			BeEquivalentTo(autoscalingListener.Name),
-			"Pod should be created",
-		)
-
-		updated := pod.DeepCopy()
-		oldPodUID := string(pod.UID)
-		updated.Status.Reason = "Evicted"
-		err := k8sClient.Status().Update(ctx, updated)
-		Expect(err).NotTo(HaveOccurred(), "failed to update pod status")
-
-		pod = new(corev1.Pod)
-		Eventually(
-			func() (string, error) {
-				err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingListener.Name, Namespace: autoscalingListener.Namespace}, pod)
-				if err != nil {
-					return "", err
-				}
-
-				return string(pod.UID), nil
-			},
-			autoscalingListenerTestTimeout,
-			autoscalingListenerTestInterval,
-		).ShouldNot(
-			BeEquivalentTo(oldPodUID),
-			"Pod should be created",
-		)
-	})
 })
 })
(The following hunks are from the EphemeralRunner controller Go source; its file header was lost in the rendered diff.)

@@ -312,46 +312,26 @@ func (r *EphemeralRunnerReconciler) Reconcile(ctx context.Context, req ctrl.Requ
 	cs := runnerContainerStatus(pod)
 	switch {
-	case pod.Status.Phase == corev1.PodFailed: // All containers are stopped
-		log.Info("Pod is in failed phase, inspecting runner container status",
-			"podReason", pod.Status.Reason,
-			"podMessage", pod.Status.Message,
-			"podConditions", pod.Status.Conditions,
-		)
-		// If the runner pod did not have chance to start, terminated state may not be set.
-		// Therefore, we should try to restart it.
-		if cs == nil || cs.State.Terminated == nil {
-			log.Info("Runner container does not have state set, deleting pod as failed so it can be restarted")
-			return ctrl.Result{}, r.deleteEphemeralRunnerOrPod(ctx, ephemeralRunner, pod, log)
-		}
+	case cs == nil:
+		// starting, no container state yet
+		log.Info("Waiting for runner container status to be available")
+		return ctrl.Result{}, nil
+	case cs.State.Terminated == nil: // still running or evicted
+		if pod.Status.Phase == corev1.PodFailed && pod.Status.Reason == "Evicted" {
+			log.Info("Pod set the termination phase, but container state is not terminated. Deleting pod",
+				"PodPhase", pod.Status.Phase,
+				"PodReason", pod.Status.Reason,
+				"PodMessage", pod.Status.Message,
+			)

-		if cs.State.Terminated.ExitCode == 0 {
-			log.Info("Runner container has succeeded but pod is in failed phase; Assume successful exit")
-			// If the pod is in a failed state, that means that at least one container exited with non-zero exit code.
-			// If the runner container exits with 0, we assume that the runner has finished successfully.
-			// If side-car container exits with non-zero, it shouldn't affect the runner. Runner exit code
-			// drives the controller's inference of whether the job has succeeded or failed.
-			if err := r.Delete(ctx, ephemeralRunner); err != nil {
-				log.Error(err, "Failed to delete ephemeral runner after successful completion")
+			if err := r.deletePodAsFailed(ctx, ephemeralRunner, pod, log); err != nil {
+				log.Error(err, "failed to delete pod as failed on pod.Status.Phase: Failed")
 				return ctrl.Result{}, err
 			}
 			return ctrl.Result{}, nil
 		}

-		log.Error(
-			errors.New("ephemeral runner container has failed, with runner container exit code non-zero"),
-			"Ephemeral runner container has failed, and runner container termination exit code is non-zero",
-			"containerTerminatedState", cs.State.Terminated,
-		)
-		return ctrl.Result{}, r.deleteEphemeralRunnerOrPod(ctx, ephemeralRunner, pod, log)
-
-	case cs == nil:
-		// starting, no container state yet
-		log.Info("Waiting for runner container status to be available")
-		return ctrl.Result{}, nil
-
-	case cs.State.Terminated == nil: // container is not terminated and pod phase is not failed, so runner is still running
-		log.Info("Runner container is still running; updating ephemeral runner status")
+		log.Info("Ephemeral runner container is still running")
 		if err := r.updateRunStatusFromPod(ctx, ephemeralRunner, pod, log); err != nil {
 			log.Info("Failed to update ephemeral runner status. Requeue to not miss this event")
 			return ctrl.Result{}, err

@@ -360,7 +340,36 @@ func (r *EphemeralRunnerReconciler) Reconcile(ctx context.Context, req ctrl.Requ
 	case cs.State.Terminated.ExitCode != 0: // failed
 		log.Info("Ephemeral runner container failed", "exitCode", cs.State.Terminated.ExitCode)
-		return ctrl.Result{}, r.deleteEphemeralRunnerOrPod(ctx, ephemeralRunner, pod, log)
+		if ephemeralRunner.HasJob() {
+			log.Error(
+				errors.New("ephemeral runner has a job assigned, but the pod has failed"),
+				"Ephemeral runner either has faulty entrypoint or something external killing the runner",
+			)
+			log.Info("Deleting the ephemeral runner that has a job assigned but the pod has failed")
+			if err := r.Delete(ctx, ephemeralRunner); err != nil {
+				log.Error(err, "Failed to delete the ephemeral runner that has a job assigned but the pod has failed")
+				return ctrl.Result{}, err
+			}
+
+			log.Info("Deleted the ephemeral runner that has a job assigned but the pod has failed")
+			log.Info("Trying to remove the runner from the service")
+			actionsClient, err := r.GetActionsService(ctx, ephemeralRunner)
+			if err != nil {
+				log.Error(err, "Failed to get actions client for removing the runner from the service")
+				return ctrl.Result{}, nil
+			}
+			if err := actionsClient.RemoveRunner(ctx, int64(ephemeralRunner.Status.RunnerId)); err != nil {
+				log.Error(err, "Failed to remove the runner from the service")
+				return ctrl.Result{}, nil
+			}
+			log.Info("Removed the runner from the service")
+			return ctrl.Result{}, nil
+		}
+		if err := r.deletePodAsFailed(ctx, ephemeralRunner, pod, log); err != nil {
+			log.Error(err, "Failed to delete runner pod on failure")
+			return ctrl.Result{}, err
+		}
+		return ctrl.Result{}, nil

 	default: // succeeded
 		log.Info("Ephemeral runner has finished successfully, deleting ephemeral runner", "exitCode", cs.State.Terminated.ExitCode)

@@ -372,41 +381,6 @@
 	}
 }

-func (r *EphemeralRunnerReconciler) deleteEphemeralRunnerOrPod(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, pod *corev1.Pod, log logr.Logger) error {
-	if ephemeralRunner.HasJob() {
-		log.Error(
-			errors.New("ephemeral runner has a job assigned, but the pod has failed"),
-			"Ephemeral runner either has faulty entrypoint or something external killing the runner",
-		)
-		log.Info("Deleting the ephemeral runner that has a job assigned but the pod has failed")
-		if err := r.Delete(ctx, ephemeralRunner); err != nil {
-			log.Error(err, "Failed to delete the ephemeral runner that has a job assigned but the pod has failed")
-			return err
-		}
-
-		log.Info("Deleted the ephemeral runner that has a job assigned but the pod has failed")
-		log.Info("Trying to remove the runner from the service")
-		actionsClient, err := r.GetActionsService(ctx, ephemeralRunner)
-		if err != nil {
-			log.Error(err, "Failed to get actions client for removing the runner from the service")
-			return nil
-		}
-		if err := actionsClient.RemoveRunner(ctx, int64(ephemeralRunner.Status.RunnerId)); err != nil {
-			log.Error(err, "Failed to remove the runner from the service")
-			return nil
-		}
-		log.Info("Removed the runner from the service")
-		return nil
-	}
-
-	if err := r.deletePodAsFailed(ctx, ephemeralRunner, pod, log); err != nil {
-		log.Error(err, "Failed to delete runner pod on failure")
-		return err
-	}
-
-	return nil
-}
-
 func (r *EphemeralRunnerReconciler) cleanupRunnerFromService(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) (ok bool, err error) {
 	if err := r.deleteRunnerFromService(ctx, ephemeralRunner, log); err != nil {
 		actionsError := &actions.ActionsError{}

@@ -572,8 +546,6 @@ func (r *EphemeralRunnerReconciler) markAsFailed(ctx context.Context, ephemeralR
 // deletePodAsFailed is responsible for deleting the pod and updating the .Status.Failures for tracking failure count.
 // It should not be responsible for setting the status to Failed.
-//
-// It should be called by deleteEphemeralRunnerOrPod which is responsible for deciding whether to delete the EphemeralRunner or just the Pod.
 func (r *EphemeralRunnerReconciler) deletePodAsFailed(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, pod *corev1.Pod, log logr.Logger) error {
 	if pod.DeletionTimestamp.IsZero() {
 		log.Info("Deleting the ephemeral runner pod", "podId", pod.UID)
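As a reading aid for the reconcile hunks above, here is a minimal paraphrase of the decision logic, with simplified stand-in types — a sketch of the control flow, not the controller's actual code or signatures:

package sketch

// containerState is a simplified stand-in for the runner container's status
// (the real controller reads cs.State.Terminated and its ExitCode).
type containerState struct {
	terminated bool
	exitCode   int32
}

type action string

const (
	waitForStatus action = "wait: no container status yet, or still running"
	restartPod    action = "delete pod as failed so it is re-created"
	deleteRunner  action = "delete the ephemeral runner (success, or job lost)"
)

// decide paraphrases the switch in the hunks above: no status yet means wait;
// an evicted-but-not-terminated pod is restarted; a non-zero exit deletes the
// runner when a job was assigned (the job cannot be retried) and restarts the
// pod otherwise; a zero exit is treated as success.
func decide(cs *containerState, podFailed, evicted, hasJob bool) action {
	switch {
	case cs == nil:
		return waitForStatus
	case !cs.terminated:
		if podFailed && evicted {
			return restartPod
		}
		return waitForStatus
	case cs.exitCode != 0:
		if hasJob {
			return deleteRunner
		}
		return restartPod
	default:
		return deleteRunner
	}
}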
(The next two hunks are from the EphemeralRunner controller test file; its header was lost in the rendered diff.)

@@ -261,238 +261,6 @@
 		).Should(BeTrue(), "Ephemeral runner should eventually be deleted")
 	})

-	It("It should delete ephemeral runner when pod failed before runner state is recorded and job assigned", func() {
-		er := new(v1alpha1.EphemeralRunner)
-		Eventually(func() error {
-			return k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, er)
-		}, ephemeralRunnerTimeout, ephemeralRunnerInterval).Should(Succeed(), "failed to get ephemeral runner")
-
-		er.Status.JobID = "1"
-		err := k8sClient.Status().Update(ctx, er)
-		Expect(err).To(BeNil(), "failed to update ephemeral runner status")
-
-		Eventually(func() (string, error) {
-			current := new(v1alpha1.EphemeralRunner)
-			if err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, current); err != nil {
-				return "", err
-			}
-			return current.Status.JobID, nil
-		}, ephemeralRunnerTimeout, ephemeralRunnerInterval).Should(BeEquivalentTo("1"))
-
-		pod := new(corev1.Pod)
-		Eventually(func() (bool, error) {
-			if err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, pod); err != nil {
-				return false, err
-			}
-			return true, nil
-		}, ephemeralRunnerTimeout, ephemeralRunnerInterval).Should(BeEquivalentTo(true))
-
-		pod.Status.Phase = corev1.PodFailed
-		pod.Status.ContainerStatuses = nil
-		err = k8sClient.Status().Update(ctx, pod)
-		Expect(err).To(BeNil(), "Failed to update pod status")
-
-		Eventually(func() bool {
-			check := new(v1alpha1.EphemeralRunner)
-			err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, check)
-			return kerrors.IsNotFound(err)
-		}, ephemeralRunnerTimeout, ephemeralRunnerInterval).Should(BeTrue(), "Ephemeral runner should eventually be deleted")
-	})
-
-	It("It should delete ephemeral runner when pod failed before runner state is recorded and job not assigned", func() {
-		pod := new(corev1.Pod)
-		Eventually(func() (bool, error) {
-			if err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, pod); err != nil {
-				return false, err
-			}
-			return true, nil
-		}, ephemeralRunnerTimeout, ephemeralRunnerInterval).Should(BeEquivalentTo(true))
-
-		oldPodUID := pod.UID
-
-		pod.Status.Phase = corev1.PodFailed
-		pod.Status.ContainerStatuses = nil
-		err := k8sClient.Status().Update(ctx, pod)
-		Expect(err).To(BeNil(), "Failed to update pod status")
-
-		Eventually(
-			func() (int, error) {
-				updated := new(v1alpha1.EphemeralRunner)
-				err := k8sClient.Get(
-					ctx,
-					client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace},
-					updated,
-				)
-				if err != nil {
-					return 0, err
-				}
-				return len(updated.Status.Failures), nil
-			},
-			ephemeralRunnerTimeout,
-			ephemeralRunnerInterval,
-		).Should(BeEquivalentTo(1))
-
-		Eventually(
-			func() (bool, error) {
-				newPod := new(corev1.Pod)
-				err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, newPod)
-				if err != nil {
-					return false, err
-				}
-				return newPod.UID != oldPodUID, nil
-			},
-			ephemeralRunnerTimeout,
-			ephemeralRunnerInterval,
-		).Should(BeTrue(), "Pod should be re-created")
-	})
-
-	It("It should treat pod failed with runner container exit 0 as success with job id", func() {
-		er := new(v1alpha1.EphemeralRunner)
-		Eventually(func() error {
-			return k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, er)
-		}, ephemeralRunnerTimeout, ephemeralRunnerInterval).Should(Succeed(), "failed to get ephemeral runner")
-
-		er.Status.JobID = "1"
-		err := k8sClient.Status().Update(ctx, er)
-		Expect(err).To(BeNil(), "failed to update ephemeral runner status")
-
-		pod := new(corev1.Pod)
-		Eventually(
-			func() error {
-				if err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, pod); err != nil {
-					return err
-				}
-				return nil
-			},
-			ephemeralRunnerTimeout,
-			ephemeralRunnerInterval,
-		).Should(Succeed(), "failed to get pod")
-
-		pod.Status.Phase = corev1.PodFailed
-		pod.Status.ContainerStatuses = append(pod.Status.ContainerStatuses, corev1.ContainerStatus{
-			Name: v1alpha1.EphemeralRunnerContainerName,
-			State: corev1.ContainerState{
-				Terminated: &corev1.ContainerStateTerminated{
-					ExitCode: 0,
-				},
-			},
-		})
-		err = k8sClient.Status().Update(ctx, pod)
-		Expect(err).To(BeNil(), "Failed to update pod status")
-
-		Eventually(
-			func() bool {
-				check := new(v1alpha1.EphemeralRunner)
-				err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, check)
-				return kerrors.IsNotFound(err)
-			},
-			ephemeralRunnerTimeout,
-			ephemeralRunnerInterval,
-		).Should(BeTrue(), "Ephemeral runner should eventually be deleted")
-	})
-
-	It("It should treat pod failed with runner container exit 0 as success with no job id", func() {
-		pod := new(corev1.Pod)
-		Eventually(
-			func() error {
-				if err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, pod); err != nil {
-					return err
-				}
-				return nil
-			},
-			ephemeralRunnerTimeout,
-			ephemeralRunnerInterval,
-		).Should(Succeed(), "failed to get pod")
-
-		pod.Status.Phase = corev1.PodFailed
-		pod.Status.ContainerStatuses = append(pod.Status.ContainerStatuses, corev1.ContainerStatus{
-			Name: v1alpha1.EphemeralRunnerContainerName,
-			State: corev1.ContainerState{
-				Terminated: &corev1.ContainerStateTerminated{
-					ExitCode: 0,
-				},
-			},
-		})
-		err := k8sClient.Status().Update(ctx, pod)
-		Expect(err).To(BeNil(), "Failed to update pod status")
-
-		Eventually(
-			func() bool {
-				check := new(v1alpha1.EphemeralRunner)
-				err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, check)
-				return kerrors.IsNotFound(err)
-			},
-			ephemeralRunnerTimeout,
-			ephemeralRunnerInterval,
-		).Should(BeTrue(), "Ephemeral runner should eventually be deleted")
-	})
-
-	It("It should mark as failed when job is not assigned and pod is failed", func() {
-		er := new(v1alpha1.EphemeralRunner)
-		Eventually(func() error {
-			return k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, er)
-		},
-			ephemeralRunnerTimeout,
-			ephemeralRunnerInterval,
-		).Should(Succeed(), "failed to get ephemeral runner")
-
-		pod := new(corev1.Pod)
-		Eventually(
-			func() error {
-				if err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, pod); err != nil {
-					return err
-				}
-				return nil
-			},
-			ephemeralRunnerTimeout,
-			ephemeralRunnerInterval,
-		).Should(Succeed(), "failed to get pod")
-
-		pod.Status.Phase = corev1.PodFailed
-		oldPodUID := pod.UID
-		pod.Status.ContainerStatuses = append(pod.Status.ContainerStatuses, corev1.ContainerStatus{
-			Name: v1alpha1.EphemeralRunnerContainerName,
-			State: corev1.ContainerState{
-				Terminated: &corev1.ContainerStateTerminated{
-					ExitCode: 1,
-				},
-			},
-		})
-
-		err := k8sClient.Status().Update(ctx, pod)
-		Expect(err).To(BeNil(), "Failed to update pod status")
-
-		Eventually(
-			func() (int, error) {
-				updated := new(v1alpha1.EphemeralRunner)
-				err := k8sClient.Get(
-					ctx,
-					client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace},
-					updated,
-				)
-				if err != nil {
-					return 0, err
-				}
-				return len(updated.Status.Failures), nil
-			},
-			ephemeralRunnerTimeout,
-			ephemeralRunnerInterval,
-		).Should(BeEquivalentTo(1))
-
-		Eventually(
-			func() (bool, error) {
-				newPod := new(corev1.Pod)
-				err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, newPod)
-				if err != nil {
-					return false, err
-				}
-				return newPod.UID != oldPodUID, nil
-			},
-			ephemeralRunnerTimeout,
-			ephemeralRunnerInterval,
-		).Should(BeTrue(), "Pod should be re-created")
-	})

 	It("It should failed if a pod template is invalid", func() {
 		invalideEphemeralRunner := newExampleRunner("invalid-ephemeral-runner", autoscalingNS.Name, configSecret.Name)
 		invalideEphemeralRunner.Spec.Spec.PriorityClassName = "notexist"

@@ -977,52 +745,6 @@
 		).Should(BeEquivalentTo(true))
 	})

-	It("It should re-create pod on reason starting with OutOf", func() {
-		pod := new(corev1.Pod)
-		Eventually(
-			func() (bool, error) {
-				err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, pod)
-				if err != nil {
-					return false, err
-				}
-				return true, nil
-			},
-			ephemeralRunnerTimeout,
-			ephemeralRunnerInterval,
-		).Should(BeEquivalentTo(true))
-
-		pod.Status.Phase = corev1.PodFailed
-		pod.Status.Reason = "OutOfpods"
-		pod.Status.ContainerStatuses = append(pod.Status.ContainerStatuses, corev1.ContainerStatus{
-			Name:  v1alpha1.EphemeralRunnerContainerName,
-			State: corev1.ContainerState{},
-		})
-		err := k8sClient.Status().Update(ctx, pod)
-		Expect(err).To(BeNil(), "failed to patch pod status")
-
-		updated := new(v1alpha1.EphemeralRunner)
-		Eventually(func() (bool, error) {
-			err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, updated)
-			if err != nil {
-				return false, err
-			}
-			return len(updated.Status.Failures) == 1, nil
-		}, ephemeralRunnerTimeout, ephemeralRunnerInterval).Should(BeEquivalentTo(true))
-
-		// should re-create after failure
-		Eventually(
-			func() (bool, error) {
-				pod := new(corev1.Pod)
-				if err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, pod); err != nil {
-					return false, err
-				}
-				return true, nil
-			},
-			ephemeralRunnerTimeout,
-			ephemeralRunnerInterval,
-		).Should(BeEquivalentTo(true))
-	})

 	It("It should not set the phase to succeeded without pod termination status", func() {
 		pod := new(corev1.Pod)
 		Eventually(
(The next two hunks are from the gha-runner-scale-set changelog document; its file header was lost in the rendered diff.)

@@ -43,33 +43,6 @@ You can follow [this troubleshooting guide](https://docs.github.com/en/actions/h

 ## Changelog

-### 0.13.1
-
-1. Make restart pod more flexible to different failure scenarios [#4340](https://github.com/actions/actions-runner-controller/pull/4340)
-1. Bump golangci/golangci-lint-action from 9.1.0 to 9.2.0 in the actions group [#4335](https://github.com/actions/actions-runner-controller/pull/4335)
-1. Bump the gomod group across 1 directory with 10 updates [#4338](https://github.com/actions/actions-runner-controller/pull/4338)
-1. Re-schedule if the failed reason starts with OutOf [#4336](https://github.com/actions/actions-runner-controller/pull/4336)
-1. Restart the listener if pod is evicted [#4332](https://github.com/actions/actions-runner-controller/pull/4332)
-1. Typo in test name caused test to not execute [#4330](https://github.com/actions/actions-runner-controller/pull/4330)
-1. Bump the actions group with 3 updates [#4328](https://github.com/actions/actions-runner-controller/pull/4328)
-1. Remove old e2e tests [#4325](https://github.com/actions/actions-runner-controller/pull/4325)
-1. Bump the actions group across 1 directory with 4 updates [#4309](https://github.com/actions/actions-runner-controller/pull/4309)
-1. Bump golang.org/x/crypto from 0.43.0 to 0.45.0 [#4318](https://github.com/actions/actions-runner-controller/pull/4318)
-1. Add support for giving kubernetes mode scaleset service account additional permissions [#4282](https://github.com/actions/actions-runner-controller/pull/4282)
-1. Bump the gomod group across 1 directory with 11 updates [#4317](https://github.com/actions/actions-runner-controller/pull/4317)
-1. Code style changes on the controller [#4324](https://github.com/actions/actions-runner-controller/pull/4324)
-1. Add ephemeral runner finalizer during creation and check finalizer without requeue [#4320](https://github.com/actions/actions-runner-controller/pull/4320)
-1. e2e: move from deprecated openebs charts to new registry [#4321](https://github.com/actions/actions-runner-controller/pull/4321)
-1. Create e2e test suite [#3136](https://github.com/actions/actions-runner-controller/pull/3136)
-1. Handle resource quota on status forbidden by retrying [#4305](https://github.com/actions/actions-runner-controller/pull/4305)
-1. Use combination of namespace, GitHub URL, and runner group when hashing the listener name [#4299](https://github.com/actions/actions-runner-controller/pull/4299)
-1. Bump kubebuilder tools in the workflow [#4300](https://github.com/actions/actions-runner-controller/pull/4300)
-1. Bump timeout for min runners workflow to 30s [#4306](https://github.com/actions/actions-runner-controller/pull/4306)
-1. Fix for code scanning alert no. 5: Workflow does not contain permissions [#4292](https://github.com/actions/actions-runner-controller/pull/4292)
-1. Delete listener resources without requeueing on each call [#4289](https://github.com/actions/actions-runner-controller/pull/4289)
-1. Fix first interaction action [#4290](https://github.com/actions/actions-runner-controller/pull/4290)
-1. Bump github/codeql-action from 3 to 4 in the actions group [#4281](https://github.com/actions/actions-runner-controller/pull/4281)
-
 ### 0.13.0

 1. Remove workflow actions version comments since upgrades are done via dependabot [#4161](https://github.com/actions/actions-runner-controller/pull/4161)

@@ -85,6 +58,7 @@ You can follow [this troubleshooting guide](https://docs.github.com/en/actions/h
 1. Bump the gomod group across 1 directory with 4 updates [#4277](https://github.com/actions/actions-runner-controller/pull/4277)
 1. Bump all dependencies [#4266](https://github.com/actions/actions-runner-controller/pull/4266)

 ### 0.12.1

 1. Fix indentation of startupProbe attributes in dind sidecar [#4126](https://github.com/actions/actions-runner-controller/pull/4126)
@@ -15,12 +15,10 @@ import (
func TestGitHubConfig(t *testing.T) {
	t.Run("when given a valid URL", func(t *testing.T) {
	tests := []struct {
		name      string
		configURL string
		expected  *actions.GitHubConfig
	}{
		{
			name:      "repository URL",
			configURL: "https://github.com/org/repo",
			expected: &actions.GitHubConfig{
				Scope: actions.GitHubScopeRepository,
@@ -31,7 +29,6 @@ func TestGitHubConfig(t *testing.T) {
			},
		},
		{
			name:      "repository URL with trailing slash",
			configURL: "https://github.com/org/repo/",
			expected: &actions.GitHubConfig{
				Scope: actions.GitHubScopeRepository,
@@ -42,7 +39,6 @@ func TestGitHubConfig(t *testing.T) {
			},
		},
		{
			name:      "organization URL",
			configURL: "https://github.com/org",
			expected: &actions.GitHubConfig{
				Scope: actions.GitHubScopeOrganization,
@@ -53,7 +49,6 @@ func TestGitHubConfig(t *testing.T) {
			},
		},
		{
			name:      "enterprise URL",
			configURL: "https://github.com/enterprises/my-enterprise",
			expected: &actions.GitHubConfig{
				Scope: actions.GitHubScopeEnterprise,
@@ -64,7 +59,6 @@ func TestGitHubConfig(t *testing.T) {
			},
		},
		{
			name:      "enterprise URL with trailing slash",
			configURL: "https://github.com/enterprises/my-enterprise/",
			expected: &actions.GitHubConfig{
				Scope: actions.GitHubScopeEnterprise,
@@ -75,7 +69,6 @@ func TestGitHubConfig(t *testing.T) {
			},
		},
		{
			name:      "organization URL with www",
			configURL: "https://www.github.com/org",
			expected: &actions.GitHubConfig{
				Scope: actions.GitHubScopeOrganization,
@@ -86,7 +79,6 @@ func TestGitHubConfig(t *testing.T) {
			},
		},
		{
			name:      "organization URL with www and trailing slash",
			configURL: "https://www.github.com/org/",
			expected: &actions.GitHubConfig{
				Scope: actions.GitHubScopeOrganization,
@@ -97,7 +89,6 @@ func TestGitHubConfig(t *testing.T) {
			},
		},
		{
			name:      "github local URL",
			configURL: "https://github.localhost/org",
			expected: &actions.GitHubConfig{
				Scope: actions.GitHubScopeOrganization,
@@ -108,7 +99,6 @@ func TestGitHubConfig(t *testing.T) {
			},
		},
		{
			name:      "github local org URL",
			configURL: "https://my-ghes.com/org",
			expected: &actions.GitHubConfig{
				Scope: actions.GitHubScopeOrganization,
@@ -119,7 +109,6 @@ func TestGitHubConfig(t *testing.T) {
			},
		},
		{
			name:      "github local URL with trailing slash",
			configURL: "https://my-ghes.com/org/",
			expected: &actions.GitHubConfig{
				Scope: actions.GitHubScopeOrganization,
@@ -130,7 +119,6 @@ func TestGitHubConfig(t *testing.T) {
			},
		},
		{
			name:      "github local URL with ghe.com",
			configURL: "https://my-ghes.ghe.com/org/",
			expected: &actions.GitHubConfig{
				Scope: actions.GitHubScopeOrganization,
@@ -143,7 +131,7 @@ func TestGitHubConfig(t *testing.T) {
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
		t.Run(test.configURL, func(t *testing.T) {
			parsedURL, err := url.Parse(strings.Trim(test.configURL, "/"))
			require.NoError(t, err)
			test.expected.ConfigURL = parsedURL

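The table above fixes the URL-to-scope rules: two path segments mean a repository, a single segment means an organization, an `enterprises/<name>` path means an enterprise, and `www.` hosts and trailing slashes are normalized away. Below is a minimal sketch of that mapping, with invented helper and scope names (the real types are `actions.GitHubConfig` and the `actions.GitHubScope*` constants).

```go
package main

import (
	"fmt"
	"net/url"
	"strings"
)

type Scope string

const (
	ScopeRepository   Scope = "repository"
	ScopeOrganization Scope = "organization"
	ScopeEnterprise   Scope = "enterprise"
)

// scopeFromConfigURL infers the runner scope from the shape of the URL path,
// mirroring the cases exercised by TestGitHubConfig above.
func scopeFromConfigURL(raw string) (Scope, error) {
	u, err := url.Parse(strings.TrimRight(raw, "/")) // trailing slashes are insignificant
	if err != nil {
		return "", err
	}
	parts := strings.Split(strings.Trim(u.Path, "/"), "/")
	switch {
	case len(parts) == 2 && parts[0] == "enterprises":
		return ScopeEnterprise, nil // e.g. https://github.com/enterprises/my-enterprise
	case len(parts) == 2:
		return ScopeRepository, nil // e.g. https://github.com/org/repo
	case len(parts) == 1 && parts[0] != "":
		return ScopeOrganization, nil // e.g. https://my-ghes.com/org or https://www.github.com/org
	}
	return "", fmt.Errorf("unsupported config URL: %q", raw)
}

func main() {
	for _, raw := range []string{
		"https://github.com/org/repo/",
		"https://www.github.com/org",
		"https://github.com/enterprises/my-enterprise",
	} {
		scope, err := scopeFromConfigURL(raw)
		fmt.Println(raw, "->", scope, err)
	}
}
```
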
@@ -119,84 +119,88 @@ func TestGitHubAPIError(t *testing.T) {
	})
}

func TestParseActionsErrorFromResponse(t *testing.T) {
func ParseActionsErrorFromResponse(t *testing.T) {
	t.Run("empty content length", func(t *testing.T) {
		response := &http.Response{
			ContentLength: 0,
			Header:        http.Header{},
			StatusCode:    404,
			Header: http.Header{
				actions.HeaderActionsActivityID: []string{"activity-id"},
			},
			StatusCode: 404,
		}
		response.Header.Add(actions.HeaderActionsActivityID, "activity-id")

		err := actions.ParseActionsErrorFromResponse(response)
		require.Error(t, err)
		assert.Equal(t, "activity-id", err.(*actions.ActionsError).ActivityID)
		assert.Equal(t, 404, err.(*actions.ActionsError).StatusCode)
		assert.Equal(t, "unknown exception", err.(*actions.ActionsError).Err.Error())
		assert.Equal(t, err.(*actions.ActionsError).ActivityID, "activity-id")
		assert.Equal(t, err.(*actions.ActionsError).StatusCode, 404)
		assert.Equal(t, err.(*actions.ActionsError).Err.Error(), "unknown exception")
	})

	t.Run("contains text plain error", func(t *testing.T) {
		errorMessage := "example error message"
		response := &http.Response{
			ContentLength: int64(len(errorMessage)),
			StatusCode:    404,
			Header:        http.Header{},
			Body:          io.NopCloser(strings.NewReader(errorMessage)),
			Header: http.Header{
				actions.HeaderActionsActivityID: []string{"activity-id"},
				"Content-Type":                  []string{"text/plain"},
			},
			StatusCode: 404,
			Body:       io.NopCloser(strings.NewReader(errorMessage)),
		}
		response.Header.Add(actions.HeaderActionsActivityID, "activity-id")
		response.Header.Add("Content-Type", "text/plain")

		err := actions.ParseActionsErrorFromResponse(response)
		require.Error(t, err)
		var actionsError *actions.ActionsError
		require.ErrorAs(t, err, &actionsError)
		assert.Equal(t, "activity-id", actionsError.ActivityID)
		assert.Equal(t, 404, actionsError.StatusCode)
		assert.Equal(t, errorMessage, actionsError.Err.Error())
		assert.ErrorAs(t, err, &actionsError)
		assert.Equal(t, actionsError.ActivityID, "activity-id")
		assert.Equal(t, actionsError.StatusCode, 404)
		assert.Equal(t, actionsError.Err.Error(), errorMessage)
	})

	t.Run("contains json error", func(t *testing.T) {
		errorMessage := `{"typeName":"exception-name","message":"example error message"}`
		response := &http.Response{
			ContentLength: int64(len(errorMessage)),
			Header:        http.Header{},
			StatusCode:    404,
			Body:          io.NopCloser(strings.NewReader(errorMessage)),
			Header: http.Header{
				actions.HeaderActionsActivityID: []string{"activity-id"},
				"Content-Type":                  []string{"application/json"},
			},
			StatusCode: 404,
			Body:       io.NopCloser(strings.NewReader(errorMessage)),
		}
		response.Header.Add(actions.HeaderActionsActivityID, "activity-id")
		response.Header.Add("Content-Type", "application/json")

		err := actions.ParseActionsErrorFromResponse(response)
		require.Error(t, err)
		var actionsError *actions.ActionsError
		require.ErrorAs(t, err, &actionsError)
		assert.Equal(t, "activity-id", actionsError.ActivityID)
		assert.Equal(t, 404, actionsError.StatusCode)
		assert.ErrorAs(t, err, &actionsError)
		assert.Equal(t, actionsError.ActivityID, "activity-id")
		assert.Equal(t, actionsError.StatusCode, 404)

		inner, ok := actionsError.Err.(*actions.ActionsExceptionError)
		require.True(t, ok)
		assert.Equal(t, "exception-name", inner.ExceptionName)
		assert.Equal(t, "example error message", inner.Message)
		assert.Equal(t, inner.ExceptionName, "exception-name")
		assert.Equal(t, inner.Message, "example error message")
	})

	t.Run("wrapped exception error", func(t *testing.T) {
		errorMessage := `{"typeName":"exception-name","message":"example error message"}`
		response := &http.Response{
			ContentLength: int64(len(errorMessage)),
			Header:        http.Header{},
			StatusCode:    404,
			Body:          io.NopCloser(strings.NewReader(errorMessage)),
			Header: http.Header{
				actions.HeaderActionsActivityID: []string{"activity-id"},
				"Content-Type":                  []string{"application/json"},
			},
			StatusCode: 404,
			Body:       io.NopCloser(strings.NewReader(errorMessage)),
		}
		response.Header.Add(actions.HeaderActionsActivityID, "activity-id")
		response.Header.Add("Content-Type", "application/json")

		err := actions.ParseActionsErrorFromResponse(response)
		require.Error(t, err)

		var actionsExceptionError *actions.ActionsExceptionError
		require.ErrorAs(t, err, &actionsExceptionError)
		assert.ErrorAs(t, err, &actionsExceptionError)

		assert.Equal(t, "exception-name", actionsExceptionError.ExceptionName)
		assert.Equal(t, "example error message", actionsExceptionError.Message)
		assert.Equal(t, actionsExceptionError.ExceptionName, "exception-name")
		assert.Equal(t, actionsExceptionError.Message, "example error message")
	})
}

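Two details in the hunk above are easy to miss. First, `go test` only runs functions whose names begin with `Test`, so the `ParseActionsErrorFromResponse(t *testing.T)` variant compiles but never executes. Second, testify's `assert.Equal` signature is `(t, expected, actual)`, so swapping the operands does not change pass/fail behavior but mislabels the values in failure output. A self-contained illustration (the test name and value are invented):

```go
package actions_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

// TestStatusCodeExample runs because its name starts with "Test"; renamed to
// statusCodeExample it would be silently skipped by `go test`.
func TestStatusCodeExample(t *testing.T) {
	got := 404 // stand-in for a value produced by the code under test

	// Expected value first, actual second: on failure testify prints
	// `expected: 404` and `actual: <got>` with the labels the right way round.
	assert.Equal(t, 404, got)
}
```
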
go.mod (32 changes)

@@ -16,25 +16,25 @@ require (
	github.com/google/uuid v1.6.0
	github.com/gorilla/mux v1.8.1
	github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79
	github.com/gruntwork-io/terratest v0.54.0
	github.com/gruntwork-io/terratest v0.53.0
	github.com/hashicorp/go-retryablehttp v0.7.8
	github.com/kelseyhightower/envconfig v1.4.0
	github.com/onsi/ginkgo v1.16.5
	github.com/onsi/ginkgo/v2 v2.27.3
	github.com/onsi/gomega v1.38.3
	github.com/onsi/ginkgo/v2 v2.27.2
	github.com/onsi/gomega v1.38.2
	github.com/prometheus/client_golang v1.23.2
	github.com/stretchr/testify v1.11.1
	github.com/teambition/rrule-go v1.8.2
	go.uber.org/multierr v1.11.0
	go.uber.org/zap v1.27.1
	golang.org/x/net v0.48.0
	golang.org/x/oauth2 v0.34.0
	golang.org/x/sync v0.19.0
	go.uber.org/zap v1.27.0
	golang.org/x/net v0.47.0
	golang.org/x/oauth2 v0.33.0
	golang.org/x/sync v0.18.0
	gomodules.xyz/jsonpatch/v2 v2.5.0
	gopkg.in/yaml.v2 v2.4.0
	k8s.io/api v0.34.3
	k8s.io/apimachinery v0.34.3
	k8s.io/client-go v0.34.3
	k8s.io/api v0.34.2
	k8s.io/apimachinery v0.34.2
	k8s.io/client-go v0.34.2
	k8s.io/utils v0.0.0-20251002143259-bc988d571ff4
	sigs.k8s.io/controller-runtime v0.22.4
	sigs.k8s.io/yaml v1.6.0
@@ -165,14 +165,14 @@ require (
	github.com/xrash/smetrics v0.0.0-20250705151800-55b8f293f342 // indirect
	go.yaml.in/yaml/v2 v2.4.3 // indirect
	go.yaml.in/yaml/v3 v3.0.4 // indirect
	golang.org/x/crypto v0.46.0 // indirect
	golang.org/x/crypto v0.45.0 // indirect
	golang.org/x/exp v0.0.0-20251002181428-27f1f14c8bb9 // indirect
	golang.org/x/mod v0.30.0 // indirect
	golang.org/x/sys v0.39.0 // indirect
	golang.org/x/term v0.38.0 // indirect
	golang.org/x/text v0.32.0 // indirect
	golang.org/x/mod v0.29.0 // indirect
	golang.org/x/sys v0.38.0 // indirect
	golang.org/x/term v0.37.0 // indirect
	golang.org/x/text v0.31.0 // indirect
	golang.org/x/time v0.13.0 // indirect
	golang.org/x/tools v0.39.0 // indirect
	golang.org/x/tools v0.38.0 // indirect
	google.golang.org/protobuf v1.36.10 // indirect
	gopkg.in/evanphx/json-patch.v4 v4.13.0 // indirect
	gopkg.in/inf.v0 v0.9.1 // indirect

go.sum (64 changes)

@@ -233,8 +233,8 @@ github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5T
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA=
github.com/gruntwork-io/go-commons v0.17.2 h1:14dsCJ7M5Vv2X3BIPKeG9Kdy6vTMGhM8L4WZazxfTuY=
github.com/gruntwork-io/go-commons v0.17.2/go.mod h1:zs7Q2AbUKuTarBPy19CIxJVUX/rBamfW8IwuWKniWkE=
github.com/gruntwork-io/terratest v0.54.0 h1:JOVATYDpU0NAPbEkgYUP50BR2m45UGiR4dbs20sKzck=
github.com/gruntwork-io/terratest v0.54.0/go.mod h1:QvwQWZMTJmJB4E0d1Uc18quQm7+X53liKKp+fJSuaKA=
github.com/gruntwork-io/terratest v0.53.0 h1:r5U3nfrQCTGvnlJIIh6R5g8z8dwRcjNESYO/wYyOXsI=
github.com/gruntwork-io/terratest v0.53.0/go.mod h1:y2Evi+Ac04QpzF3mbRPqrBjipDN7gjqlw6+OZoy2vX4=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
@@ -316,12 +316,12 @@ github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
github.com/onsi/ginkgo/v2 v2.27.3 h1:ICsZJ8JoYafeXFFlFAG75a7CxMsJHwgKwtO+82SE9L8=
github.com/onsi/ginkgo/v2 v2.27.3/go.mod h1:ArE1D/XhNXBXCBkKOLkbsb2c81dQHCRcF5zwn/ykDRo=
github.com/onsi/ginkgo/v2 v2.27.2 h1:LzwLj0b89qtIy6SSASkzlNvX6WktqurSHwkk2ipF/Ns=
github.com/onsi/ginkgo/v2 v2.27.2/go.mod h1:ArE1D/XhNXBXCBkKOLkbsb2c81dQHCRcF5zwn/ykDRo=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.38.3 h1:eTX+W6dobAYfFeGC2PV6RwXRu/MyT+cQguijutvkpSM=
github.com/onsi/gomega v1.38.3/go.mod h1:ZCU1pkQcXDO5Sl9/VVEGlDyp+zm0m1cmeG5TOzLgdh4=
github.com/onsi/gomega v1.38.2 h1:eZCjf2xjZAqe+LeWvKb5weQ+NcPwX84kqJ0cZNxok2A=
github.com/onsi/gomega v1.38.2/go.mod h1:W2MJcYxRGV63b418Ai34Ud0hEdTVXq9NW9+Sx6uXf3k=
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ=
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
@@ -382,8 +382,8 @@ go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/zap v1.27.1 h1:08RqriUEv8+ArZRYSTXy1LeBScaMpVSTBhCeaZYfMYc=
go.uber.org/zap v1.27.1/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0=
go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8=
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
@@ -391,30 +391,30 @@ go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.46.0 h1:cKRW/pmt1pKAfetfu+RCEvjvZkA9RimPbh7bhFjGVBU=
golang.org/x/crypto v0.46.0/go.mod h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU0=
golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q=
golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4=
golang.org/x/exp v0.0.0-20251002181428-27f1f14c8bb9 h1:TQwNpfvNkxAVlItJf6Cr5JTsVZoC/Sj7K3OZv2Pc14A=
golang.org/x/exp v0.0.0-20251002181428-27f1f14c8bb9/go.mod h1:TwQYMMnGpvZyc+JpB/UAuTNIsVJifOlSkrZkhcvpVUk=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk=
golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc=
golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA=
golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU=
golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY=
golang.org/x/oauth2 v0.34.0 h1:hqK/t4AKgbqWkdkcAeI8XLmbK+4m4G5YeQRrmiotGlw=
golang.org/x/oauth2 v0.34.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
golang.org/x/oauth2 v0.33.0 h1:4Q+qn+E5z8gPRJfmRy7C2gGG3T4jIprK6aSYgTXGRpo=
golang.org/x/oauth2 v0.33.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -426,14 +426,14 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk=
golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/term v0.38.0 h1:PQ5pkm/rLO6HnxFR7N2lJHOZX6Kez5Y1gDSJla6jo7Q=
golang.org/x/term v0.38.0/go.mod h1:bSEAKrOT1W+VSu9TSCMtoGEOUcKxOKgl3LE5QEF/xVg=
golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU=
golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU=
golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY=
golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
golang.org/x/time v0.13.0 h1:eUlYslOIt32DgYD6utsuUeHs4d7AsEYLuIAdg7FlYgI=
golang.org/x/time v0.13.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -441,8 +441,8 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ=
golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ=
golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ=
golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -475,14 +475,14 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
k8s.io/api v0.34.3 h1:D12sTP257/jSH2vHV2EDYrb16bS7ULlHpdNdNhEw2S4=
k8s.io/api v0.34.3/go.mod h1:PyVQBF886Q5RSQZOim7DybQjAbVs8g7gwJNhGtY5MBk=
k8s.io/api v0.34.2 h1:fsSUNZhV+bnL6Aqrp6O7lMTy6o5x2C4XLjnh//8SLYY=
k8s.io/api v0.34.2/go.mod h1:MMBPaWlED2a8w4RSeanD76f7opUoypY8TFYkSM+3XHw=
k8s.io/apiextensions-apiserver v0.34.1 h1:NNPBva8FNAPt1iSVwIE0FsdrVriRXMsaWFMqJbII2CI=
k8s.io/apiextensions-apiserver v0.34.1/go.mod h1:hP9Rld3zF5Ay2Of3BeEpLAToP+l4s5UlxiHfqRaRcMc=
k8s.io/apimachinery v0.34.3 h1:/TB+SFEiQvN9HPldtlWOTp0hWbJ+fjU+wkxysf/aQnE=
k8s.io/apimachinery v0.34.3/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw=
k8s.io/client-go v0.34.3 h1:wtYtpzy/OPNYf7WyNBTj3iUA0XaBHVqhv4Iv3tbrF5A=
k8s.io/client-go v0.34.3/go.mod h1:OxxeYagaP9Kdf78UrKLa3YZixMCfP6bgPwPwNBQBzpM=
k8s.io/apimachinery v0.34.2 h1:zQ12Uk3eMHPxrsbUJgNF8bTauTVR2WgqJsTmwTE/NW4=
k8s.io/apimachinery v0.34.2/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw=
k8s.io/client-go v0.34.2 h1:Co6XiknN+uUZqiddlfAjT68184/37PS4QAzYvQvDR8M=
k8s.io/client-go v0.34.2/go.mod h1:2VYDl1XXJsdcAxw7BenFslRQX28Dxz91U9MWKjX97fE=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 h1:Y3gxNAuB0OBLImH611+UDZcmKS3g6CthxToOb37KgwE=