Compare commits

6 Commits

Author · SHA1 · Message · Date

github-actions[bot] · 5e85311142 · Updates: container-hooks to v0.7.0 · 2025-04-18 09:06:11 +00:00

Ryosei Karaki · f832b0b254 · upgrade(golangci-lint): v2.1.2 (#4023) · 2025-04-17 16:14:31 +02:00
Signed-off-by: karamaru-alpha <mrnk3078@gmail.com>

Nikola Jokic · a33d34a036 · Pin third party actions (#3981) · 2025-04-17 12:19:15 +02:00

Nikola Jokic · 15990d492d · Include more context to errors raised by github/actions client (#4032) · 2025-04-11 11:36:15 +02:00
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

dependabot[bot] · 462db4dfc8 · Bump the gomod group across 1 directory with 7 updates (#4008) · 2025-04-07 16:51:07 +02:00
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Nikola Jokic <jokicnikola07@gmail.com>

David Maxwell · ea27448da5 · Fix busy runners metric (#4016) · 2025-04-04 17:17:09 +02:00
54 changed files with 643 additions and 640 deletions
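
Most of this compare is CI housekeeping: a33d34a036 pins third-party actions to full commit SHAs (with a release-tag comment above each pin), f832b0b254 moves golangci-lint to v2, and the two substantive code changes are 15990d492d (more context on errors from the github/actions client) and ea27448da5 (the busy-runners metric fix). The error-context change follows Go's standard wrapping idiom, fmt.Errorf with %w, which adds detail while keeping the original error matchable via errors.Is/errors.As. A minimal sketch of that idiom, using illustrative names rather than the client's real API:

package main

import (
	"errors"
	"fmt"
)

// ErrNotFound stands in for a sentinel error a client might return.
var ErrNotFound = errors.New("not found")

// getRunnerScaleSet is a hypothetical call that fails with a bare error.
func getRunnerScaleSet(id int) error {
	return ErrNotFound
}

// fetchWithContext wraps the failure with %w, adding the scale set ID
// while preserving the original error for errors.Is / errors.As.
func fetchWithContext(id int) error {
	if err := getRunnerScaleSet(id); err != nil {
		return fmt.Errorf("failed to get runner scale set %d: %w", id, err)
	}
	return nil
}

func main() {
	err := fetchWithContext(42)
	fmt.Println(err)                         // failed to get runner scale set 42: not found
	fmt.Println(errors.Is(err, ErrNotFound)) // true
}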

View File

@@ -1,9 +1,9 @@
name: 'Setup ARC E2E Test Action'
description: 'Build controller image, create kind cluster, load the image, and exchange ARC configure token.'
name: "Setup ARC E2E Test Action"
description: "Build controller image, create kind cluster, load the image, and exchange ARC configure token."
inputs:
app-id:
description: 'GitHub App Id for exchange access token'
description: "GitHub App Id for exchange access token"
required: true
app-pk:
description: "GitHub App private key for exchange access token"
@@ -20,30 +20,31 @@ inputs:
outputs:
token:
description: 'Token to use for configure ARC'
description: "Token to use for configure ARC"
value: ${{steps.config-token.outputs.token}}
runs:
using: "composite"
steps:
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2
with:
# Pinning v0.9.1 for Buildx and BuildKit v0.10.6
# BuildKit v0.11 which has a bug causing intermittent
# failures pushing images to GHCR
version: v0.9.1
driver-opts: image=moby/buildkit:v0.10.6
# Pinning v0.9.1 for Buildx and BuildKit v0.10.6
# BuildKit v0.11 which has a bug causing intermittent
# failures pushing images to GHCR
version: v0.9.1
driver-opts: image=moby/buildkit:v0.10.6
- name: Build controller image
uses: docker/build-push-action@v5
# https://github.com/docker/build-push-action/releases/tag/v6.15.0
uses: docker/build-push-action@471d1dc4e07e5cdedd4c2171150001c434f0b7a4
with:
file: Dockerfile
platforms: linux/amd64
load: true
build-args: |
DOCKER_IMAGE_NAME=${{inputs.image-name}}
VERSION=${{inputs.image-tag}}
VERSION=${{inputs.image-tag}}
tags: |
${{inputs.image-name}}:${{inputs.image-tag}}
no-cache: true
@@ -56,8 +57,9 @@ runs:
- name: Get configure token
id: config-token
# https://github.com/peter-murray/workflow-application-token-action/releases/tag/v3.0.0
uses: peter-murray/workflow-application-token-action@dc0413987a085fa17d19df9e47d4677cf81ffef3
with:
application_id: ${{ inputs.app-id }}
application_private_key: ${{ inputs.app-pk }}
organization: ${{ inputs.target-org}}
organization: ${{ inputs.target-org}}

View File

@@ -24,23 +24,27 @@ runs:
shell: bash
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
# https://github.com/docker/setup-qemu-action/releases/tag/v3.6.0
uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
# https://github.com/docker/setup-buildx-action/releases/tag/v3.10.0
uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2
with:
version: latest
- name: Login to DockerHub
if: ${{ github.event_name == 'release' || github.event_name == 'push' && github.ref == 'refs/heads/master' && inputs.password != '' }}
uses: docker/login-action@v3
# https://github.com/docker/login-action/releases/tag/v3.4.0
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772
with:
username: ${{ inputs.username }}
password: ${{ inputs.password }}
- name: Login to GitHub Container Registry
if: ${{ github.event_name == 'release' || github.event_name == 'push' && github.ref == 'refs/heads/master' && inputs.ghcr_password != '' }}
uses: docker/login-action@v3
# https://github.com/docker/login-action/releases/tag/v3.4.0
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772
with:
registry: ghcr.io
username: ${{ inputs.ghcr_username }}

View File

@@ -5,18 +5,18 @@ name: Publish ARC Helm Charts
on:
push:
branches:
- master
- master
paths:
- 'charts/**'
- '.github/workflows/arc-publish-chart.yaml'
- '!charts/actions-runner-controller/docs/**'
- '!charts/gha-runner-scale-set-controller/**'
- '!charts/gha-runner-scale-set/**'
- '!**.md'
- "charts/**"
- ".github/workflows/arc-publish-chart.yaml"
- "!charts/actions-runner-controller/docs/**"
- "!charts/gha-runner-scale-set-controller/**"
- "!charts/gha-runner-scale-set/**"
- "!**.md"
workflow_dispatch:
inputs:
force:
description: 'Force publish even if the chart version is not bumped'
description: "Force publish even if the chart version is not bumped"
type: boolean
required: true
default: false
@@ -39,86 +39,89 @@ jobs:
outputs:
publish-chart: ${{ steps.publish-chart-step.outputs.publish }}
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Set up Helm
uses: azure/setup-helm@fe7b79cd5ee1e45176fcad797de68ecaf3ca4814
with:
version: ${{ env.HELM_VERSION }}
- name: Set up Helm
# Using https://github.com/Azure/setup-helm/releases/tag/v4.2.0
uses: azure/setup-helm@fe7b79cd5ee1e45176fcad797de68ecaf3ca4814
with:
version: ${{ env.HELM_VERSION }}
- name: Set up kube-score
run: |
wget https://github.com/zegl/kube-score/releases/download/v${{ env.KUBE_SCORE_VERSION }}/kube-score_${{ env.KUBE_SCORE_VERSION }}_linux_amd64 -O kube-score
chmod 755 kube-score
- name: Set up kube-score
run: |
wget https://github.com/zegl/kube-score/releases/download/v${{ env.KUBE_SCORE_VERSION }}/kube-score_${{ env.KUBE_SCORE_VERSION }}_linux_amd64 -O kube-score
chmod 755 kube-score
- name: Kube-score generated manifests
run: helm template --values charts/.ci/values-kube-score.yaml charts/* | ./kube-score score - --ignore-test pod-networkpolicy --ignore-test deployment-has-poddisruptionbudget --ignore-test deployment-has-host-podantiaffinity --ignore-test container-security-context --ignore-test pod-probes --ignore-test container-image-tag --enable-optional-test container-security-context-privileged --enable-optional-test container-security-context-readonlyrootfilesystem
- name: Kube-score generated manifests
run: helm template --values charts/.ci/values-kube-score.yaml charts/* | ./kube-score score - --ignore-test pod-networkpolicy --ignore-test deployment-has-poddisruptionbudget --ignore-test deployment-has-host-podantiaffinity --ignore-test container-security-context --ignore-test pod-probes --ignore-test container-image-tag --enable-optional-test container-security-context-privileged --enable-optional-test container-security-context-readonlyrootfilesystem
# python is a requirement for the chart-testing action below (supports yamllint among other tests)
- uses: actions/setup-python@v5
with:
python-version: '3.11'
# python is a requirement for the chart-testing action below (supports yamllint among other tests)
- uses: actions/setup-python@v5
with:
python-version: "3.11"
- name: Set up chart-testing
uses: helm/chart-testing-action@v2.6.0
- name: Set up chart-testing
# https://github.com/helm/chart-testing-action/releases/tag/v2.7.0
uses: helm/chart-testing-action@0d28d3144d3a25ea2cc349d6e59901c4ff469b3b
- name: Run chart-testing (list-changed)
id: list-changed
run: |
changed=$(ct list-changed --config charts/.ci/ct-config.yaml)
if [[ -n "$changed" ]]; then
echo "changed=true" >> $GITHUB_OUTPUT
fi
- name: Run chart-testing (list-changed)
id: list-changed
run: |
changed=$(ct list-changed --config charts/.ci/ct-config.yaml)
if [[ -n "$changed" ]]; then
echo "changed=true" >> $GITHUB_OUTPUT
fi
- name: Run chart-testing (lint)
run: |
ct lint --config charts/.ci/ct-config.yaml
- name: Run chart-testing (lint)
run: |
ct lint --config charts/.ci/ct-config.yaml
- name: Create kind cluster
if: steps.list-changed.outputs.changed == 'true'
uses: helm/kind-action@v1.4.0
- name: Create kind cluster
if: steps.list-changed.outputs.changed == 'true'
# https://github.com/helm/kind-action/releases/tag/v1.12.0
uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3
# We need cert-manager already installed in the cluster because we assume the CRDs exist
- name: Install cert-manager
if: steps.list-changed.outputs.changed == 'true'
run: |
helm repo add jetstack https://charts.jetstack.io --force-update
helm install cert-manager jetstack/cert-manager --set installCRDs=true --wait
# We need cert-manager already installed in the cluster because we assume the CRDs exist
- name: Install cert-manager
if: steps.list-changed.outputs.changed == 'true'
run: |
helm repo add jetstack https://charts.jetstack.io --force-update
helm install cert-manager jetstack/cert-manager --set installCRDs=true --wait
- name: Run chart-testing (install)
if: steps.list-changed.outputs.changed == 'true'
run: ct install --config charts/.ci/ct-config.yaml
- name: Run chart-testing (install)
if: steps.list-changed.outputs.changed == 'true'
run: ct install --config charts/.ci/ct-config.yaml
# WARNING: This relies on the latest release being at the top of the JSON from GitHub and a clean chart.yaml
- name: Check if Chart Publish is Needed
id: publish-chart-step
run: |
CHART_TEXT=$(curl -fs https://raw.githubusercontent.com/${{ github.repository }}/master/charts/actions-runner-controller/Chart.yaml)
NEW_CHART_VERSION=$(echo "$CHART_TEXT" | grep version: | cut -d ' ' -f 2)
RELEASE_LIST=$(curl -fs https://api.github.com/repos/${{ github.repository }}/releases | jq .[].tag_name | grep actions-runner-controller | cut -d '"' -f 2 | cut -d '-' -f 4)
LATEST_RELEASED_CHART_VERSION=$(echo $RELEASE_LIST | cut -d ' ' -f 1)
# WARNING: This relies on the latest release being at the top of the JSON from GitHub and a clean chart.yaml
- name: Check if Chart Publish is Needed
id: publish-chart-step
run: |
CHART_TEXT=$(curl -fs https://raw.githubusercontent.com/${{ github.repository }}/master/charts/actions-runner-controller/Chart.yaml)
NEW_CHART_VERSION=$(echo "$CHART_TEXT" | grep version: | cut -d ' ' -f 2)
RELEASE_LIST=$(curl -fs https://api.github.com/repos/${{ github.repository }}/releases | jq .[].tag_name | grep actions-runner-controller | cut -d '"' -f 2 | cut -d '-' -f 4)
LATEST_RELEASED_CHART_VERSION=$(echo $RELEASE_LIST | cut -d ' ' -f 1)
echo "CHART_VERSION_IN_MASTER=$NEW_CHART_VERSION" >> $GITHUB_ENV
echo "LATEST_CHART_VERSION=$LATEST_RELEASED_CHART_VERSION" >> $GITHUB_ENV
echo "CHART_VERSION_IN_MASTER=$NEW_CHART_VERSION" >> $GITHUB_ENV
echo "LATEST_CHART_VERSION=$LATEST_RELEASED_CHART_VERSION" >> $GITHUB_ENV
# Always publish if force is true
if [[ $NEW_CHART_VERSION != $LATEST_RELEASED_CHART_VERSION || "${{ inputs.force }}" == "true" ]]; then
echo "publish=true" >> $GITHUB_OUTPUT
else
echo "publish=false" >> $GITHUB_OUTPUT
fi
# Always publish if force is true
if [[ $NEW_CHART_VERSION != $LATEST_RELEASED_CHART_VERSION || "${{ inputs.force }}" == "true" ]]; then
echo "publish=true" >> $GITHUB_OUTPUT
else
echo "publish=false" >> $GITHUB_OUTPUT
fi
- name: Job summary
run: |
echo "Chart linting has been completed." >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "**Status:**" >> $GITHUB_STEP_SUMMARY
echo "- chart version in master: ${{ env.CHART_VERSION_IN_MASTER }}" >> $GITHUB_STEP_SUMMARY
echo "- latest chart version: ${{ env.LATEST_CHART_VERSION }}" >> $GITHUB_STEP_SUMMARY
echo "- publish new chart: ${{ steps.publish-chart-step.outputs.publish }}" >> $GITHUB_STEP_SUMMARY
- name: Job summary
run: |
echo "Chart linting has been completed." >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "**Status:**" >> $GITHUB_STEP_SUMMARY
echo "- chart version in master: ${{ env.CHART_VERSION_IN_MASTER }}" >> $GITHUB_STEP_SUMMARY
echo "- latest chart version: ${{ env.LATEST_CHART_VERSION }}" >> $GITHUB_STEP_SUMMARY
echo "- publish new chart: ${{ steps.publish-chart-step.outputs.publish }}" >> $GITHUB_STEP_SUMMARY
publish-chart:
if: needs.lint-chart.outputs.publish-chart == 'true'
@@ -133,80 +136,81 @@ jobs:
CHART_TARGET_BRANCH: master
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Configure Git
run: |
git config user.name "$GITHUB_ACTOR"
git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
- name: Configure Git
run: |
git config user.name "$GITHUB_ACTOR"
git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
- name: Get Token
id: get_workflow_token
uses: peter-murray/workflow-application-token-action@dc0413987a085fa17d19df9e47d4677cf81ffef3
with:
application_id: ${{ secrets.ACTIONS_ACCESS_APP_ID }}
application_private_key: ${{ secrets.ACTIONS_ACCESS_PK }}
organization: ${{ env.CHART_TARGET_ORG }}
- name: Get Token
id: get_workflow_token
# https://github.com/peter-murray/workflow-application-token-action/releases/tag/v3.0.0
uses: peter-murray/workflow-application-token-action@dc0413987a085fa17d19df9e47d4677cf81ffef3
with:
application_id: ${{ secrets.ACTIONS_ACCESS_APP_ID }}
application_private_key: ${{ secrets.ACTIONS_ACCESS_PK }}
organization: ${{ env.CHART_TARGET_ORG }}
- name: Install chart-releaser
uses: helm/chart-releaser-action@v1.4.1
with:
install_only: true
install_dir: ${{ github.workspace }}/bin
- name: Install chart-releaser
uses: helm/chart-releaser-action@cae68fefc6b5f367a0275617c9f83181ba54714f
with:
install_only: true
install_dir: ${{ github.workspace }}/bin
- name: Package and upload release assets
run: |
cr package \
${{ github.workspace }}/charts/actions-runner-controller/ \
--package-path .cr-release-packages
- name: Package and upload release assets
run: |
cr package \
${{ github.workspace }}/charts/actions-runner-controller/ \
--package-path .cr-release-packages
cr upload \
--owner "$(echo ${{ github.repository }} | cut -d '/' -f 1)" \
--git-repo "$(echo ${{ github.repository }} | cut -d '/' -f 2)" \
--package-path .cr-release-packages \
--token ${{ secrets.GITHUB_TOKEN }}
cr upload \
--owner "$(echo ${{ github.repository }} | cut -d '/' -f 1)" \
--git-repo "$(echo ${{ github.repository }} | cut -d '/' -f 2)" \
--package-path .cr-release-packages \
--token ${{ secrets.GITHUB_TOKEN }}
- name: Generate updated index.yaml
run: |
cr index \
--owner "$(echo ${{ github.repository }} | cut -d '/' -f 1)" \
--git-repo "$(echo ${{ github.repository }} | cut -d '/' -f 2)" \
--index-path ${{ github.workspace }}/index.yaml \
--token ${{ secrets.GITHUB_TOKEN }} \
--push \
--pages-branch 'gh-pages' \
--pages-index-path 'index.yaml'
- name: Generate updated index.yaml
run: |
cr index \
--owner "$(echo ${{ github.repository }} | cut -d '/' -f 1)" \
--git-repo "$(echo ${{ github.repository }} | cut -d '/' -f 2)" \
--index-path ${{ github.workspace }}/index.yaml \
--token ${{ secrets.GITHUB_TOKEN }} \
--push \
--pages-branch 'gh-pages' \
--pages-index-path 'index.yaml'
# Chart Release was never intended to publish to a different repo
# this workaround is intended to move the index.yaml to the target repo
# where the github pages are hosted
- name: Checkout target repository
uses: actions/checkout@v4
with:
repository: ${{ env.CHART_TARGET_ORG }}/${{ env.CHART_TARGET_REPO }}
path: ${{ env.CHART_TARGET_REPO }}
ref: ${{ env.CHART_TARGET_BRANCH }}
token: ${{ steps.get_workflow_token.outputs.token }}
# Chart Release was never intended to publish to a different repo
# this workaround is intended to move the index.yaml to the target repo
# where the github pages are hosted
- name: Checkout target repository
uses: actions/checkout@v4
with:
repository: ${{ env.CHART_TARGET_ORG }}/${{ env.CHART_TARGET_REPO }}
path: ${{ env.CHART_TARGET_REPO }}
ref: ${{ env.CHART_TARGET_BRANCH }}
token: ${{ steps.get_workflow_token.outputs.token }}
- name: Copy index.yaml
run: |
cp ${{ github.workspace }}/index.yaml ${{ env.CHART_TARGET_REPO }}/actions-runner-controller/index.yaml
- name: Copy index.yaml
run: |
cp ${{ github.workspace }}/index.yaml ${{ env.CHART_TARGET_REPO }}/actions-runner-controller/index.yaml
- name: Commit and push to target repository
run: |
git config user.name "$GITHUB_ACTOR"
git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
git add .
git commit -m "Update index.yaml"
git push
working-directory: ${{ github.workspace }}/${{ env.CHART_TARGET_REPO }}
- name: Commit and push to target repository
run: |
git config user.name "$GITHUB_ACTOR"
git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
git add .
git commit -m "Update index.yaml"
git push
working-directory: ${{ github.workspace }}/${{ env.CHART_TARGET_REPO }}
- name: Job summary
run: |
echo "New helm chart has been published" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "**Status:**" >> $GITHUB_STEP_SUMMARY
echo "- New [index.yaml](https://github.com/${{ env.CHART_TARGET_ORG }}/${{ env.CHART_TARGET_REPO }}/tree/master/actions-runner-controller) pushed" >> $GITHUB_STEP_SUMMARY
- name: Job summary
run: |
echo "New helm chart has been published" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "**Status:**" >> $GITHUB_STEP_SUMMARY
echo "- New [index.yaml](https://github.com/${{ env.CHART_TARGET_ORG }}/${{ env.CHART_TARGET_REPO }}/tree/master/actions-runner-controller) pushed" >> $GITHUB_STEP_SUMMARY

View File

@@ -9,17 +9,17 @@ on:
workflow_dispatch:
inputs:
release_tag_name:
description: 'Tag name of the release to publish'
description: "Tag name of the release to publish"
required: true
push_to_registries:
description: 'Push images to registries'
description: "Push images to registries"
required: true
type: boolean
default: false
permissions:
contents: write
packages: write
contents: write
packages: write
env:
TARGET_ORG: actions-runner-controller
@@ -43,7 +43,7 @@ jobs:
- uses: actions/setup-go@v5
with:
go-version-file: 'go.mod'
go-version-file: "go.mod"
- name: Install tools
run: |
@@ -73,6 +73,7 @@ jobs:
- name: Get Token
id: get_workflow_token
# https://github.com/peter-murray/workflow-application-token-action/releases/tag/v3.0.0
uses: peter-murray/workflow-application-token-action@dc0413987a085fa17d19df9e47d4677cf81ffef3
with:
application_id: ${{ secrets.ACTIONS_ACCESS_APP_ID }}

View File

@@ -7,10 +7,10 @@ on:
# are available to the workflow run
push:
branches:
- 'master'
- "master"
paths:
- 'runner/VERSION'
- '.github/workflows/arc-release-runners.yaml'
- "runner/VERSION"
- ".github/workflows/arc-release-runners.yaml"
env:
# Safeguard to prevent pushing images to registeries after build
@@ -39,6 +39,7 @@ jobs:
- name: Get Token
id: get_workflow_token
# https://github.com/peter-murray/workflow-application-token-action/releases/tag/v3.0.0
uses: peter-murray/workflow-application-token-action@dc0413987a085fa17d19df9e47d4677cf81ffef3
with:
application_id: ${{ secrets.ACTIONS_ACCESS_APP_ID }}

View File

@@ -5,20 +5,20 @@ on:
branches:
- master
paths:
- 'charts/**'
- '.github/workflows/arc-validate-chart.yaml'
- '!charts/actions-runner-controller/docs/**'
- '!**.md'
- '!charts/gha-runner-scale-set-controller/**'
- '!charts/gha-runner-scale-set/**'
- "charts/**"
- ".github/workflows/arc-validate-chart.yaml"
- "!charts/actions-runner-controller/docs/**"
- "!**.md"
- "!charts/gha-runner-scale-set-controller/**"
- "!charts/gha-runner-scale-set/**"
push:
paths:
- 'charts/**'
- '.github/workflows/arc-validate-chart.yaml'
- '!charts/actions-runner-controller/docs/**'
- '!**.md'
- '!charts/gha-runner-scale-set-controller/**'
- '!charts/gha-runner-scale-set/**'
- "charts/**"
- ".github/workflows/arc-validate-chart.yaml"
- "!charts/actions-runner-controller/docs/**"
- "!**.md"
- "!charts/gha-runner-scale-set-controller/**"
- "!charts/gha-runner-scale-set/**"
workflow_dispatch:
env:
KUBE_SCORE_VERSION: 1.10.0
@@ -45,34 +45,19 @@ jobs:
fetch-depth: 0
- name: Set up Helm
# Using https://github.com/Azure/setup-helm/releases/tag/v4.2
# Using https://github.com/Azure/setup-helm/releases/tag/v4.2.0
uses: azure/setup-helm@fe7b79cd5ee1e45176fcad797de68ecaf3ca4814
with:
version: ${{ env.HELM_VERSION }}
- name: Set up kube-score
run: |
wget https://github.com/zegl/kube-score/releases/download/v${{ env.KUBE_SCORE_VERSION }}/kube-score_${{ env.KUBE_SCORE_VERSION }}_linux_amd64 -O kube-score
chmod 755 kube-score
- name: Kube-score generated manifests
run: helm template --values charts/.ci/values-kube-score.yaml charts/* | ./kube-score score -
--ignore-test pod-networkpolicy
--ignore-test deployment-has-poddisruptionbudget
--ignore-test deployment-has-host-podantiaffinity
--ignore-test container-security-context
--ignore-test pod-probes
--ignore-test container-image-tag
--enable-optional-test container-security-context-privileged
--enable-optional-test container-security-context-readonlyrootfilesystem
# python is a requirement for the chart-testing action below (supports yamllint among other tests)
- uses: actions/setup-python@v5
with:
python-version: '3.11'
python-version: "3.11"
- name: Set up chart-testing
uses: helm/chart-testing-action@v2.6.0
# https://github.com/helm/chart-testing-action/releases/tag/v2.7.0
uses: helm/chart-testing-action@0d28d3144d3a25ea2cc349d6e59901c4ff469b3b
- name: Run chart-testing (list-changed)
id: list-changed
@@ -87,7 +72,8 @@ jobs:
ct lint --config charts/.ci/ct-config.yaml
- name: Create kind cluster
uses: helm/kind-action@v1.4.0
# https://github.com/helm/kind-action/releases/tag/v1.12.0
uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3
if: steps.list-changed.outputs.changed == 'true'
# We need cert-manager already installed in the cluster because we assume the CRDs exist

View File

@@ -3,17 +3,17 @@ name: Validate ARC Runners
on:
pull_request:
branches:
- '**'
- "**"
paths:
- 'runner/**'
- 'test/startup/**'
- '!**.md'
- "runner/**"
- "test/startup/**"
- "!**.md"
permissions:
contents: read
concurrency:
# This will make sure we only apply the concurrency limits on pull requests
# This will make sure we only apply the concurrency limits on pull requests
# but not pushes to master branch by making the concurrency group name unique
# for pushes
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
@@ -25,28 +25,16 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: shellcheck
uses: reviewdog/action-shellcheck@v1
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
path: "./runner"
pattern: |
*.sh
*.bash
update-status
# Make this consistent with `make shellsheck`
shellcheck_flags: "--shell bash --source-path runner"
exclude: "./.git/*"
check_all_files_with_shebangs: "false"
# Set this to "true" once we addressed all the shellcheck findings
fail_on_error: "false"
- name: "Run shellcheck"
run: make shellcheck
test-runner-entrypoint:
name: Test entrypoint
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Checkout
uses: actions/checkout@v4
- name: Run tests
run: |
make acceptance/runner/startup
- name: Run tests
run: |
make acceptance/runner/startup

View File

@@ -4,27 +4,27 @@ on:
workflow_dispatch:
inputs:
ref:
description: 'The branch, tag or SHA to cut a release from'
description: "The branch, tag or SHA to cut a release from"
required: false
type: string
default: ''
default: ""
release_tag_name:
description: 'The name to tag the controller image with'
description: "The name to tag the controller image with"
required: true
type: string
default: 'canary'
default: "canary"
push_to_registries:
description: 'Push images to registries'
description: "Push images to registries"
required: true
type: boolean
default: false
publish_gha_runner_scale_set_controller_chart:
description: 'Publish new helm chart for gha-runner-scale-set-controller'
description: "Publish new helm chart for gha-runner-scale-set-controller"
required: true
type: boolean
default: false
publish_gha_runner_scale_set_chart:
description: 'Publish new helm chart for gha-runner-scale-set'
description: "Publish new helm chart for gha-runner-scale-set"
required: true
type: boolean
default: false
@@ -72,10 +72,11 @@ jobs:
echo "repository_owner=$(echo ${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
# https://github.com/docker/setup-qemu-action/releases/tag/v3.6.0
uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2
with:
# Pinning v0.9.1 for Buildx and BuildKit v0.10.6
# BuildKit v0.11 which has a bug causing intermittent
@@ -84,14 +85,16 @@ jobs:
driver-opts: image=moby/buildkit:v0.10.6
- name: Login to GitHub Container Registry
uses: docker/login-action@v3
# https://github.com/docker/login-action/releases/tag/v3.4.0
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Build & push controller image
uses: docker/build-push-action@v5
# https://github.com/docker/build-push-action/releases/tag/v6.15.0
uses: docker/build-push-action@471d1dc4e07e5cdedd4c2171150001c434f0b7a4
with:
file: Dockerfile
platforms: linux/amd64,linux/arm64
@@ -140,7 +143,7 @@ jobs:
echo "repository_owner=$(echo ${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT
- name: Set up Helm
# Using https://github.com/Azure/setup-helm/releases/tag/v4.2
# Using https://github.com/Azure/setup-helm/releases/tag/v4.2.0
uses: azure/setup-helm@fe7b79cd5ee1e45176fcad797de68ecaf3ca4814
with:
version: ${{ env.HELM_VERSION }}
@@ -188,7 +191,7 @@ jobs:
echo "repository_owner=$(echo ${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT
- name: Set up Helm
# Using https://github.com/Azure/setup-helm/releases/tag/v4.2
# Using https://github.com/Azure/setup-helm/releases/tag/v4.2.0
uses: azure/setup-helm@fe7b79cd5ee1e45176fcad797de68ecaf3ca4814
with:
version: ${{ env.HELM_VERSION }}

View File

@@ -5,16 +5,16 @@ on:
branches:
- master
paths:
- 'charts/**'
- '.github/workflows/gha-validate-chart.yaml'
- '!charts/actions-runner-controller/**'
- '!**.md'
- "charts/**"
- ".github/workflows/gha-validate-chart.yaml"
- "!charts/actions-runner-controller/**"
- "!**.md"
push:
paths:
- 'charts/**'
- '.github/workflows/gha-validate-chart.yaml'
- '!charts/actions-runner-controller/**'
- '!**.md'
- "charts/**"
- ".github/workflows/gha-validate-chart.yaml"
- "!charts/actions-runner-controller/**"
- "!**.md"
workflow_dispatch:
env:
KUBE_SCORE_VERSION: 1.16.1
@@ -41,7 +41,7 @@ jobs:
fetch-depth: 0
- name: Set up Helm
# Using https://github.com/Azure/setup-helm/releases/tag/v4.2
# Using https://github.com/Azure/setup-helm/releases/tag/v4.2.0
uses: azure/setup-helm@fe7b79cd5ee1e45176fcad797de68ecaf3ca4814
with:
version: ${{ env.HELM_VERSION }}
@@ -49,10 +49,11 @@ jobs:
# python is a requirement for the chart-testing action below (supports yamllint among other tests)
- uses: actions/setup-python@v5
with:
python-version: '3.11'
python-version: "3.11"
- name: Set up chart-testing
uses: helm/chart-testing-action@v2.6.0
# https://github.com/helm/chart-testing-action/releases/tag/v2.7.0
uses: helm/chart-testing-action@0d28d3144d3a25ea2cc349d6e59901c4ff469b3b
- name: Run chart-testing (list-changed)
id: list-changed
@@ -68,13 +69,14 @@ jobs:
ct lint --config charts/.ci/ct-config-gha.yaml
- name: Set up docker buildx
uses: docker/setup-buildx-action@v3
uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2
if: steps.list-changed.outputs.changed == 'true'
with:
version: latest
- name: Build controller image
uses: docker/build-push-action@v5
# https://github.com/docker/build-push-action/releases/tag/v6.15.0
uses: docker/build-push-action@471d1dc4e07e5cdedd4c2171150001c434f0b7a4
if: steps.list-changed.outputs.changed == 'true'
with:
file: Dockerfile
@@ -89,7 +91,8 @@ jobs:
cache-to: type=gha,mode=max
- name: Create kind cluster
uses: helm/kind-action@v1.4.0
# https://github.com/helm/kind-action/releases/tag/v1.12.0
uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3
if: steps.list-changed.outputs.changed == 'true'
with:
cluster_name: chart-testing
@@ -97,11 +100,11 @@ jobs:
- name: Load image into cluster
if: steps.list-changed.outputs.changed == 'true'
run: |
export DOCKER_IMAGE_NAME=test-arc
export VERSION=dev
export IMG_RESULT=load
make docker-buildx
kind load docker-image test-arc:dev --name chart-testing
export DOCKER_IMAGE_NAME=test-arc
export VERSION=dev
export IMG_RESULT=load
make docker-buildx
kind load docker-image test-arc:dev --name chart-testing
- name: Run chart-testing (install)
if: steps.list-changed.outputs.changed == 'true'

View File

@@ -7,30 +7,30 @@ on:
branches:
- master
paths-ignore:
- '**.md'
- '.github/actions/**'
- '.github/ISSUE_TEMPLATE/**'
- '.github/workflows/e2e-test-dispatch-workflow.yaml'
- '.github/workflows/gha-e2e-tests.yaml'
- '.github/workflows/arc-publish.yaml'
- '.github/workflows/arc-publish-chart.yaml'
- '.github/workflows/gha-publish-chart.yaml'
- '.github/workflows/arc-release-runners.yaml'
- '.github/workflows/global-run-codeql.yaml'
- '.github/workflows/global-run-first-interaction.yaml'
- '.github/workflows/global-run-stale.yaml'
- '.github/workflows/arc-update-runners-scheduled.yaml'
- '.github/workflows/validate-arc.yaml'
- '.github/workflows/arc-validate-chart.yaml'
- '.github/workflows/gha-validate-chart.yaml'
- '.github/workflows/arc-validate-runners.yaml'
- '.github/dependabot.yml'
- '.github/RELEASE_NOTE_TEMPLATE.md'
- 'runner/**'
- '.gitignore'
- 'PROJECT'
- 'LICENSE'
- 'Makefile'
- "**.md"
- ".github/actions/**"
- ".github/ISSUE_TEMPLATE/**"
- ".github/workflows/e2e-test-dispatch-workflow.yaml"
- ".github/workflows/gha-e2e-tests.yaml"
- ".github/workflows/arc-publish.yaml"
- ".github/workflows/arc-publish-chart.yaml"
- ".github/workflows/gha-publish-chart.yaml"
- ".github/workflows/arc-release-runners.yaml"
- ".github/workflows/global-run-codeql.yaml"
- ".github/workflows/global-run-first-interaction.yaml"
- ".github/workflows/global-run-stale.yaml"
- ".github/workflows/arc-update-runners-scheduled.yaml"
- ".github/workflows/validate-arc.yaml"
- ".github/workflows/arc-validate-chart.yaml"
- ".github/workflows/gha-validate-chart.yaml"
- ".github/workflows/arc-validate-runners.yaml"
- ".github/dependabot.yml"
- ".github/RELEASE_NOTE_TEMPLATE.md"
- "runner/**"
- ".gitignore"
- "PROJECT"
- "LICENSE"
- "Makefile"
# https://docs.github.com/en/rest/overview/permissions-required-for-github-apps
permissions:
@@ -59,6 +59,7 @@ jobs:
- name: Get Token
id: get_workflow_token
# https://github.com/peter-murray/workflow-application-token-action/releases/tag/v3.0.0
uses: peter-murray/workflow-application-token-action@dc0413987a085fa17d19df9e47d4677cf81ffef3
with:
application_id: ${{ secrets.ACTIONS_ACCESS_APP_ID }}
@@ -93,7 +94,8 @@ jobs:
uses: actions/checkout@v4
- name: Login to GitHub Container Registry
uses: docker/login-action@v3
# https://github.com/docker/login-action/releases/tag/v3.4.0
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772
with:
registry: ghcr.io
username: ${{ github.actor }}
@@ -110,16 +112,19 @@ jobs:
echo "repository_owner=$(echo ${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
# https://github.com/docker/setup-qemu-action/releases/tag/v3.6.0
uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
# https://github.com/docker/setup-buildx-action/releases/tag/v3.10.0
uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2
with:
version: latest
# Unstable builds - run at your own risk
- name: Build and Push
uses: docker/build-push-action@v5
# https://github.com/docker/build-push-action/releases/tag/v6.15.0
uses: docker/build-push-action@471d1dc4e07e5cdedd4c2171150001c434f0b7a4
with:
context: .
file: ./Dockerfile

View File

@@ -4,16 +4,16 @@ on:
branches:
- master
paths:
- '.github/workflows/go.yaml'
- '**.go'
- 'go.mod'
- 'go.sum'
- ".github/workflows/go.yaml"
- "**.go"
- "go.mod"
- "go.sum"
pull_request:
paths:
- '.github/workflows/go.yaml'
- '**.go'
- 'go.mod'
- 'go.sum'
- ".github/workflows/go.yaml"
- "**.go"
- "go.mod"
- "go.sum"
permissions:
contents: read
@@ -32,7 +32,7 @@ jobs:
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
go-version-file: 'go.mod'
go-version-file: "go.mod"
cache: false
- name: fmt
run: go fmt ./...
@@ -45,13 +45,14 @@ jobs:
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
go-version-file: 'go.mod'
go-version-file: "go.mod"
cache: false
- name: golangci-lint
uses: golangci/golangci-lint-action@v6
# https://github.com/golangci/golangci-lint-action/releases/tag/v7.0.0
uses: golangci/golangci-lint-action@1481404843c368bc19ca9406f87d6e0fc97bdcfd
with:
only-new-issues: true
version: v1.55.2
version: v2.1.2
generate:
runs-on: ubuntu-latest
@@ -59,7 +60,7 @@ jobs:
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
go-version-file: 'go.mod'
go-version-file: "go.mod"
cache: false
- name: Generate
run: make generate
@@ -72,7 +73,7 @@ jobs:
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
go-version-file: 'go.mod'
go-version-file: "go.mod"
- run: make manifests
- name: Check diff
run: git diff --exit-code

View File

@@ -1,19 +1,14 @@
version: "2"
run:
timeout: 3m
output:
formats:
- format: github-actions
path: stdout
linters-settings:
errcheck:
exclude-functions:
- (net/http.ResponseWriter).Write
- (*net/http.Server).Shutdown
- (*github.com/actions/actions-runner-controller/simulator.VisibleRunnerGroups).Add
- (*github.com/actions/actions-runner-controller/testing.Kind).Stop
issues:
exclude-rules:
- path: controllers/suite_test.go
linters:
- staticcheck
text: "SA1019"
timeout: 5m
linters:
settings:
errcheck:
exclude-functions:
- (net/http.ResponseWriter).Write
- (*net/http.Server).Shutdown
- (*github.com/actions/actions-runner-controller/simulator.VisibleRunnerGroups).Add
- (*github.com/actions/actions-runner-controller/testing.Kind).Stop
exclusions:
presets:
- std-error-handling

View File

@@ -20,7 +20,7 @@ KUBECONTEXT ?= kind-acceptance
CLUSTER ?= acceptance
CERT_MANAGER_VERSION ?= v1.1.1
KUBE_RBAC_PROXY_VERSION ?= v0.11.0
SHELLCHECK_VERSION ?= 0.8.0
SHELLCHECK_VERSION ?= 0.10.0
# Produce CRDs that work back to Kubernetes 1.11 (no version conversion)
CRD_OPTIONS ?= "crd:generateEmbeddedObjectMeta=true,allowDangerousTypes=true"
@@ -68,7 +68,7 @@ endif
all: manager
lint:
docker run --rm -v $(PWD):/app -w /app golangci/golangci-lint:v1.57.2 golangci-lint run
docker run --rm -v $(PWD):/app -w /app golangci/golangci-lint:v2.1.2 golangci-lint run
GO_TEST_ARGS ?= -short
@@ -204,7 +204,7 @@ generate: controller-gen
# Run shellcheck on runner scripts
shellcheck: shellcheck-install
$(TOOLS_PATH)/shellcheck --shell bash --source-path runner runner/*.sh hack/*.sh
$(TOOLS_PATH)/shellcheck --shell bash --source-path runner runner/*.sh runner/update-status hack/*.sh
docker-buildx:
export DOCKER_CLI_EXPERIMENTAL=enabled ;\
@@ -310,7 +310,7 @@ github-release: release
# Otherwise we get errors like the below:
# Error: failed to install CRD crds/actions.summerwind.dev_runnersets.yaml: CustomResourceDefinition.apiextensions.k8s.io "runnersets.actions.summerwind.dev" is invalid: [spec.validation.openAPIV3Schema.properties[spec].properties[template].properties[spec].properties[containers].items.properties[ports].items.properties[protocol].default: Required value: this property is in x-kubernetes-list-map-keys, so it must have a default or be a required property, spec.validation.openAPIV3Schema.properties[spec].properties[template].properties[spec].properties[initContainers].items.properties[ports].items.properties[protocol].default: Required value: this property is in x-kubernetes-list-map-keys, so it must have a default or be a required property]
#
# Note that controller-gen newer than 0.6.2 is needed due to https://github.com/kubernetes-sigs/controller-tools/issues/448
# Note that controller-gen newer than 0.7.0 is needed due to https://github.com/kubernetes-sigs/controller-tools/issues/448
# Otherwise ObjectMeta embedded in Spec results in empty on the storage.
controller-gen:
ifeq (, $(shell which controller-gen))

View File

@@ -5,22 +5,23 @@ on:
env:
IRSA_ROLE_ARN:
ASSUME_ROLE_ARN:
AWS_REGION:
ASSUME_ROLE_ARN:
AWS_REGION:
jobs:
assume-role-in-runner-test:
runs-on: ['self-hosted', 'Linux']
runs-on: ["self-hosted", "Linux"]
steps:
- name: Test aws-actions/configure-aws-credentials Action
uses: aws-actions/configure-aws-credentials@v1
# https://github.com/aws-actions/configure-aws-credentials/releases/tag/v4.1.0
uses: aws-actions/configure-aws-credentials@ececac1a45f3b08a01d2dd070d28d111c5fe6722
with:
aws-region: ${{ env.AWS_REGION }}
role-to-assume: ${{ env.ASSUME_ROLE_ARN }}
role-duration-seconds: 900
assume-role-in-container-test:
runs-on: ['self-hosted', 'Linux']
container:
runs-on: ["self-hosted", "Linux"]
container:
image: amazon/aws-cli
env:
AWS_WEB_IDENTITY_TOKEN_FILE: /var/run/secrets/eks.amazonaws.com/serviceaccount/token
@@ -29,7 +30,8 @@ jobs:
- /var/run/secrets/eks.amazonaws.com/serviceaccount/token:/var/run/secrets/eks.amazonaws.com/serviceaccount/token
steps:
- name: Test aws-actions/configure-aws-credentials Action in container
uses: aws-actions/configure-aws-credentials@v1
# https://github.com/aws-actions/configure-aws-credentials/releases/tag/v4.1.0
uses: aws-actions/configure-aws-credentials@ececac1a45f3b08a01d2dd070d28d111c5fe6722
with:
aws-region: ${{ env.AWS_REGION }}
role-to-assume: ${{ env.ASSUME_ROLE_ARN }}

View File

@@ -8,8 +8,8 @@ env:
jobs:
run-step-in-container-test:
runs-on: ['self-hosted', 'Linux']
container:
runs-on: ["self-hosted", "Linux"]
container:
image: alpine
steps:
- name: Test we are working in the container
@@ -21,7 +21,7 @@ jobs:
exit 1
fi
setup-python-test:
runs-on: ['self-hosted', 'Linux']
runs-on: ["self-hosted", "Linux"]
steps:
- name: Print native Python environment
run: |
@@ -41,12 +41,12 @@ jobs:
echo "Python version detected : $(python --version 2>&1)"
fi
setup-node-test:
runs-on: ['self-hosted', 'Linux']
runs-on: ["self-hosted", "Linux"]
steps:
- uses: actions/setup-node@v2
with:
node-version: '12'
- name: Test actions/setup-node works
node-version: "12"
- name: Test actions/setup-node works
run: |
VERSION=$(node --version | cut -c 2- | cut -d '.' -f1)
if [[ $VERSION != '12' ]]; then
@@ -57,13 +57,14 @@ jobs:
echo "Node version detected : $(node --version 2>&1)"
fi
setup-ruby-test:
runs-on: ['self-hosted', 'Linux']
runs-on: ["self-hosted", "Linux"]
steps:
- uses: ruby/setup-ruby@v1
# https://github.com/ruby/setup-ruby/releases/tag/v1.227.0
- uses: ruby/setup-ruby@1a615958ad9d422dd932dc1d5823942ee002799f
with:
ruby-version: 3.0
bundler-cache: true
- name: Test ruby/setup-ruby works
- name: Test ruby/setup-ruby works
run: |
VERSION=$(ruby --version | cut -d ' ' -f2 | cut -d '.' -f1-2)
if [[ $VERSION != '3.0' ]]; then
@@ -74,8 +75,8 @@ jobs:
echo "Ruby version detected : $(ruby --version 2>&1)"
fi
python-shell-test:
runs-on: ['self-hosted', 'Linux']
steps:
runs-on: ["self-hosted", "Linux"]
steps:
- name: Test Python shell works
run: |
import os

View File

@@ -215,10 +215,10 @@ func (rs *RunnerSpec) validateRepository() error {
foundCount += 1
}
if foundCount == 0 {
return errors.New("Spec needs enterprise, organization or repository")
return errors.New("spec needs enterprise, organization or repository")
}
if foundCount > 1 {
return errors.New("Spec cannot have many fields defined enterprise, organization and repository")
return errors.New("spec cannot have many fields defined enterprise, organization and repository")
}
return nil
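
The lowercasing above tracks the usual Go convention (staticcheck flags violations as ST1005, and the golangci-lint v2 setup earlier in this compare enables the std-error-handling preset): error strings should not be capitalized or end with punctuation, because they are routinely wrapped into longer messages. A small illustration of why, as a sketch rather than the repository's actual call sites:

package main

import (
	"errors"
	"fmt"
)

func validate() error {
	// Lowercase, no trailing punctuation: reads cleanly when wrapped.
	return errors.New("spec needs enterprise, organization or repository")
}

func main() {
	if err := validate(); err != nil {
		// Wrapped output: "reconcile failed: spec needs enterprise, ..."
		fmt.Println(fmt.Errorf("reconcile failed: %w", err))
	}
}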

View File

@@ -1,9 +1,11 @@
# This file defines the config for "ct" (chart tester) used by the helm linting GitHub workflow
remote: origin
target-branch: master
lint-conf: charts/.ci/lint-config.yaml
chart-repos:
- jetstack=https://charts.jetstack.io
check-version-increment: false # Disable checking that the chart version has been bumped
charts:
- charts/gha-runner-scale-set-controller
- charts/gha-runner-scale-set
- charts/gha-runner-scale-set-controller
- charts/gha-runner-scale-set
skip-clean-up: true

View File

@@ -1,7 +1,9 @@
# This file defines the config for "ct" (chart tester) used by the helm linting GitHub workflow
remote: origin
target-branch: master
lint-conf: charts/.ci/lint-config.yaml
chart-repos:
- jetstack=https://charts.jetstack.io
check-version-increment: false # Disable checking that the chart version has been bumped
charts:
- charts/actions-runner-controller
- charts/actions-runner-controller

View File

@@ -1,6 +1,5 @@
#!/bin/bash
for chart in `ls charts`;
do
helm template --values charts/$chart/ci/ci-values.yaml charts/$chart | kube-score score - \
@@ -12,4 +11,4 @@ helm template --values charts/$chart/ci/ci-values.yaml charts/$chart | kube-scor
--enable-optional-test container-security-context-privileged \
--enable-optional-test container-security-context-readonlyrootfilesystem \
--ignore-test container-security-context
done
done

View File

@@ -1178,7 +1178,7 @@ func TestTemplateRenderedWithTLS(t *testing.T) {
}
}
require.NotNil(t, volume)
assert.Equal(t, "certs-configmap", volume.ConfigMap.LocalObjectReference.Name)
assert.Equal(t, "certs-configmap", volume.ConfigMap.Name)
assert.Equal(t, "cert.pem", volume.ConfigMap.Items[0].Key)
assert.Equal(t, "cert.pem", volume.ConfigMap.Items[0].Path)
@@ -1238,7 +1238,7 @@ func TestTemplateRenderedWithTLS(t *testing.T) {
}
}
require.NotNil(t, volume)
assert.Equal(t, "certs-configmap", volume.ConfigMap.LocalObjectReference.Name)
assert.Equal(t, "certs-configmap", volume.ConfigMap.Name)
assert.Equal(t, "cert.pem", volume.ConfigMap.Items[0].Key)
assert.Equal(t, "cert.pem", volume.ConfigMap.Items[0].Path)
@@ -1298,7 +1298,7 @@ func TestTemplateRenderedWithTLS(t *testing.T) {
}
}
require.NotNil(t, volume)
assert.Equal(t, "certs-configmap", volume.ConfigMap.LocalObjectReference.Name)
assert.Equal(t, "certs-configmap", volume.ConfigMap.Name)
assert.Equal(t, "cert.pem", volume.ConfigMap.Items[0].Key)
assert.Equal(t, "cert.pem", volume.ConfigMap.Items[0].Path)

View File

@@ -287,7 +287,7 @@ func (e *exporter) ListenAndServe(ctx context.Context) error {
}
func (e *exporter) setGauge(name string, allLabels prometheus.Labels, val float64) {
m, ok := e.metrics.gauges[name]
m, ok := e.gauges[name]
if !ok {
return
}
@@ -299,7 +299,7 @@ func (e *exporter) setGauge(name string, allLabels prometheus.Labels, val float6
}
func (e *exporter) incCounter(name string, allLabels prometheus.Labels) {
m, ok := e.metrics.counters[name]
m, ok := e.counters[name]
if !ok {
return
}
@@ -311,7 +311,7 @@ func (e *exporter) incCounter(name string, allLabels prometheus.Labels) {
}
func (e *exporter) observeHistogram(name string, allLabels prometheus.Labels, val float64) {
m, ok := e.metrics.histograms[name]
m, ok := e.histograms[name]
if !ok {
return
}
@@ -331,7 +331,7 @@ func (e *exporter) PublishStatistics(stats *actions.RunnerScaleSetStatistic) {
e.setGauge(MetricAssignedJobs, e.scaleSetLabels, float64(stats.TotalAssignedJobs))
e.setGauge(MetricRunningJobs, e.scaleSetLabels, float64(stats.TotalRunningJobs))
e.setGauge(MetricRegisteredRunners, e.scaleSetLabels, float64(stats.TotalRegisteredRunners))
e.setGauge(MetricBusyRunners, e.scaleSetLabels, float64(float64(stats.TotalRegisteredRunners)))
e.setGauge(MetricBusyRunners, e.scaleSetLabels, float64(float64(stats.TotalBusyRunners)))
e.setGauge(MetricIdleRunners, e.scaleSetLabels, float64(stats.TotalIdleRunners))
}
@@ -339,7 +339,7 @@ func (e *exporter) PublishJobStarted(msg *actions.JobStarted) {
l := e.startedJobLabels(msg)
e.incCounter(MetricStartedJobsTotal, l)
startupDuration := msg.JobMessageBase.RunnerAssignTime.Unix() - msg.JobMessageBase.ScaleSetAssignTime.Unix()
startupDuration := msg.RunnerAssignTime.Unix() - msg.ScaleSetAssignTime.Unix()
e.observeHistogram(MetricJobStartupDurationSeconds, l, float64(startupDuration))
}
@@ -347,7 +347,7 @@ func (e *exporter) PublishJobCompleted(msg *actions.JobCompleted) {
l := e.completedJobLabels(msg)
e.incCounter(MetricCompletedJobsTotal, l)
executionDuration := msg.JobMessageBase.FinishTime.Unix() - msg.JobMessageBase.RunnerAssignTime.Unix()
executionDuration := msg.FinishTime.Unix() - msg.RunnerAssignTime.Unix()
e.observeHistogram(MetricJobExecutionDurationSeconds, l, float64(executionDuration))
}

View File

@@ -77,7 +77,7 @@ func (r *AutoscalingListenerReconciler) Reconcile(ctx context.Context, req ctrl.
return ctrl.Result{}, client.IgnoreNotFound(err)
}
if !autoscalingListener.ObjectMeta.DeletionTimestamp.IsZero() {
if !autoscalingListener.DeletionTimestamp.IsZero() {
if !controllerutil.ContainsFinalizer(autoscalingListener, autoscalingListenerFinalizerName) {
return ctrl.Result{}, nil
}
@@ -281,7 +281,7 @@ func (r *AutoscalingListenerReconciler) cleanupResources(ctx context.Context, au
err = r.Get(ctx, types.NamespacedName{Name: autoscalingListener.Name, Namespace: autoscalingListener.Namespace}, listenerPod)
switch {
case err == nil:
if listenerPod.ObjectMeta.DeletionTimestamp.IsZero() {
if listenerPod.DeletionTimestamp.IsZero() {
logger.Info("Deleting the listener pod")
if err := r.Delete(ctx, listenerPod); err != nil {
return false, fmt.Errorf("failed to delete listener pod: %w", err)
@@ -299,7 +299,7 @@ func (r *AutoscalingListenerReconciler) cleanupResources(ctx context.Context, au
err = r.Get(ctx, types.NamespacedName{Namespace: autoscalingListener.Namespace, Name: scaleSetListenerConfigName(autoscalingListener)}, &secret)
switch {
case err == nil:
if secret.ObjectMeta.DeletionTimestamp.IsZero() {
if secret.DeletionTimestamp.IsZero() {
logger.Info("Deleting the listener config secret")
if err := r.Delete(ctx, &secret); err != nil {
return false, fmt.Errorf("failed to delete listener config secret: %w", err)
@@ -316,7 +316,7 @@ func (r *AutoscalingListenerReconciler) cleanupResources(ctx context.Context, au
err = r.Get(ctx, types.NamespacedName{Name: proxyListenerSecretName(autoscalingListener), Namespace: autoscalingListener.Namespace}, proxySecret)
switch {
case err == nil:
if proxySecret.ObjectMeta.DeletionTimestamp.IsZero() {
if proxySecret.DeletionTimestamp.IsZero() {
logger.Info("Deleting the listener proxy secret")
if err := r.Delete(ctx, proxySecret); err != nil {
return false, fmt.Errorf("failed to delete listener proxy secret: %w", err)
@@ -333,7 +333,7 @@ func (r *AutoscalingListenerReconciler) cleanupResources(ctx context.Context, au
err = r.Get(ctx, types.NamespacedName{Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace, Name: scaleSetListenerRoleName(autoscalingListener)}, listenerRoleBinding)
switch {
case err == nil:
if listenerRoleBinding.ObjectMeta.DeletionTimestamp.IsZero() {
if listenerRoleBinding.DeletionTimestamp.IsZero() {
logger.Info("Deleting the listener role binding")
if err := r.Delete(ctx, listenerRoleBinding); err != nil {
return false, fmt.Errorf("failed to delete listener role binding: %w", err)
@@ -349,7 +349,7 @@ func (r *AutoscalingListenerReconciler) cleanupResources(ctx context.Context, au
err = r.Get(ctx, types.NamespacedName{Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace, Name: scaleSetListenerRoleName(autoscalingListener)}, listenerRole)
switch {
case err == nil:
if listenerRole.ObjectMeta.DeletionTimestamp.IsZero() {
if listenerRole.DeletionTimestamp.IsZero() {
logger.Info("Deleting the listener role")
if err := r.Delete(ctx, listenerRole); err != nil {
return false, fmt.Errorf("failed to delete listener role: %w", err)
@@ -366,7 +366,7 @@ func (r *AutoscalingListenerReconciler) cleanupResources(ctx context.Context, au
err = r.Get(ctx, types.NamespacedName{Name: scaleSetListenerServiceAccountName(autoscalingListener), Namespace: autoscalingListener.Namespace}, listenerSa)
switch {
case err == nil:
if listenerSa.ObjectMeta.DeletionTimestamp.IsZero() {
if listenerSa.DeletionTimestamp.IsZero() {
logger.Info("Deleting the listener service account")
if err := r.Delete(ctx, listenerSa); err != nil {
return false, fmt.Errorf("failed to delete listener service account: %w", err)
@@ -382,7 +382,7 @@ func (r *AutoscalingListenerReconciler) cleanupResources(ctx context.Context, au
}
func (r *AutoscalingListenerReconciler) createServiceAccountForListener(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, logger logr.Logger) (ctrl.Result, error) {
newServiceAccount := r.ResourceBuilder.newScaleSetListenerServiceAccount(autoscalingListener)
newServiceAccount := r.newScaleSetListenerServiceAccount(autoscalingListener)
if err := ctrl.SetControllerReference(autoscalingListener, newServiceAccount, r.Scheme); err != nil {
return ctrl.Result{}, err
@@ -467,7 +467,7 @@ func (r *AutoscalingListenerReconciler) createListenerPod(ctx context.Context, a
logger.Info("Creating listener config secret")
podConfig, err := r.ResourceBuilder.newScaleSetListenerConfig(autoscalingListener, secret, metricsConfig, cert)
podConfig, err := r.newScaleSetListenerConfig(autoscalingListener, secret, metricsConfig, cert)
if err != nil {
logger.Error(err, "Failed to build listener config secret")
return ctrl.Result{}, err
@@ -486,7 +486,7 @@ func (r *AutoscalingListenerReconciler) createListenerPod(ctx context.Context, a
return ctrl.Result{Requeue: true}, nil
}
newPod, err := r.ResourceBuilder.newScaleSetListenerPod(autoscalingListener, &podConfig, serviceAccount, secret, metricsConfig, envs...)
newPod, err := r.newScaleSetListenerPod(autoscalingListener, &podConfig, serviceAccount, secret, metricsConfig, envs...)
if err != nil {
logger.Error(err, "Failed to build listener pod")
return ctrl.Result{}, err
@@ -546,7 +546,7 @@ func (r *AutoscalingListenerReconciler) certificate(ctx context.Context, autosca
}
func (r *AutoscalingListenerReconciler) createSecretsForListener(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, secret *corev1.Secret, logger logr.Logger) (ctrl.Result, error) {
newListenerSecret := r.ResourceBuilder.newScaleSetListenerSecretMirror(autoscalingListener, secret)
newListenerSecret := r.newScaleSetListenerSecretMirror(autoscalingListener, secret)
if err := ctrl.SetControllerReference(autoscalingListener, newListenerSecret, r.Scheme); err != nil {
return ctrl.Result{}, err
@@ -618,7 +618,7 @@ func (r *AutoscalingListenerReconciler) updateSecretsForListener(ctx context.Con
}
func (r *AutoscalingListenerReconciler) createRoleForListener(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, logger logr.Logger) (ctrl.Result, error) {
newRole := r.ResourceBuilder.newScaleSetListenerRole(autoscalingListener)
newRole := r.newScaleSetListenerRole(autoscalingListener)
logger.Info("Creating listener role", "namespace", newRole.Namespace, "name", newRole.Name, "rules", newRole.Rules)
if err := r.Create(ctx, newRole); err != nil {
@@ -646,7 +646,7 @@ func (r *AutoscalingListenerReconciler) updateRoleForListener(ctx context.Contex
}
func (r *AutoscalingListenerReconciler) createRoleBindingForListener(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, listenerRole *rbacv1.Role, serviceAccount *corev1.ServiceAccount, logger logr.Logger) (ctrl.Result, error) {
newRoleBinding := r.ResourceBuilder.newScaleSetListenerRoleBinding(autoscalingListener, listenerRole, serviceAccount)
newRoleBinding := r.newScaleSetListenerRoleBinding(autoscalingListener, listenerRole, serviceAccount)
logger.Info("Creating listener role binding",
"namespace", newRoleBinding.Namespace,

View File

@@ -99,7 +99,7 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl
return ctrl.Result{}, client.IgnoreNotFound(err)
}
if !autoscalingRunnerSet.ObjectMeta.DeletionTimestamp.IsZero() {
if !autoscalingRunnerSet.DeletionTimestamp.IsZero() {
if !controllerutil.ContainsFinalizer(autoscalingRunnerSet, autoscalingRunnerSetFinalizerName) {
return ctrl.Result{}, nil
}
@@ -332,7 +332,7 @@ func (r *AutoscalingRunnerSetReconciler) cleanupListener(ctx context.Context, au
err = r.Get(ctx, client.ObjectKey{Namespace: r.ControllerNamespace, Name: scaleSetListenerName(autoscalingRunnerSet)}, &listener)
switch {
case err == nil:
if listener.ObjectMeta.DeletionTimestamp.IsZero() {
if listener.DeletionTimestamp.IsZero() {
logger.Info("Deleting the listener")
if err := r.Delete(ctx, &listener); err != nil {
return false, fmt.Errorf("failed to delete listener: %w", err)
@@ -369,7 +369,7 @@ func (r *AutoscalingRunnerSetReconciler) deleteEphemeralRunnerSets(ctx context.C
for i := range oldRunnerSets {
rs := &oldRunnerSets[i]
// already deleted but contains finalizer so it still exists
if !rs.ObjectMeta.DeletionTimestamp.IsZero() {
if !rs.DeletionTimestamp.IsZero() {
logger.Info("Skip ephemeral runner set since it is already marked for deletion", "name", rs.Name)
continue
}
@@ -622,7 +622,7 @@ func (r *AutoscalingRunnerSetReconciler) deleteRunnerScaleSet(ctx context.Contex
}
func (r *AutoscalingRunnerSetReconciler) createEphemeralRunnerSet(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, log logr.Logger) (ctrl.Result, error) {
desiredRunnerSet, err := r.ResourceBuilder.newEphemeralRunnerSet(autoscalingRunnerSet)
desiredRunnerSet, err := r.newEphemeralRunnerSet(autoscalingRunnerSet)
if err != nil {
log.Error(err, "Could not create EphemeralRunnerSet")
return ctrl.Result{}, err
@@ -651,7 +651,7 @@ func (r *AutoscalingRunnerSetReconciler) createAutoScalingListenerForRunnerSet(c
})
}
autoscalingListener, err := r.ResourceBuilder.newAutoScalingListener(autoscalingRunnerSet, ephemeralRunnerSet, r.ControllerNamespace, r.DefaultRunnerScaleSetListenerImage, imagePullSecrets)
autoscalingListener, err := r.newAutoScalingListener(autoscalingRunnerSet, ephemeralRunnerSet, r.ControllerNamespace, r.DefaultRunnerScaleSetListenerImage, imagePullSecrets)
if err != nil {
log.Error(err, "Could not create AutoscalingListener spec")
return ctrl.Result{}, err

View File

@@ -280,10 +280,10 @@ var _ = Describe("Test AutoScalingRunnerSet controller", Ordered, func() {
// This should trigger re-creation of EphemeralRunnerSet and Listener
patched := autoscalingRunnerSet.DeepCopy()
patched.Spec.Template.Spec.PriorityClassName = "test-priority-class"
if patched.ObjectMeta.Annotations == nil {
patched.ObjectMeta.Annotations = make(map[string]string)
if patched.Annotations == nil {
patched.Annotations = make(map[string]string)
}
patched.ObjectMeta.Annotations[annotationKeyValuesHash] = "test-hash"
patched.Annotations[annotationKeyValuesHash] = "test-hash"
err = k8sClient.Patch(ctx, patched, client.MergeFrom(autoscalingRunnerSet))
Expect(err).NotTo(HaveOccurred(), "failed to patch AutoScalingRunnerSet")
autoscalingRunnerSet = patched.DeepCopy()
@@ -383,7 +383,7 @@ var _ = Describe("Test AutoScalingRunnerSet controller", Ordered, func() {
Expect(err).NotTo(HaveOccurred(), "failed to get Listener")
patched = autoscalingRunnerSet.DeepCopy()
patched.ObjectMeta.Annotations[annotationKeyValuesHash] = "hash-changes"
patched.Annotations[annotationKeyValuesHash] = "hash-changes"
err = k8sClient.Patch(ctx, patched, client.MergeFrom(autoscalingRunnerSet))
Expect(err).NotTo(HaveOccurred(), "failed to patch AutoScalingRunnerSet")
@@ -546,10 +546,10 @@ var _ = Describe("Test AutoScalingRunnerSet controller", Ordered, func() {
// Patch the AutoScalingRunnerSet image which should trigger
// the recreation of the Listener and EphemeralRunnerSet
patched := autoscalingRunnerSet.DeepCopy()
if patched.ObjectMeta.Annotations == nil {
patched.ObjectMeta.Annotations = make(map[string]string)
if patched.Annotations == nil {
patched.Annotations = make(map[string]string)
}
patched.ObjectMeta.Annotations[annotationKeyValuesHash] = "testgroup2"
patched.Annotations[annotationKeyValuesHash] = "testgroup2"
patched.Spec.Template.Spec = corev1.PodSpec{
Containers: []corev1.Container{
{
@@ -875,7 +875,7 @@ var _ = Describe("Test AutoscalingController creation failures", Ordered, func()
autoscalingRunnerSetTestInterval,
).Should(BeEquivalentTo(autoscalingRunnerSetFinalizerName), "AutoScalingRunnerSet should have a finalizer")
ars.ObjectMeta.Annotations = make(map[string]string)
ars.Annotations = make(map[string]string)
err = k8sClient.Update(ctx, ars)
Expect(err).NotTo(HaveOccurred(), "Update autoscaling runner set without annotation should be successful")

View File

@@ -70,7 +70,7 @@ func (r *EphemeralRunnerReconciler) Reconcile(ctx context.Context, req ctrl.Requ
return ctrl.Result{}, client.IgnoreNotFound(err)
}
if !ephemeralRunner.ObjectMeta.DeletionTimestamp.IsZero() {
if !ephemeralRunner.DeletionTimestamp.IsZero() {
if !controllerutil.ContainsFinalizer(ephemeralRunner, ephemeralRunnerFinalizerName) {
return ctrl.Result{}, nil
}
@@ -319,7 +319,7 @@ func (r *EphemeralRunnerReconciler) cleanupResources(ctx context.Context, epheme
err := r.Get(ctx, types.NamespacedName{Namespace: ephemeralRunner.Namespace, Name: ephemeralRunner.Name}, pod)
switch {
case err == nil:
if pod.ObjectMeta.DeletionTimestamp.IsZero() {
if pod.DeletionTimestamp.IsZero() {
log.Info("Deleting the runner pod")
if err := r.Delete(ctx, pod); err != nil && !kerrors.IsNotFound(err) {
return fmt.Errorf("failed to delete pod: %w", err)
@@ -339,7 +339,7 @@ func (r *EphemeralRunnerReconciler) cleanupResources(ctx context.Context, epheme
err = r.Get(ctx, types.NamespacedName{Namespace: ephemeralRunner.Namespace, Name: ephemeralRunner.Name}, secret)
switch {
case err == nil:
if secret.ObjectMeta.DeletionTimestamp.IsZero() {
if secret.DeletionTimestamp.IsZero() {
log.Info("Deleting the jitconfig secret")
if err := r.Delete(ctx, secret); err != nil && !kerrors.IsNotFound(err) {
return fmt.Errorf("failed to delete secret: %w", err)
@@ -393,7 +393,7 @@ func (r *EphemeralRunnerReconciler) cleanupRunnerLinkedPods(ctx context.Context,
var errs []error
for i := range runnerLinkedPodList.Items {
linkedPod := &runnerLinkedPodList.Items[i]
if !linkedPod.ObjectMeta.DeletionTimestamp.IsZero() {
if !linkedPod.DeletionTimestamp.IsZero() {
continue
}
@@ -409,7 +409,7 @@ func (r *EphemeralRunnerReconciler) cleanupRunnerLinkedPods(ctx context.Context,
func (r *EphemeralRunnerReconciler) cleanupRunnerLinkedSecrets(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) error {
runnerLinkedLabels := client.MatchingLabels(
map[string]string{
"runner-pod": ephemeralRunner.ObjectMeta.Name,
"runner-pod": ephemeralRunner.Name,
},
)
var runnerLinkedSecretList corev1.SecretList
@@ -427,7 +427,7 @@ func (r *EphemeralRunnerReconciler) cleanupRunnerLinkedSecrets(ctx context.Conte
var errs []error
for i := range runnerLinkedSecretList.Items {
s := &runnerLinkedSecretList.Items[i]
if !s.ObjectMeta.DeletionTimestamp.IsZero() {
if !s.DeletionTimestamp.IsZero() {
continue
}
@@ -474,7 +474,7 @@ func (r *EphemeralRunnerReconciler) markAsFinished(ctx context.Context, ephemera
// deletePodAsFailed is responsible for deleting the pod and updating the .Status.Failures for tracking failure count.
// It should not be responsible for setting the status to Failed.
func (r *EphemeralRunnerReconciler) deletePodAsFailed(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, pod *corev1.Pod, log logr.Logger) error {
if pod.ObjectMeta.DeletionTimestamp.IsZero() {
if pod.DeletionTimestamp.IsZero() {
log.Info("Deleting the ephemeral runner pod", "podId", pod.UID)
if err := r.Delete(ctx, pod); err != nil && !kerrors.IsNotFound(err) {
return fmt.Errorf("failed to delete pod with status failed: %w", err)
@@ -640,7 +640,7 @@ func (r *EphemeralRunnerReconciler) createPod(ctx context.Context, runner *v1alp
}
log.Info("Creating new pod for ephemeral runner")
newPod := r.ResourceBuilder.newEphemeralRunnerPod(ctx, runner, secret, envs...)
newPod := r.newEphemeralRunnerPod(ctx, runner, secret, envs...)
if err := ctrl.SetControllerReference(runner, newPod, r.Scheme); err != nil {
log.Error(err, "Failed to set controller reference to a new pod")
@@ -665,7 +665,7 @@ func (r *EphemeralRunnerReconciler) createPod(ctx context.Context, runner *v1alp
func (r *EphemeralRunnerReconciler) createSecret(ctx context.Context, runner *v1alpha1.EphemeralRunner, log logr.Logger) (*ctrl.Result, error) {
log.Info("Creating new secret for ephemeral runner")
jitSecret := r.ResourceBuilder.newEphemeralRunnerJitSecret(runner)
jitSecret := r.newEphemeralRunnerJitSecret(runner)
if err := ctrl.SetControllerReference(runner, jitSecret, r.Scheme); err != nil {
return &ctrl.Result{}, fmt.Errorf("failed to set controller reference: %w", err)
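For orientation, the DeletionTimestamp checks in this controller follow the standard controller-runtime finalizer pattern. A hedged sketch of the general shape (cleanup details elided; not ARC's exact logic):

package controllers

import (
	"context"

	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)

// reconcileDeletion sketches the finalizer handling seen above: add the
// finalizer while the object is live, run cleanup and remove it once a
// deletion timestamp appears.
func reconcileDeletion(ctx context.Context, c client.Client, obj client.Object, finalizer string) error {
	if !obj.GetDeletionTimestamp().IsZero() {
		if !controllerutil.ContainsFinalizer(obj, finalizer) {
			return nil // already cleaned up; deletion can proceed
		}
		// ... cleanup of dependent pods/secrets would run here ...
		controllerutil.RemoveFinalizer(obj, finalizer)
		return c.Update(ctx, obj)
	}
	if controllerutil.AddFinalizer(obj, finalizer) {
		return c.Update(ctx, obj)
	}
	return nil
}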

View File

@@ -83,7 +83,7 @@ func (r *EphemeralRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl.R
}
// Requested deletion does not need to be reconciled.
if !ephemeralRunnerSet.ObjectMeta.DeletionTimestamp.IsZero() {
if !ephemeralRunnerSet.DeletionTimestamp.IsZero() {
if !controllerutil.ContainsFinalizer(ephemeralRunnerSet, ephemeralRunnerSetFinalizerName) {
return ctrl.Result{}, nil
}
@@ -360,7 +360,7 @@ func (r *EphemeralRunnerSetReconciler) createEphemeralRunners(ctx context.Contex
// Track multiple errors at once and return the bundle.
errs := make([]error, 0)
for i := 0; i < count; i++ {
ephemeralRunner := r.ResourceBuilder.newEphemeralRunner(runnerSet)
ephemeralRunner := r.newEphemeralRunner(runnerSet)
if runnerSet.Spec.EphemeralRunnerSpec.Proxy != nil {
ephemeralRunner.Spec.ProxySecretRef = proxyEphemeralRunnerSetSecretName(runnerSet)
}
@@ -641,7 +641,7 @@ func newEphemeralRunnerState(ephemeralRunnerList *v1alpha1.EphemeralRunnerList)
if err == nil && patchID > ephemeralRunnerState.latestPatchID {
ephemeralRunnerState.latestPatchID = patchID
}
if !r.ObjectMeta.DeletionTimestamp.IsZero() {
if !r.DeletionTimestamp.IsZero() {
ephemeralRunnerState.deleting = append(ephemeralRunnerState.deleting, r)
continue
}

View File

@@ -543,8 +543,8 @@ func (b *ResourceBuilder) newEphemeralRunnerSet(autoscalingRunnerSet *v1alpha1.A
newEphemeralRunnerSet := &v1alpha1.EphemeralRunnerSet{
TypeMeta: metav1.TypeMeta{},
ObjectMeta: metav1.ObjectMeta{
GenerateName: autoscalingRunnerSet.ObjectMeta.Name + "-",
Namespace: autoscalingRunnerSet.ObjectMeta.Namespace,
GenerateName: autoscalingRunnerSet.Name + "-",
Namespace: autoscalingRunnerSet.Namespace,
Labels: labels,
Annotations: newAnnotations,
OwnerReferences: []metav1.OwnerReference{
@@ -617,18 +617,18 @@ func (b *ResourceBuilder) newEphemeralRunnerPod(ctx context.Context, runner *v1a
labels := map[string]string{}
annotations := map[string]string{}
for k, v := range runner.ObjectMeta.Labels {
for k, v := range runner.Labels {
labels[k] = v
}
for k, v := range runner.Spec.PodTemplateSpec.Labels {
for k, v := range runner.Spec.Labels {
labels[k] = v
}
labels["actions-ephemeral-runner"] = string(corev1.ConditionTrue)
for k, v := range runner.ObjectMeta.Annotations {
for k, v := range runner.Annotations {
annotations[k] = v
}
for k, v := range runner.Spec.PodTemplateSpec.Annotations {
for k, v := range runner.Spec.Annotations {
annotations[k] = v
}
@@ -640,8 +640,8 @@ func (b *ResourceBuilder) newEphemeralRunnerPod(ctx context.Context, runner *v1a
)
objectMeta := metav1.ObjectMeta{
Name: runner.ObjectMeta.Name,
Namespace: runner.ObjectMeta.Namespace,
Name: runner.Name,
Namespace: runner.Namespace,
Labels: labels,
Annotations: annotations,
OwnerReferences: []metav1.OwnerReference{
@@ -657,10 +657,10 @@ func (b *ResourceBuilder) newEphemeralRunnerPod(ctx context.Context, runner *v1a
}
newPod.ObjectMeta = objectMeta
newPod.Spec = runner.Spec.PodTemplateSpec.Spec
newPod.Spec.Containers = make([]corev1.Container, 0, len(runner.Spec.PodTemplateSpec.Spec.Containers))
newPod.Spec = runner.Spec.Spec
newPod.Spec.Containers = make([]corev1.Container, 0, len(runner.Spec.Spec.Containers))
for _, c := range runner.Spec.PodTemplateSpec.Spec.Containers {
for _, c := range runner.Spec.Spec.Containers {
if c.Name == v1alpha1.EphemeralRunnerContainerName {
c.Env = append(
c.Env,

View File

@@ -345,7 +345,7 @@ func (r *HorizontalRunnerAutoscalerReconciler) suggestReplicasByPercentageRunner
}
var runnerPodList corev1.PodList
if err := r.Client.List(ctx, &runnerPodList, client.InNamespace(hra.Namespace), client.MatchingLabels(map[string]string{
if err := r.List(ctx, &runnerPodList, client.InNamespace(hra.Namespace), client.MatchingLabels(map[string]string{
kindLabel: hra.Spec.ScaleTargetRef.Name,
})); err != nil {
return nil, err

View File

@@ -29,7 +29,7 @@ func newGithubClient(server *httptest.Server) *github.Client {
if err != nil {
panic(err)
}
client.Client.BaseURL = baseURL
client.BaseURL = baseURL
return client
}

View File

@@ -82,8 +82,8 @@ func (s *batchScaler) Add(st *ScaleTarget) {
break batch
case st := <-s.queue:
nsName := types.NamespacedName{
Namespace: st.HorizontalRunnerAutoscaler.Namespace,
Name: st.HorizontalRunnerAutoscaler.Name,
Namespace: st.Namespace,
Name: st.Name,
}
b, ok := batches[nsName]
if !ok {
@@ -208,7 +208,7 @@ func (s *batchScaler) planBatchScale(ctx context.Context, batch batchScaleOperat
//
// In other words, updating HRA.spec.scaleTriggers[].duration does not delay a capacity reservation's expiration beyond
// the "intended" duration, which is the duration of the trigger at the time the reservation was created.
duration := copy.Spec.CapacityReservations[i].ExpirationTime.Time.Sub(copy.Spec.CapacityReservations[i].EffectiveTime.Time)
duration := copy.Spec.CapacityReservations[i].ExpirationTime.Sub(copy.Spec.CapacityReservations[i].EffectiveTime.Time)
copy.Spec.CapacityReservations[i].EffectiveTime = metav1.Time{Time: now}
copy.Spec.CapacityReservations[i].ExpirationTime = metav1.Time{Time: now.Add(duration)}
}
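The arithmetic in this hunk preserves the originally intended reservation length while sliding the window forward to now. A standalone illustration:

package main

import (
	"fmt"
	"time"
)

func main() {
	now := time.Now()
	effective := now.Add(-3 * time.Minute)       // when the reservation took effect
	expiration := effective.Add(10 * time.Minute) // original expiry

	// Preserve the originally intended duration, then re-anchor the
	// window at "now", which is exactly what planBatchScale does above.
	duration := expiration.Sub(effective)
	newEffective := now
	newExpiration := now.Add(duration)

	fmt.Println(duration, newEffective.Before(newExpiration)) // 10m0s true
}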

View File

@@ -503,13 +503,13 @@ func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) getManagedRunnerGroup
switch kind {
case "RunnerSet":
var rs v1alpha1.RunnerSet
if err := autoscaler.Client.Get(context.Background(), types.NamespacedName{Namespace: hra.Namespace, Name: hra.Spec.ScaleTargetRef.Name}, &rs); err != nil {
if err := autoscaler.Get(context.Background(), types.NamespacedName{Namespace: hra.Namespace, Name: hra.Spec.ScaleTargetRef.Name}, &rs); err != nil {
return groups, err
}
o, e, g = rs.Spec.Organization, rs.Spec.Enterprise, rs.Spec.Group
case "RunnerDeployment", "":
var rd v1alpha1.RunnerDeployment
if err := autoscaler.Client.Get(context.Background(), types.NamespacedName{Namespace: hra.Namespace, Name: hra.Spec.ScaleTargetRef.Name}, &rd); err != nil {
if err := autoscaler.Get(context.Background(), types.NamespacedName{Namespace: hra.Namespace, Name: hra.Spec.ScaleTargetRef.Name}, &rd); err != nil {
return groups, err
}
o, e, g = rd.Spec.Template.Spec.Organization, rd.Spec.Template.Spec.Enterprise, rd.Spec.Template.Spec.Group
@@ -562,7 +562,7 @@ func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) getJobScaleTarget(ctx
HRA:
for _, hra := range hras {
if !hra.ObjectMeta.DeletionTimestamp.IsZero() {
if !hra.DeletionTimestamp.IsZero() {
continue
}
@@ -603,7 +603,7 @@ HRA:
case "RunnerSet":
var rs v1alpha1.RunnerSet
if err := autoscaler.Client.Get(context.Background(), types.NamespacedName{Namespace: hra.Namespace, Name: hra.Spec.ScaleTargetRef.Name}, &rs); err != nil {
if err := autoscaler.Get(context.Background(), types.NamespacedName{Namespace: hra.Namespace, Name: hra.Spec.ScaleTargetRef.Name}, &rs); err != nil {
return nil, err
}
@@ -634,7 +634,7 @@ HRA:
case "RunnerDeployment", "":
var rd v1alpha1.RunnerDeployment
if err := autoscaler.Client.Get(context.Background(), types.NamespacedName{Namespace: hra.Namespace, Name: hra.Spec.ScaleTargetRef.Name}, &rd); err != nil {
if err := autoscaler.Get(context.Background(), types.NamespacedName{Namespace: hra.Namespace, Name: hra.Spec.ScaleTargetRef.Name}, &rd); err != nil {
return nil, err
}
@@ -676,7 +676,7 @@ func getValidCapacityReservations(autoscaler *v1alpha1.HorizontalRunnerAutoscale
now := time.Now()
for _, reservation := range autoscaler.Spec.CapacityReservations {
if reservation.ExpirationTime.Time.After(now) {
if reservation.ExpirationTime.After(now) {
capacityReservations = append(capacityReservations, reservation)
}
}
@@ -713,7 +713,7 @@ func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) indexer(rawObj client
switch hra.Spec.ScaleTargetRef.Kind {
case "", "RunnerDeployment":
var rd v1alpha1.RunnerDeployment
if err := autoscaler.Client.Get(context.Background(), types.NamespacedName{Namespace: hra.Namespace, Name: hra.Spec.ScaleTargetRef.Name}, &rd); err != nil {
if err := autoscaler.Get(context.Background(), types.NamespacedName{Namespace: hra.Namespace, Name: hra.Spec.ScaleTargetRef.Name}, &rd); err != nil {
autoscaler.Log.V(1).Info(fmt.Sprintf("RunnerDeployment not found with scale target ref name %s for hra %s", hra.Spec.ScaleTargetRef.Name, hra.Name))
return nil
}
@@ -740,7 +740,7 @@ func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) indexer(rawObj client
return keys
case "RunnerSet":
var rs v1alpha1.RunnerSet
if err := autoscaler.Client.Get(context.Background(), types.NamespacedName{Namespace: hra.Namespace, Name: hra.Spec.ScaleTargetRef.Name}, &rs); err != nil {
if err := autoscaler.Get(context.Background(), types.NamespacedName{Namespace: hra.Namespace, Name: hra.Spec.ScaleTargetRef.Name}, &rs); err != nil {
autoscaler.Log.V(1).Info(fmt.Sprintf("RunnerSet not found with scale target ref name %s for hra %s", hra.Spec.ScaleTargetRef.Name, hra.Name))
return nil
}

View File

@@ -71,7 +71,7 @@ func (r *HorizontalRunnerAutoscalerReconciler) Reconcile(ctx context.Context, re
return ctrl.Result{}, client.IgnoreNotFound(err)
}
if !hra.ObjectMeta.DeletionTimestamp.IsZero() {
if !hra.DeletionTimestamp.IsZero() {
r.GitHubClient.DeinitForHRA(&hra)
return ctrl.Result{}, nil
@@ -91,7 +91,7 @@ func (r *HorizontalRunnerAutoscalerReconciler) Reconcile(ctx context.Context, re
return ctrl.Result{}, client.IgnoreNotFound(err)
}
if !rd.ObjectMeta.DeletionTimestamp.IsZero() {
if !rd.DeletionTimestamp.IsZero() {
return ctrl.Result{}, nil
}
@@ -120,14 +120,14 @@ func (r *HorizontalRunnerAutoscalerReconciler) Reconcile(ctx context.Context, re
copy.Spec.EffectiveTime = &metav1.Time{Time: *effectiveTime}
}
if err := r.Client.Patch(ctx, copy, client.MergeFrom(&rd)); err != nil {
if err := r.Patch(ctx, copy, client.MergeFrom(&rd)); err != nil {
return fmt.Errorf("patching runnerdeployment to have %d replicas: %w", newDesiredReplicas, err)
}
} else if ephemeral && effectiveTime != nil {
copy := rd.DeepCopy()
copy.Spec.EffectiveTime = &metav1.Time{Time: *effectiveTime}
if err := r.Client.Patch(ctx, copy, client.MergeFrom(&rd)); err != nil {
if err := r.Patch(ctx, copy, client.MergeFrom(&rd)); err != nil {
return fmt.Errorf("patching runnerdeployment to have %d replicas: %w", newDesiredReplicas, err)
}
}
@@ -142,7 +142,7 @@ func (r *HorizontalRunnerAutoscalerReconciler) Reconcile(ctx context.Context, re
return ctrl.Result{}, client.IgnoreNotFound(err)
}
if !rs.ObjectMeta.DeletionTimestamp.IsZero() {
if !rs.DeletionTimestamp.IsZero() {
return ctrl.Result{}, nil
}
@@ -160,7 +160,7 @@ func (r *HorizontalRunnerAutoscalerReconciler) Reconcile(ctx context.Context, re
org: rs.Spec.Organization,
repo: rs.Spec.Repository,
replicas: replicas,
labels: rs.Spec.RunnerConfig.Labels,
labels: rs.Spec.Labels,
getRunnerMap: func() (map[string]struct{}, error) {
// return the list of runners in namespace. Horizontal Runner Autoscaler should only be responsible for scaling resources in its own ns.
var runnerPodList corev1.PodList
@@ -224,14 +224,14 @@ func (r *HorizontalRunnerAutoscalerReconciler) Reconcile(ctx context.Context, re
copy.Spec.EffectiveTime = &metav1.Time{Time: *effectiveTime}
}
if err := r.Client.Patch(ctx, copy, client.MergeFrom(&rs)); err != nil {
if err := r.Patch(ctx, copy, client.MergeFrom(&rs)); err != nil {
return fmt.Errorf("patching runnerset to have %d replicas: %w", newDesiredReplicas, err)
}
} else if ephemeral && effectiveTime != nil {
copy := rs.DeepCopy()
copy.Spec.EffectiveTime = &metav1.Time{Time: *effectiveTime}
if err := r.Client.Patch(ctx, copy, client.MergeFrom(&rs)); err != nil {
if err := r.Patch(ctx, copy, client.MergeFrom(&rs)); err != nil {
return fmt.Errorf("patching runnerset to have %d replicas: %w", newDesiredReplicas, err)
}
}
@@ -253,7 +253,7 @@ func (r *HorizontalRunnerAutoscalerReconciler) scaleTargetFromRD(ctx context.Con
org: rd.Spec.Template.Spec.Organization,
repo: rd.Spec.Template.Spec.Repository,
replicas: rd.Spec.Replicas,
labels: rd.Spec.Template.Spec.RunnerConfig.Labels,
labels: rd.Spec.Template.Spec.Labels,
getRunnerMap: func() (map[string]struct{}, error) {
// return the list of runners in namespace. Horizontal Runner Autoscaler should only be responsible for scaling resources in its own ns.
var runnerList v1alpha1.RunnerList
@@ -484,7 +484,7 @@ func (r *HorizontalRunnerAutoscalerReconciler) computeReplicasWithCache(ghc *arc
var reserved int
for _, reservation := range hra.Spec.CapacityReservations {
if reservation.ExpirationTime.Time.After(now) {
if reservation.ExpirationTime.After(now) {
reserved += reservation.Replicas
}
}

View File

@@ -20,12 +20,13 @@ import (
"context"
"errors"
"fmt"
"k8s.io/apimachinery/pkg/api/resource"
"reflect"
"strconv"
"strings"
"time"
"k8s.io/apimachinery/pkg/api/resource"
"github.com/actions/actions-runner-controller/build"
"github.com/actions/actions-runner-controller/hash"
"github.com/go-logr/logr"
@@ -107,12 +108,12 @@ func (r *RunnerReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr
return ctrl.Result{}, client.IgnoreNotFound(err)
}
if runner.ObjectMeta.DeletionTimestamp.IsZero() {
finalizers, added := addFinalizer(runner.ObjectMeta.Finalizers, finalizerName)
if runner.DeletionTimestamp.IsZero() {
finalizers, added := addFinalizer(runner.Finalizers, finalizerName)
if added {
newRunner := runner.DeepCopy()
newRunner.ObjectMeta.Finalizers = finalizers
newRunner.Finalizers = finalizers
if err := r.Update(ctx, newRunner); err != nil {
log.Error(err, "Failed to update runner")
@@ -271,11 +272,11 @@ func ephemeralRunnerContainerStatus(pod *corev1.Pod) *corev1.ContainerStatus {
}
func (r *RunnerReconciler) processRunnerDeletion(runner v1alpha1.Runner, ctx context.Context, log logr.Logger, pod *corev1.Pod) (reconcile.Result, error) {
finalizers, removed := removeFinalizer(runner.ObjectMeta.Finalizers, finalizerName)
finalizers, removed := removeFinalizer(runner.Finalizers, finalizerName)
if removed {
newRunner := runner.DeepCopy()
newRunner.ObjectMeta.Finalizers = finalizers
newRunner.Finalizers = finalizers
if err := r.Patch(ctx, newRunner, client.MergeFrom(&runner)); err != nil {
log.Error(err, "Unable to remove finalizer")
@@ -305,8 +306,8 @@ func (r *RunnerReconciler) processRunnerCreation(ctx context.Context, runner v1a
if needsServiceAccount {
serviceAccount := &corev1.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{
Name: runner.ObjectMeta.Name,
Namespace: runner.ObjectMeta.Namespace,
Name: runner.Name,
Namespace: runner.Namespace,
},
}
if res := r.createObject(ctx, serviceAccount, serviceAccount.ObjectMeta, &runner, log); res != nil {
@@ -321,7 +322,7 @@ func (r *RunnerReconciler) processRunnerCreation(ctx context.Context, runner v1a
APIGroups: []string{"actions.summerwind.dev"},
Resources: []string{"runners/status"},
Verbs: []string{"get", "update", "patch"},
ResourceNames: []string{runner.ObjectMeta.Name},
ResourceNames: []string{runner.Name},
},
}...)
}
@@ -359,8 +360,8 @@ func (r *RunnerReconciler) processRunnerCreation(ctx context.Context, runner v1a
role := &rbacv1.Role{
ObjectMeta: metav1.ObjectMeta{
Name: runner.ObjectMeta.Name,
Namespace: runner.ObjectMeta.Namespace,
Name: runner.Name,
Namespace: runner.Namespace,
},
Rules: rules,
}
@@ -370,19 +371,19 @@ func (r *RunnerReconciler) processRunnerCreation(ctx context.Context, runner v1a
roleBinding := &rbacv1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: runner.ObjectMeta.Name,
Namespace: runner.ObjectMeta.Namespace,
Name: runner.Name,
Namespace: runner.Namespace,
},
RoleRef: rbacv1.RoleRef{
APIGroup: "rbac.authorization.k8s.io",
Kind: "Role",
Name: runner.ObjectMeta.Name,
Name: runner.Name,
},
Subjects: []rbacv1.Subject{
{
Kind: "ServiceAccount",
Name: runner.ObjectMeta.Name,
Namespace: runner.ObjectMeta.Namespace,
Name: runner.Name,
Namespace: runner.Namespace,
},
},
}
@@ -482,7 +483,7 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) {
labels := map[string]string{}
for k, v := range runner.ObjectMeta.Labels {
for k, v := range runner.Labels {
labels[k] = v
}
@@ -511,8 +512,8 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) {
//
// See https://github.com/actions/actions-runner-controller/issues/143 for more context.
labels[LabelKeyPodTemplateHash] = hash.FNVHashStringObjects(
filterLabels(runner.ObjectMeta.Labels, LabelKeyRunnerTemplateHash),
runner.ObjectMeta.Annotations,
filterLabels(runner.Labels, LabelKeyRunnerTemplateHash),
runner.Annotations,
runner.Spec,
ghc.GithubBaseURL,
// Token change should trigger replacement.
@@ -523,10 +524,10 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) {
)
objectMeta := metav1.ObjectMeta{
Name: runner.ObjectMeta.Name,
Namespace: runner.ObjectMeta.Namespace,
Name: runner.Name,
Namespace: runner.Namespace,
Labels: labels,
Annotations: runner.ObjectMeta.Annotations,
Annotations: runner.Annotations,
}
template.ObjectMeta = objectMeta
@@ -649,7 +650,7 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) {
if runnerSpec.ServiceAccountName != "" {
pod.Spec.ServiceAccountName = runnerSpec.ServiceAccountName
} else if r.RunnerPodDefaults.UseRunnerStatusUpdateHook || runner.Spec.ContainerMode == "kubernetes" {
pod.Spec.ServiceAccountName = runner.ObjectMeta.Name
pod.Spec.ServiceAccountName = runner.Name
}
if runnerSpec.AutomountServiceAccountToken != nil {
@@ -704,7 +705,7 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) {
pod.Spec.RuntimeClassName = runnerSpec.RuntimeClassName
}
pod.ObjectMeta.Name = runner.ObjectMeta.Name
pod.Name = runner.Name
// Inject the registration token and the runner name
updated := mutatePod(&pod, runner.Status.Registration.Token)
@@ -720,7 +721,7 @@ func mutatePod(pod *corev1.Pod, token string) *corev1.Pod {
updated := pod.DeepCopy()
if getRunnerEnv(pod, EnvVarRunnerName) == "" {
setRunnerEnv(updated, EnvVarRunnerName, pod.ObjectMeta.Name)
setRunnerEnv(updated, EnvVarRunnerName, pod.Name)
}
if getRunnerEnv(pod, EnvVarRunnerToken) == "" {
@@ -770,11 +771,11 @@ func runnerHookEnvs(pod *corev1.Pod) ([]corev1.EnvVar, error) {
func newRunnerPodWithContainerMode(containerMode string, template corev1.Pod, runnerSpec v1alpha1.RunnerConfig, githubBaseURL string, d RunnerPodDefaults) (corev1.Pod, error) {
var (
privileged bool = true
dockerdInRunner bool = runnerSpec.DockerdWithinRunnerContainer != nil && *runnerSpec.DockerdWithinRunnerContainer
dockerEnabled bool = runnerSpec.DockerEnabled == nil || *runnerSpec.DockerEnabled
ephemeral bool = runnerSpec.Ephemeral == nil || *runnerSpec.Ephemeral
dockerdInRunnerPrivileged bool = dockerdInRunner
privileged = true
dockerdInRunner = runnerSpec.DockerdWithinRunnerContainer != nil && *runnerSpec.DockerdWithinRunnerContainer
dockerEnabled = runnerSpec.DockerEnabled == nil || *runnerSpec.DockerEnabled
ephemeral = runnerSpec.Ephemeral == nil || *runnerSpec.Ephemeral
dockerdInRunnerPrivileged = dockerdInRunner
defaultRunnerImage = d.RunnerImage
defaultRunnerImagePullSecrets = d.RunnerImagePullSecrets
@@ -797,10 +798,10 @@ func newRunnerPodWithContainerMode(containerMode string, template corev1.Pod, ru
template = *template.DeepCopy()
// This label selector is used by default when rd.Spec.Selector is empty.
template.ObjectMeta.Labels = CloneAndAddLabel(template.ObjectMeta.Labels, LabelKeyRunner, "")
template.ObjectMeta.Labels = CloneAndAddLabel(template.ObjectMeta.Labels, LabelKeyPodMutation, LabelValuePodMutation)
template.Labels = CloneAndAddLabel(template.Labels, LabelKeyRunner, "")
template.Labels = CloneAndAddLabel(template.Labels, LabelKeyPodMutation, LabelValuePodMutation)
if runnerSpec.GitHubAPICredentialsFrom != nil {
template.ObjectMeta.Annotations = CloneAndAddLabel(template.ObjectMeta.Annotations, annotationKeyGitHubAPICredsSecret, runnerSpec.GitHubAPICredentialsFrom.SecretRef.Name)
template.Annotations = CloneAndAddLabel(template.Annotations, annotationKeyGitHubAPICredsSecret, runnerSpec.GitHubAPICredentialsFrom.SecretRef.Name)
}
workDir := runnerSpec.WorkDir
@@ -887,10 +888,11 @@ func newRunnerPodWithContainerMode(containerMode string, template corev1.Pod, ru
for i := range template.Spec.Containers {
c := template.Spec.Containers[i]
if c.Name == containerName {
switch c.Name {
case containerName:
runnerContainerIndex = i
runnerContainer = &c
} else if c.Name == "docker" {
case "docker":
dockerdContainerIndex = i
dockerdContainer = &c
}
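Rewriting the if/else chain as a switch, as above, keeps the container-name dispatch flat and makes adding cases cheap. A reduced sketch of the same shape:

package main

import "fmt"

func classify(name, runnerContainerName string) string {
	// Same dispatch shape as the hunk above: switch on the container
	// name instead of chaining if/else-if.
	switch name {
	case runnerContainerName:
		return "runner"
	case "docker":
		return "dockerd sidecar"
	default:
		return "other"
	}
}

func main() {
	for _, n := range []string{"runner", "docker", "init"} {
		fmt.Println(n, "->", classify(n, "runner"))
	}
}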
@@ -1364,7 +1366,7 @@ func applyWorkVolumeClaimTemplateToPod(pod *corev1.Pod, workVolumeClaimTemplate
}
for i := range pod.Spec.Volumes {
if pod.Spec.Volumes[i].Name == "work" {
return fmt.Errorf("Work volume should not be specified in container mode kubernetes. workVolumeClaimTemplate field should be used instead.")
return fmt.Errorf("work volume should not be specified in container mode kubernetes. workVolumeClaimTemplate field should be used instead")
}
}
pod.Spec.Volumes = append(pod.Spec.Volumes, workVolumeClaimTemplate.V1Volume())
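The two message rewrites in this file follow Go's error-string convention (staticcheck ST1005): error strings start lower-case and carry no trailing punctuation, because they are routinely wrapped into longer messages. For instance:

package main

import (
	"errors"
	"fmt"
)

func main() {
	// Lower-case, unpunctuated errors compose cleanly when wrapped.
	base := errors.New("work volume should not be specified in container mode kubernetes")
	wrapped := fmt.Errorf("validating pod spec: %w", base)
	fmt.Println(wrapped)
}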

View File

@@ -79,7 +79,7 @@ func (r *RunnerPodReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
}
if len(envvars) == 0 {
return ctrl.Result{}, errors.New("Could not determine env vars for runner Pod")
return ctrl.Result{}, errors.New("could not determine env vars for runner Pod")
}
var enterprise, org, repo string
@@ -103,8 +103,8 @@ func (r *RunnerPodReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
return ctrl.Result{}, err
}
if runnerPod.ObjectMeta.DeletionTimestamp.IsZero() {
finalizers, added := addFinalizer(runnerPod.ObjectMeta.Finalizers, runnerPodFinalizerName)
if runnerPod.DeletionTimestamp.IsZero() {
finalizers, added := addFinalizer(runnerPod.Finalizers, runnerPodFinalizerName)
var cleanupFinalizersAdded bool
if isContainerMode {
@@ -113,7 +113,7 @@ func (r *RunnerPodReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
if added || cleanupFinalizersAdded {
newRunner := runnerPod.DeepCopy()
newRunner.ObjectMeta.Finalizers = finalizers
newRunner.Finalizers = finalizers
if err := r.Patch(ctx, newRunner, client.MergeFrom(&runnerPod)); err != nil {
log.Error(err, "Failed to update runner")
@@ -142,7 +142,7 @@ func (r *RunnerPodReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
}
}
if finalizers, removed := removeFinalizer(runnerPod.ObjectMeta.Finalizers, runnerLinkedResourcesFinalizerName); removed {
if finalizers, removed := removeFinalizer(runnerPod.Finalizers, runnerLinkedResourcesFinalizerName); removed {
if err := r.cleanupRunnerLinkedPods(ctx, &runnerPod, log); err != nil {
log.Info("Runner-linked pods clean up that has failed due to an error. If this persists, please manually remove the runner-linked pods to unblock ARC", "err", err.Error())
return ctrl.Result{Requeue: true, RequeueAfter: 30 * time.Second}, nil
@@ -152,7 +152,7 @@ func (r *RunnerPodReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
return ctrl.Result{Requeue: true, RequeueAfter: 30 * time.Second}, nil
}
patchedPod := runnerPod.DeepCopy()
patchedPod.ObjectMeta.Finalizers = finalizers
patchedPod.Finalizers = finalizers
if err := r.Patch(ctx, patchedPod, client.MergeFrom(&runnerPod)); err != nil {
log.Error(err, "Failed to update runner for finalizer linked resources removal")
@@ -163,7 +163,7 @@ func (r *RunnerPodReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
runnerPod = *patchedPod
}
finalizers, removed := removeFinalizer(runnerPod.ObjectMeta.Finalizers, runnerPodFinalizerName)
finalizers, removed := removeFinalizer(runnerPod.Finalizers, runnerPodFinalizerName)
if removed {
// In a standard scenario, the upstream controller, like the runnerset-controller, ensures this runner is gracefully stopped before the deletion timestamp is set.
@@ -175,7 +175,7 @@ func (r *RunnerPodReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
}
patchedPod := updatedPod.DeepCopy()
patchedPod.ObjectMeta.Finalizers = finalizers
patchedPod.Finalizers = finalizers
// We commit the removal of the finalizer so that Kubernetes notices it and deletes the pod resource from the cluster.
if err := r.Patch(ctx, patchedPod, client.MergeFrom(&runnerPod)); err != nil {
@@ -284,7 +284,7 @@ func (r *RunnerPodReconciler) cleanupRunnerLinkedPods(ctx context.Context, pod *
var runnerLinkedPodList corev1.PodList
if err := r.List(ctx, &runnerLinkedPodList, client.InNamespace(pod.Namespace), client.MatchingLabels(
map[string]string{
"runner-pod": pod.ObjectMeta.Name,
"runner-pod": pod.Name,
},
)); err != nil {
return fmt.Errorf("failed to list runner-linked pods: %w", err)
@@ -295,7 +295,7 @@ func (r *RunnerPodReconciler) cleanupRunnerLinkedPods(ctx context.Context, pod *
errs []error
)
for _, p := range runnerLinkedPodList.Items {
if !p.ObjectMeta.DeletionTimestamp.IsZero() {
if !p.DeletionTimestamp.IsZero() {
continue
}
@@ -307,7 +307,7 @@ func (r *RunnerPodReconciler) cleanupRunnerLinkedPods(ctx context.Context, pod *
if kerrors.IsNotFound(err) || kerrors.IsGone(err) {
return
}
errs = append(errs, fmt.Errorf("delete pod %q error: %v", p.ObjectMeta.Name, err))
errs = append(errs, fmt.Errorf("delete pod %q error: %v", p.Name, err))
}
}()
}
@@ -330,7 +330,7 @@ func (r *RunnerPodReconciler) cleanupRunnerLinkedSecrets(ctx context.Context, po
var runnerLinkedSecretList corev1.SecretList
if err := r.List(ctx, &runnerLinkedSecretList, client.InNamespace(pod.Namespace), client.MatchingLabels(
map[string]string{
"runner-pod": pod.ObjectMeta.Name,
"runner-pod": pod.Name,
},
)); err != nil {
return fmt.Errorf("failed to list runner-linked secrets: %w", err)
@@ -341,7 +341,7 @@ func (r *RunnerPodReconciler) cleanupRunnerLinkedSecrets(ctx context.Context, po
errs []error
)
for _, s := range runnerLinkedSecretList.Items {
if !s.ObjectMeta.DeletionTimestamp.IsZero() {
if !s.DeletionTimestamp.IsZero() {
continue
}
@@ -353,7 +353,7 @@ func (r *RunnerPodReconciler) cleanupRunnerLinkedSecrets(ctx context.Context, po
if kerrors.IsNotFound(err) || kerrors.IsGone(err) {
return
}
errs = append(errs, fmt.Errorf("delete secret %q error: %v", s.ObjectMeta.Name, err))
errs = append(errs, fmt.Errorf("delete secret %q error: %v", s.Name, err))
}
}()
}
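The cleanup loops above collect per-resource failures into an errs slice. One idiomatic way to surface such a slice (a sketch only; not necessarily how ARC aggregates them) is errors.Join from Go 1.20+:

package main

import (
	"errors"
	"fmt"
)

func main() {
	var errs []error
	for _, name := range []string{"pod-a", "pod-b"} {
		errs = append(errs, fmt.Errorf("delete pod %q error: %v", name, errors.New("timeout")))
	}
	// errors.Join returns nil for an empty slice, or a single error
	// whose message lists every collected failure.
	fmt.Println(errors.Join(errs...))
}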

View File

@@ -90,7 +90,7 @@ var _ owner = (*ownerStatefulSet)(nil)
func (s *ownerStatefulSet) pods(ctx context.Context, c client.Client) ([]corev1.Pod, error) {
var podList corev1.PodList
if err := c.List(ctx, &podList, client.MatchingLabels(s.StatefulSet.Spec.Template.ObjectMeta.Labels)); err != nil {
if err := c.List(ctx, &podList, client.MatchingLabels(s.StatefulSet.Spec.Template.Labels)); err != nil {
s.Log.Error(err, "Failed to list pods managed by statefulset")
return nil, err
}

View File

@@ -73,7 +73,7 @@ func (r *RunnerDeploymentReconciler) Reconcile(ctx context.Context, req ctrl.Req
return ctrl.Result{}, client.IgnoreNotFound(err)
}
if !rd.ObjectMeta.DeletionTimestamp.IsZero() {
if !rd.DeletionTimestamp.IsZero() {
return ctrl.Result{}, nil
}
@@ -112,7 +112,7 @@ func (r *RunnerDeploymentReconciler) Reconcile(ctx context.Context, req ctrl.Req
}
if newestSet == nil {
if err := r.Client.Create(ctx, desiredRS); err != nil {
if err := r.Create(ctx, desiredRS); err != nil {
log.Error(err, "Failed to create runnerreplicaset resource")
return ctrl.Result{}, err
@@ -138,7 +138,7 @@ func (r *RunnerDeploymentReconciler) Reconcile(ctx context.Context, req ctrl.Req
}
if newestTemplateHash != desiredTemplateHash {
if err := r.Client.Create(ctx, desiredRS); err != nil {
if err := r.Create(ctx, desiredRS); err != nil {
log.Error(err, "Failed to create runnerreplicaset resource")
return ctrl.Result{}, err
@@ -159,7 +159,7 @@ func (r *RunnerDeploymentReconciler) Reconcile(ctx context.Context, req ctrl.Req
// but we still need to update the existing replicaset with it.
// Otherwise the selector-based runner query will never work on replicasets created before controller v0.17.0
// See https://github.com/actions/actions-runner-controller/pull/355#discussion_r585379259
if err := r.Client.Update(ctx, updateSet); err != nil {
if err := r.Update(ctx, updateSet); err != nil {
log.Error(err, "Failed to update runnerreplicaset resource")
return ctrl.Result{}, err
@@ -195,7 +195,7 @@ func (r *RunnerDeploymentReconciler) Reconcile(ctx context.Context, req ctrl.Req
newestSet.Spec.Replicas = &newDesiredReplicas
newestSet.Spec.EffectiveTime = rd.Spec.EffectiveTime
if err := r.Client.Update(ctx, newestSet); err != nil {
if err := r.Update(ctx, newestSet); err != nil {
log.Error(err, "Failed to update runnerreplicaset resource")
return ctrl.Result{}, err
@@ -257,7 +257,7 @@ func (r *RunnerDeploymentReconciler) Reconcile(ctx context.Context, req ctrl.Req
updated := rs.DeepCopy()
zero := 0
updated.Spec.Replicas = &zero
if err := r.Client.Update(ctx, updated); err != nil {
if err := r.Update(ctx, updated); err != nil {
rslog.Error(err, "Failed to scale runnerreplicaset to zero")
return ctrl.Result{}, err
@@ -268,7 +268,7 @@ func (r *RunnerDeploymentReconciler) Reconcile(ctx context.Context, req ctrl.Req
continue
}
if err := r.Client.Delete(ctx, &rs); err != nil {
if err := r.Delete(ctx, &rs); err != nil {
rslog.Error(err, "Failed to delete runnerreplicaset resource")
return ctrl.Result{}, err
@@ -445,10 +445,10 @@ func newRunnerReplicaSet(rd *v1alpha1.RunnerDeployment, commonRunnerLabels []str
templateHash := ComputeHash(&newRSTemplate)
// Add template hash label to selector.
newRSTemplate.ObjectMeta.Labels = CloneAndAddLabel(newRSTemplate.ObjectMeta.Labels, LabelKeyRunnerTemplateHash, templateHash)
newRSTemplate.Labels = CloneAndAddLabel(newRSTemplate.Labels, LabelKeyRunnerTemplateHash, templateHash)
// This label selector is used by default when rd.Spec.Selector is empty.
newRSTemplate.ObjectMeta.Labels = CloneAndAddLabel(newRSTemplate.ObjectMeta.Labels, LabelKeyRunnerDeploymentName, rd.Name)
newRSTemplate.Labels = CloneAndAddLabel(newRSTemplate.Labels, LabelKeyRunnerDeploymentName, rd.Name)
selector := getSelector(rd)
@@ -457,9 +457,9 @@ func newRunnerReplicaSet(rd *v1alpha1.RunnerDeployment, commonRunnerLabels []str
rs := v1alpha1.RunnerReplicaSet{
TypeMeta: metav1.TypeMeta{},
ObjectMeta: metav1.ObjectMeta{
GenerateName: rd.ObjectMeta.Name + "-",
Namespace: rd.ObjectMeta.Namespace,
Labels: newRSTemplate.ObjectMeta.Labels,
GenerateName: rd.Name + "-",
Namespace: rd.Namespace,
Labels: newRSTemplate.Labels,
},
Spec: v1alpha1.RunnerReplicaSetSpec{
Replicas: rd.Spec.Replicas,

View File

@@ -62,7 +62,7 @@ func (r *RunnerReplicaSetReconciler) Reconcile(ctx context.Context, req ctrl.Req
return ctrl.Result{}, client.IgnoreNotFound(err)
}
if !rs.ObjectMeta.DeletionTimestamp.IsZero() {
if !rs.DeletionTimestamp.IsZero() {
// RunnerReplicaSet cannot be gracefully removed.
// That means any runner that is running a job can be prematurely terminated.
// To gracefully remove a RunnerReplicaSet, scale it down to zero first, observe RunnerReplicaSet's status replicas,
@@ -70,14 +70,14 @@ func (r *RunnerReplicaSetReconciler) Reconcile(ctx context.Context, req ctrl.Req
return ctrl.Result{}, nil
}
if rs.ObjectMeta.Labels == nil {
rs.ObjectMeta.Labels = map[string]string{}
if rs.Labels == nil {
rs.Labels = map[string]string{}
}
// The template hash is usually set by the upstream controller (the RunnerDeployment controller) when authoring the
// RunnerReplicaSet resource, but it may be missing when the user created the RunnerReplicaSet directly.
// As a template hash is required by the runner replica management, we dynamically add it here without ever persisting it.
if rs.ObjectMeta.Labels[LabelKeyRunnerTemplateHash] == "" {
if rs.Labels[LabelKeyRunnerTemplateHash] == "" {
template := rs.Spec.DeepCopy()
template.Replicas = nil
template.EffectiveTime = nil
@@ -85,8 +85,8 @@ func (r *RunnerReplicaSetReconciler) Reconcile(ctx context.Context, req ctrl.Req
log.Info("Using auto-generated template hash", "value", templateHash)
rs.ObjectMeta.Labels = CloneAndAddLabel(rs.ObjectMeta.Labels, LabelKeyRunnerTemplateHash, templateHash)
rs.Spec.Template.ObjectMeta.Labels = CloneAndAddLabel(rs.Spec.Template.ObjectMeta.Labels, LabelKeyRunnerTemplateHash, templateHash)
rs.Labels = CloneAndAddLabel(rs.Labels, LabelKeyRunnerTemplateHash, templateHash)
rs.Spec.Template.Labels = CloneAndAddLabel(rs.Spec.Template.Labels, LabelKeyRunnerTemplateHash, templateHash)
}
selector, err := metav1.LabelSelectorAsSelector(rs.Spec.Selector)
@@ -169,8 +169,8 @@ func (r *RunnerReplicaSetReconciler) newRunner(rs v1alpha1.RunnerReplicaSet) (v1
// the "runner template hash" label to the template.meta which is necessary to make this controller work correctly
objectMeta := rs.Spec.Template.ObjectMeta.DeepCopy()
objectMeta.GenerateName = rs.ObjectMeta.Name + "-"
objectMeta.Namespace = rs.ObjectMeta.Namespace
objectMeta.GenerateName = rs.Name + "-"
objectMeta.Namespace = rs.Namespace
if objectMeta.Annotations == nil {
objectMeta.Annotations = map[string]string{}
}

View File

@@ -77,7 +77,7 @@ func (r *RunnerSetReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
return ctrl.Result{}, err
}
if !runnerSet.ObjectMeta.DeletionTimestamp.IsZero() {
if !runnerSet.DeletionTimestamp.IsZero() {
r.GitHubClient.DeinitForRunnerSet(runnerSet)
return ctrl.Result{}, nil
@@ -191,11 +191,11 @@ func (r *RunnerSetReconciler) newStatefulSet(ctx context.Context, runnerSet *v1a
runnerSetWithOverrides.Labels = append(runnerSetWithOverrides.Labels, r.CommonRunnerLabels...)
template := corev1.Pod{
ObjectMeta: runnerSetWithOverrides.StatefulSetSpec.Template.ObjectMeta,
Spec: runnerSetWithOverrides.StatefulSetSpec.Template.Spec,
ObjectMeta: runnerSetWithOverrides.Template.ObjectMeta,
Spec: runnerSetWithOverrides.Template.Spec,
}
if runnerSet.Spec.RunnerConfig.ContainerMode == "kubernetes" {
if runnerSet.Spec.ContainerMode == "kubernetes" {
found := false
for i := range template.Spec.Containers {
if template.Spec.Containers[i].Name == containerName {
@@ -208,7 +208,7 @@ func (r *RunnerSetReconciler) newStatefulSet(ctx context.Context, runnerSet *v1a
})
}
workDir := runnerSet.Spec.RunnerConfig.WorkDir
workDir := runnerSet.Spec.WorkDir
if workDir == "" {
workDir = "/runner/_work"
}
@@ -219,7 +219,7 @@ func (r *RunnerSetReconciler) newStatefulSet(ctx context.Context, runnerSet *v1a
template.Spec.ServiceAccountName = runnerSet.Spec.ServiceAccountName
}
template.ObjectMeta.Labels = CloneAndAddLabel(template.ObjectMeta.Labels, LabelKeyRunnerSetName, runnerSet.Name)
template.Labels = CloneAndAddLabel(template.Labels, LabelKeyRunnerSetName, runnerSet.Name)
ghc, err := r.GitHubClient.InitForRunnerSet(ctx, runnerSet)
if err != nil {
@@ -228,38 +228,38 @@ func (r *RunnerSetReconciler) newStatefulSet(ctx context.Context, runnerSet *v1a
githubBaseURL := ghc.GithubBaseURL
pod, err := newRunnerPodWithContainerMode(runnerSet.Spec.RunnerConfig.ContainerMode, template, runnerSet.Spec.RunnerConfig, githubBaseURL, r.RunnerPodDefaults)
pod, err := newRunnerPodWithContainerMode(runnerSet.Spec.ContainerMode, template, runnerSet.Spec.RunnerConfig, githubBaseURL, r.RunnerPodDefaults)
if err != nil {
return nil, err
}
runnerSetWithOverrides.StatefulSetSpec.Template.ObjectMeta = pod.ObjectMeta
runnerSetWithOverrides.StatefulSetSpec.Template.Spec = pod.Spec
runnerSetWithOverrides.Template.ObjectMeta = pod.ObjectMeta
runnerSetWithOverrides.Template.Spec = pod.Spec
// NOTE: Seems like the only supported restart policy for statefulset is "Always"?
// I got errors like the below when I tried to use "OnFailure":
// StatefulSet.apps \"example-runnersetpg9rx\" is invalid: [spec.template.metadata.labels: Invalid value: map[string]string{\"runner-template-hash\"
// :\"85d7578bd6\", \"runnerset-name\":\"example-runnerset\"}: `selector` does not match template `labels`, spec.
// template.spec.restartPolicy: Unsupported value: \"OnFailure\": supported values: \"Always\"]
runnerSetWithOverrides.StatefulSetSpec.Template.Spec.RestartPolicy = corev1.RestartPolicyAlways
runnerSetWithOverrides.Template.Spec.RestartPolicy = corev1.RestartPolicyAlways
templateHash := ComputeHash(pod.Spec)
// Add template hash label to selector.
runnerSetWithOverrides.Template.ObjectMeta.Labels = CloneAndAddLabel(runnerSetWithOverrides.Template.ObjectMeta.Labels, LabelKeyRunnerTemplateHash, templateHash)
runnerSetWithOverrides.Template.Labels = CloneAndAddLabel(runnerSetWithOverrides.Template.Labels, LabelKeyRunnerTemplateHash, templateHash)
selector := getRunnerSetSelector(runnerSet)
selector = CloneSelectorAndAddLabel(selector, LabelKeyRunnerTemplateHash, templateHash)
selector = CloneSelectorAndAddLabel(selector, LabelKeyRunnerSetName, runnerSet.Name)
selector = CloneSelectorAndAddLabel(selector, LabelKeyPodMutation, LabelValuePodMutation)
runnerSetWithOverrides.StatefulSetSpec.Selector = selector
runnerSetWithOverrides.Selector = selector
rs := appsv1.StatefulSet{
TypeMeta: metav1.TypeMeta{},
ObjectMeta: metav1.ObjectMeta{
GenerateName: runnerSet.ObjectMeta.Name + "-",
Namespace: runnerSet.ObjectMeta.Namespace,
Labels: CloneAndAddLabel(runnerSet.ObjectMeta.Labels, LabelKeyRunnerTemplateHash, templateHash),
GenerateName: runnerSet.Name + "-",
Namespace: runnerSet.Namespace,
Labels: CloneAndAddLabel(runnerSet.Labels, LabelKeyRunnerTemplateHash, templateHash),
Annotations: map[string]string{
SyncTimeAnnotationKey: time.Now().Format(time.RFC3339),
},

View File

@@ -23,7 +23,7 @@ const (
func syncVolumes(ctx context.Context, c client.Client, log logr.Logger, ns string, runnerSet *v1alpha1.RunnerSet, statefulsets []appsv1.StatefulSet) (*ctrl.Result, error) {
log = log.WithValues("ns", ns)
for _, t := range runnerSet.Spec.StatefulSetSpec.VolumeClaimTemplates {
for _, t := range runnerSet.Spec.VolumeClaimTemplates {
for _, sts := range statefulsets {
pvcName := fmt.Sprintf("%s-%s-0", t.Name, sts.Name)

View File

@@ -16,7 +16,7 @@ type testResourceReader struct {
}
func (r *testResourceReader) Get(_ context.Context, key client.ObjectKey, obj client.Object, _ ...client.GetOption) error {
nsName := types.NamespacedName{Namespace: key.Namespace, Name: key.Name}
nsName := types.NamespacedName(key)
ret, ok := r.objects[nsName]
if !ok {
return &kerrors.StatusError{ErrStatus: metav1.Status{Reason: metav1.StatusReasonNotFound}}
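The conversion above is valid because client.ObjectKey is defined as an alias of types.NamespacedName in controller-runtime, so no field-by-field copy is needed. More generally, struct types with identical fields convert directly, as this simplified standalone sketch shows:

package main

import "fmt"

// Simplified analogues of client.ObjectKey and types.NamespacedName:
// two struct types with identical field names and types are directly
// convertible, so the literal with per-field assignment is redundant.
type ObjectKey struct{ Namespace, Name string }
type NamespacedName struct{ Namespace, Name string }

func main() {
	key := ObjectKey{Namespace: "default", Name: "runner-0"}
	nn := NamespacedName(key) // direct conversion
	fmt.Println(nn)
}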

View File

@@ -64,22 +64,22 @@ func Test_workVolumeClaimTemplateVolumeV1VolumeTransformation(t *testing.T) {
t.Errorf("want name %q, got %q\n", want.Name, got.Name)
}
if got.VolumeSource.Ephemeral == nil {
if got.Ephemeral == nil {
t.Fatal("work volume claim template should transform itself into Ephemeral volume source\n")
}
if got.VolumeSource.Ephemeral.VolumeClaimTemplate == nil {
if got.Ephemeral.VolumeClaimTemplate == nil {
t.Fatal("work volume claim template should have ephemeral volume claim template set\n")
}
gotClassName := *got.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.StorageClassName
wantClassName := *want.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.StorageClassName
gotClassName := *got.Ephemeral.VolumeClaimTemplate.Spec.StorageClassName
wantClassName := *want.Ephemeral.VolumeClaimTemplate.Spec.StorageClassName
if gotClassName != wantClassName {
t.Errorf("expected storage class name %q, got %q\n", wantClassName, gotClassName)
}
gotAccessModes := got.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.AccessModes
wantAccessModes := want.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.AccessModes
gotAccessModes := got.Ephemeral.VolumeClaimTemplate.Spec.AccessModes
wantAccessModes := want.Ephemeral.VolumeClaimTemplate.Spec.AccessModes
if len(gotAccessModes) != len(wantAccessModes) {
t.Fatalf("access modes lengths missmatch: got %v, expected %v\n", gotAccessModes, wantAccessModes)
}

View File

@@ -10,6 +10,7 @@ import (
"errors"
"fmt"
"io"
"maps"
"math/rand"
"net/http"
"net/url"
@@ -273,16 +274,16 @@ func (c *Client) Identifier() string {
func (c *Client) Do(req *http.Request) (*http.Response, error) {
resp, err := c.Client.Do(req)
if err != nil {
return nil, err
return nil, fmt.Errorf("client request failed: %w", err)
}
body, err := io.ReadAll(resp.Body)
if err != nil {
return nil, err
return nil, fmt.Errorf("failed to read the response body: %w", err)
}
err = resp.Body.Close()
if err != nil {
return nil, err
return nil, fmt.Errorf("failed to close the response body: %w", err)
}
body = trimByteOrderMark(body)
@@ -294,7 +295,7 @@ func (c *Client) NewGitHubAPIRequest(ctx context.Context, method, path string, b
u := c.config.GitHubAPIURL(path)
req, err := http.NewRequestWithContext(ctx, method, u.String(), body)
if err != nil {
return nil, err
return nil, fmt.Errorf("failed to create new GitHub API request: %w", err)
}
req.Header.Set("User-Agent", c.userAgent.String())
@@ -305,28 +306,27 @@ func (c *Client) NewGitHubAPIRequest(ctx context.Context, method, path string, b
func (c *Client) NewActionsServiceRequest(ctx context.Context, method, path string, body io.Reader) (*http.Request, error) {
err := c.updateTokenIfNeeded(ctx)
if err != nil {
return nil, err
return nil, fmt.Errorf("failed to issue update token if needed: %w", err)
}
parsedPath, err := url.Parse(path)
if err != nil {
return nil, err
return nil, fmt.Errorf("failed to parse path %q: %w", path, err)
}
urlString, err := url.JoinPath(c.ActionsServiceURL, parsedPath.Path)
if err != nil {
return nil, err
return nil, fmt.Errorf("failed to join path (actions_service_url=%q, parsedPath=%q): %w", c.ActionsServiceURL, parsedPath.Path, err)
}
u, err := url.Parse(urlString)
if err != nil {
return nil, err
return nil, fmt.Errorf("failed to parse url string %q: %w", urlString, err)
}
q := u.Query()
for k, v := range parsedPath.Query() {
q[k] = v
}
maps.Copy(q, parsedPath.Query())
if q.Get("api-version") == "" {
q.Set("api-version", "6.0-preview")
}
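maps.Copy (standard library, Go 1.21+) replaces the hand-written loop one-for-one: it ranges over the source map and assigns each key into the destination, overwriting duplicates. A quick check with url.Values:

package main

import (
	"fmt"
	"maps"
	"net/url"
)

func main() {
	dst := url.Values{"a": {"1"}}
	src := url.Values{"b": {"2"}, "a": {"override"}}
	// maps.Copy is the loop
	//   for k, v := range src { dst[k] = v }
	// expressed as a single stdlib call; keys in src overwrite dst.
	maps.Copy(dst, src)
	fmt.Println(dst) // map[a:[override] b:[2]]
}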
@@ -334,7 +334,7 @@ func (c *Client) NewActionsServiceRequest(ctx context.Context, method, path stri
req, err := http.NewRequestWithContext(ctx, method, u.String(), body)
if err != nil {
return nil, err
return nil, fmt.Errorf("failed to create new request with context: %w", err)
}
req.Header.Set("Content-Type", "application/json")
@@ -348,12 +348,12 @@ func (c *Client) GetRunnerScaleSet(ctx context.Context, runnerGroupId int, runne
path := fmt.Sprintf("/%s?runnerGroupId=%d&name=%s", scaleSetEndpoint, runnerGroupId, runnerScaleSetName)
req, err := c.NewActionsServiceRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, err
return nil, fmt.Errorf("failed to create new actions service request: %w", err)
}
resp, err := c.Do(req)
if err != nil {
return nil, err
return nil, fmt.Errorf("failed to issue the request: %w", err)
}
if resp.StatusCode != http.StatusOK {
@@ -386,12 +386,12 @@ func (c *Client) GetRunnerScaleSetById(ctx context.Context, runnerScaleSetId int
path := fmt.Sprintf("/%s/%d", scaleSetEndpoint, runnerScaleSetId)
req, err := c.NewActionsServiceRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, err
return nil, fmt.Errorf("failed to create new actions service request: %w", err)
}
resp, err := c.Do(req)
if err != nil {
return nil, err
return nil, fmt.Errorf("failed to issue the request: %w", err)
}
if resp.StatusCode != http.StatusOK {
@@ -413,12 +413,12 @@ func (c *Client) GetRunnerGroupByName(ctx context.Context, runnerGroup string) (
path := fmt.Sprintf("/_apis/runtime/runnergroups/?groupName=%s", runnerGroup)
req, err := c.NewActionsServiceRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, err
return nil, fmt.Errorf("failed to create new actions service request: %w", err)
}
resp, err := c.Do(req)
if err != nil {
return nil, err
return nil, fmt.Errorf("failed to issue the request: %w", err)
}
if resp.StatusCode != http.StatusOK {
@@ -469,17 +469,17 @@ func (c *Client) GetRunnerGroupByName(ctx context.Context, runnerGroup string) (
func (c *Client) CreateRunnerScaleSet(ctx context.Context, runnerScaleSet *RunnerScaleSet) (*RunnerScaleSet, error) {
body, err := json.Marshal(runnerScaleSet)
if err != nil {
return nil, err
return nil, fmt.Errorf("failed to marshal runner scale set: %w", err)
}
req, err := c.NewActionsServiceRequest(ctx, http.MethodPost, scaleSetEndpoint, bytes.NewReader(body))
if err != nil {
return nil, err
return nil, fmt.Errorf("failed to create new actions service request: %w", err)
}
resp, err := c.Do(req)
if err != nil {
return nil, err
return nil, fmt.Errorf("failed to issue the request: %w", err)
}
if resp.StatusCode != http.StatusOK {
@@ -501,17 +501,17 @@ func (c *Client) UpdateRunnerScaleSet(ctx context.Context, runnerScaleSetId int,
body, err := json.Marshal(runnerScaleSet)
if err != nil {
return nil, err
return nil, fmt.Errorf("failed to marshal runner scale set: %w", err)
}
req, err := c.NewActionsServiceRequest(ctx, http.MethodPatch, path, bytes.NewReader(body))
if err != nil {
return nil, err
return nil, fmt.Errorf("failed to create new actions service request: %w", err)
}
resp, err := c.Do(req)
if err != nil {
return nil, err
return nil, fmt.Errorf("failed to issue the request: %w", err)
}
if resp.StatusCode != http.StatusOK {
@@ -533,12 +533,12 @@ func (c *Client) DeleteRunnerScaleSet(ctx context.Context, runnerScaleSetId int)
path := fmt.Sprintf("/%s/%d", scaleSetEndpoint, runnerScaleSetId)
req, err := c.NewActionsServiceRequest(ctx, http.MethodDelete, path, nil)
if err != nil {
return err
return fmt.Errorf("failed to create new actions service request: %w", err)
}
resp, err := c.Do(req)
if err != nil {
return err
return fmt.Errorf("failed to issue the request: %w", err)
}
if resp.StatusCode != http.StatusNoContent {
@@ -552,7 +552,7 @@ func (c *Client) DeleteRunnerScaleSet(ctx context.Context, runnerScaleSetId int)
func (c *Client) GetMessage(ctx context.Context, messageQueueUrl, messageQueueAccessToken string, lastMessageId int64, maxCapacity int) (*RunnerScaleSetMessage, error) {
u, err := url.Parse(messageQueueUrl)
if err != nil {
return nil, err
return nil, fmt.Errorf("failed to parse message queue url: %w", err)
}
if lastMessageId > 0 {
@@ -567,7 +567,7 @@ func (c *Client) GetMessage(ctx context.Context, messageQueueUrl, messageQueueAc
req, err := http.NewRequestWithContext(ctx, http.MethodGet, u.String(), nil)
if err != nil {
return nil, err
return nil, fmt.Errorf("failed to create new request with context: %w", err)
}
req.Header.Set("Accept", "application/json; api-version=6.0-preview")
@@ -577,7 +577,7 @@ func (c *Client) GetMessage(ctx context.Context, messageQueueUrl, messageQueueAc
resp, err := c.Do(req)
if err != nil {
return nil, err
return nil, fmt.Errorf("failed to issue the request: %w", err)
}
if resp.StatusCode == http.StatusAccepted {
@@ -621,14 +621,14 @@ func (c *Client) GetMessage(ctx context.Context, messageQueueUrl, messageQueueAc
func (c *Client) DeleteMessage(ctx context.Context, messageQueueUrl, messageQueueAccessToken string, messageId int64) error {
u, err := url.Parse(messageQueueUrl)
if err != nil {
return err
return fmt.Errorf("failed to parse message queue url: %w", err)
}
u.Path = fmt.Sprintf("%s/%d", u.Path, messageId)
req, err := http.NewRequestWithContext(ctx, http.MethodDelete, u.String(), nil)
if err != nil {
return err
return fmt.Errorf("failed to create new request with context: %w", err)
}
req.Header.Set("Content-Type", "application/json")
@@ -637,7 +637,7 @@ func (c *Client) DeleteMessage(ctx context.Context, messageQueueUrl, messageQueu
resp, err := c.Do(req)
if err != nil {
return err
return fmt.Errorf("failed to issue the request: %w", err)
}
if resp.StatusCode != http.StatusNoContent {
@@ -673,14 +673,16 @@ func (c *Client) CreateMessageSession(ctx context.Context, runnerScaleSetId int,
requestData, err := json.Marshal(newSession)
if err != nil {
return nil, err
return nil, fmt.Errorf("failed to marshal new session: %w", err)
}
createdSession := &RunnerScaleSetSession{}
err = c.doSessionRequest(ctx, http.MethodPost, path, bytes.NewBuffer(requestData), http.StatusOK, createdSession)
if err = c.doSessionRequest(ctx, http.MethodPost, path, bytes.NewBuffer(requestData), http.StatusOK, createdSession); err != nil {
return nil, fmt.Errorf("failed to do the session request: %w", err)
}
return createdSession, err
return createdSession, nil
}
func (c *Client) DeleteMessageSession(ctx context.Context, runnerScaleSetId int, sessionId *uuid.UUID) error {
@@ -691,19 +693,21 @@ func (c *Client) DeleteMessageSession(ctx context.Context, runnerScaleSetId int,
func (c *Client) RefreshMessageSession(ctx context.Context, runnerScaleSetId int, sessionId *uuid.UUID) (*RunnerScaleSetSession, error) {
path := fmt.Sprintf("/%s/%d/sessions/%s", scaleSetEndpoint, runnerScaleSetId, sessionId.String())
refreshedSession := &RunnerScaleSetSession{}
err := c.doSessionRequest(ctx, http.MethodPatch, path, nil, http.StatusOK, refreshedSession)
return refreshedSession, err
if err := c.doSessionRequest(ctx, http.MethodPatch, path, nil, http.StatusOK, refreshedSession); err != nil {
return nil, fmt.Errorf("failed to do the session request: %w", err)
}
return refreshedSession, nil
}
func (c *Client) doSessionRequest(ctx context.Context, method, path string, requestData io.Reader, expectedResponseStatusCode int, responseUnmarshalTarget any) error {
req, err := c.NewActionsServiceRequest(ctx, method, path, requestData)
if err != nil {
return err
return fmt.Errorf("failed to create new actions service request: %w", err)
}
resp, err := c.Do(req)
if err != nil {
return err
return fmt.Errorf("failed to issue the request: %w", err)
}
if resp.StatusCode == expectedResponseStatusCode {
@@ -749,12 +753,12 @@ func (c *Client) AcquireJobs(ctx context.Context, runnerScaleSetId int, messageQ
body, err := json.Marshal(requestIds)
if err != nil {
return nil, err
return nil, fmt.Errorf("failed to marshal request ids: %w", err)
}
req, err := http.NewRequestWithContext(ctx, http.MethodPost, u, bytes.NewBuffer(body))
if err != nil {
return nil, err
return nil, fmt.Errorf("failed to create new request with context: %w", err)
}
req.Header.Set("Content-Type", "application/json")
@@ -763,7 +767,7 @@ func (c *Client) AcquireJobs(ctx context.Context, runnerScaleSetId int, messageQ
resp, err := c.Do(req)
if err != nil {
return nil, err
return nil, fmt.Errorf("failed to issue the request: %w", err)
}
if resp.StatusCode != http.StatusOK {
@@ -807,12 +811,12 @@ func (c *Client) GetAcquirableJobs(ctx context.Context, runnerScaleSetId int) (*
req, err := c.NewActionsServiceRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, err
return nil, fmt.Errorf("failed to create new actions service request: %w", err)
}
resp, err := c.Do(req)
if err != nil {
return nil, err
return nil, fmt.Errorf("failed to issue the request: %w", err)
}
if resp.StatusCode == http.StatusNoContent {
@@ -842,17 +846,17 @@ func (c *Client) GenerateJitRunnerConfig(ctx context.Context, jitRunnerSetting *
body, err := json.Marshal(jitRunnerSetting)
if err != nil {
return nil, err
return nil, fmt.Errorf("failed to marshal runner settings: %w", err)
}
req, err := c.NewActionsServiceRequest(ctx, http.MethodPost, path, bytes.NewBuffer(body))
if err != nil {
return nil, err
return nil, fmt.Errorf("failed to create new actions service request: %w", err)
}
resp, err := c.Do(req)
if err != nil {
return nil, err
return nil, fmt.Errorf("failed to issue the request: %w", err)
}
if resp.StatusCode != http.StatusOK {
@@ -875,12 +879,12 @@ func (c *Client) GetRunner(ctx context.Context, runnerId int64) (*RunnerReferenc
req, err := c.NewActionsServiceRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, err
return nil, fmt.Errorf("failed to create new actions service request: %w", err)
}
resp, err := c.Do(req)
if err != nil {
return nil, err
return nil, fmt.Errorf("failed to issue the request: %w", err)
}
if resp.StatusCode != http.StatusOK {
@@ -904,12 +908,12 @@ func (c *Client) GetRunnerByName(ctx context.Context, runnerName string) (*Runne
req, err := c.NewActionsServiceRequest(ctx, http.MethodGet, path, nil)
if err != nil {
return nil, err
return nil, fmt.Errorf("failed to create new actions service request: %w", err)
}
resp, err := c.Do(req)
if err != nil {
return nil, err
return nil, fmt.Errorf("failed to issue the request: %w", err)
}
if resp.StatusCode != http.StatusOK {
@@ -945,12 +949,12 @@ func (c *Client) RemoveRunner(ctx context.Context, runnerId int64) error {
req, err := c.NewActionsServiceRequest(ctx, http.MethodDelete, path, nil)
if err != nil {
return err
return fmt.Errorf("failed to create new actions service request: %w", err)
}
resp, err := c.Do(req)
if err != nil {
return err
return fmt.Errorf("failed to issue the request: %w", err)
}
if resp.StatusCode != http.StatusNoContent {
@@ -969,13 +973,13 @@ type registrationToken struct {
func (c *Client) getRunnerRegistrationToken(ctx context.Context) (*registrationToken, error) {
path, err := createRegistrationTokenPath(c.config)
if err != nil {
return nil, err
return nil, fmt.Errorf("failed to create registration token path: %w", err)
}
var buf bytes.Buffer
req, err := c.NewGitHubAPIRequest(ctx, http.MethodPost, path, &buf)
if err != nil {
return nil, err
return nil, fmt.Errorf("failed to create new GitHub API request: %w", err)
}
bearerToken := ""
@@ -985,7 +989,7 @@ func (c *Client) getRunnerRegistrationToken(ctx context.Context) (*registrationT
} else {
accessToken, err := c.fetchAccessToken(ctx, c.config.ConfigURL.String(), c.creds.AppCreds)
if err != nil {
return nil, err
return nil, fmt.Errorf("failed to fetch access token: %w", err)
}
bearerToken = fmt.Sprintf("Bearer %v", accessToken.Token)
@@ -998,14 +1002,14 @@ func (c *Client) getRunnerRegistrationToken(ctx context.Context) (*registrationT
resp, err := c.Do(req)
if err != nil {
return nil, err
return nil, fmt.Errorf("failed to issue the request: %w", err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusCreated {
body, err := io.ReadAll(resp.Body)
if err != nil {
return nil, err
return nil, fmt.Errorf("failed to read the body: %w", err)
}
return nil, &GitHubAPIError{
StatusCode: resp.StatusCode,
@@ -1035,13 +1039,13 @@ type accessToken struct {
func (c *Client) fetchAccessToken(ctx context.Context, gitHubConfigURL string, creds *GitHubAppAuth) (*accessToken, error) {
accessTokenJWT, err := createJWTForGitHubApp(creds)
if err != nil {
return nil, err
return nil, fmt.Errorf("failed to create JWT for GitHub app: %w", err)
}
path := fmt.Sprintf("/app/installations/%v/access_tokens", creds.AppInstallationID)
req, err := c.NewGitHubAPIRequest(ctx, http.MethodPost, path, nil)
if err != nil {
return nil, err
return nil, fmt.Errorf("failed to create new GitHub API request: %w", err)
}
req.Header.Set("Content-Type", "application/vnd.github+json")
@@ -1051,7 +1055,7 @@ func (c *Client) fetchAccessToken(ctx context.Context, gitHubConfigURL string, c
resp, err := c.Do(req)
if err != nil {
return nil, err
return nil, fmt.Errorf("failed to issue the request: %w", err)
}
defer resp.Body.Close()
@@ -1096,12 +1100,12 @@ func (c *Client) getActionsServiceAdminConnection(ctx context.Context, rt *regis
enc.SetEscapeHTML(false)
if err := enc.Encode(body); err != nil {
return nil, err
return nil, fmt.Errorf("failed to encode body: %w", err)
}
req, err := c.NewGitHubAPIRequest(ctx, http.MethodPost, path, buf)
if err != nil {
return nil, err
return nil, fmt.Errorf("failed to create new GitHub API request: %w", err)
}
req.Header.Set("Content-Type", "application/json")
@@ -1115,7 +1119,7 @@ func (c *Client) getActionsServiceAdminConnection(ctx context.Context, rt *regis
var err error
resp, err = c.Do(req)
if err != nil {
return nil, err
return nil, fmt.Errorf("failed to issue the request: %w", err)
}
defer resp.Body.Close()
@@ -1215,7 +1219,7 @@ func createJWTForGitHubApp(appAuth *GitHubAppAuth) (string, error) {
privateKey, err := jwt.ParseRSAPrivateKeyFromPEM([]byte(appAuth.AppPrivateKey))
if err != nil {
return "", err
return "", fmt.Errorf("failed to parse RSA private key from PEM: %w", err)
}
return token.SignedString(privateKey)
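
The pattern running through this file is mechanical but deliberate: every bare return err gains a fmt.Errorf wrapper using the %w verb, which prepends call-site context while keeping the original error reachable through the wrap chain. A minimal standalone sketch of what that preserves for callers (the apiError type here is illustrative, standing in for typed errors such as GitHubAPIError above):

package main

import (
	"errors"
	"fmt"
)

// apiError stands in for a typed error like GitHubAPIError.
type apiError struct{ StatusCode int }

func (e *apiError) Error() string { return fmt.Sprintf("status %d", e.StatusCode) }

func fetch() error {
	// %w wraps: the message gains context, the value stays in the chain.
	return fmt.Errorf("failed to issue the request: %w", &apiError{StatusCode: 404})
}

func main() {
	err := fetch()
	var apiErr *apiError
	// errors.As unwraps through the added context to the typed error.
	if errors.As(err, &apiErr) {
		fmt.Println("status:", apiErr.StatusCode) // status: 404
	}
	fmt.Println(err) // failed to issue the request: status 404
}

Had these sites used %v instead, the chain would be cut and any errors.Is or errors.As checks upstream would stop matching.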

View File

@@ -54,7 +54,7 @@ func TestAcquireJobs(t *testing.T) {
RunnerScaleSet: &actions.RunnerScaleSet{Id: 1},
MessageQueueAccessToken: "abc",
}
var requestIDs []int64 = []int64{1}
var requestIDs = []int64{1}
retryMax := 1
actualRetry := 0
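
The requestIDs change above is a pure style fix: when the initializer already determines the type, spelling the type out on the var declaration is redundant. The same cleanup repeats in the runner and runner-group tests below. A generic illustration of the equivalent forms (not lines from this changeset):

package main

func main() {
	var a []int64 = []int64{1} // explicit type is redundant; linters flag this form
	var b = []int64{1}         // type inferred from the literal
	c := []int64{1}            // short declaration, valid only inside a function
	_, _, _ = a, b, c
}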

View File

@@ -101,8 +101,7 @@ func TestCreateMessageSession(t *testing.T) {
err,
)
gotErr := err.(*actions.ActionsError)
assert.Equal(t, want, gotErr)
assert.Equal(t, want, errorTypeForComparison)
})
t.Run("CreateMessageSession call is retried the correct amount of times", func(t *testing.T) {

View File

@@ -67,7 +67,7 @@ func TestGetRunnerByName(t *testing.T) {
t.Run("Get Runner by Name", func(t *testing.T) {
var runnerID int64 = 1
var runnerName string = "self-hosted-ubuntu"
var runnerName = "self-hosted-ubuntu"
want := &actions.RunnerReference{
Id: int(runnerID),
Name: runnerName,
@@ -87,7 +87,7 @@ func TestGetRunnerByName(t *testing.T) {
})
t.Run("Get Runner by name with not exist runner", func(t *testing.T) {
var runnerName string = "self-hosted-ubuntu"
var runnerName = "self-hosted-ubuntu"
response := []byte(`{"count": 0, "value": []}`)
server := newActionsServer(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
@@ -103,7 +103,7 @@ func TestGetRunnerByName(t *testing.T) {
})
t.Run("Default retries on server error", func(t *testing.T) {
var runnerName string = "self-hosted-ubuntu"
var runnerName = "self-hosted-ubuntu"
retryWaitMax := 1 * time.Millisecond
retryMax := 1
@@ -181,7 +181,7 @@ func TestGetRunnerGroupByName(t *testing.T) {
t.Run("Get RunnerGroup by Name", func(t *testing.T) {
var runnerGroupID int64 = 1
var runnerGroupName string = "test-runner-group"
var runnerGroupName = "test-runner-group"
want := &actions.RunnerGroup{
ID: runnerGroupID,
Name: runnerGroupName,
@@ -201,7 +201,7 @@ func TestGetRunnerGroupByName(t *testing.T) {
})
t.Run("Get RunnerGroup by name with not exist runner group", func(t *testing.T) {
var runnerGroupName string = "test-runner-group"
var runnerGroupName = "test-runner-group"
response := []byte(`{"count": 0, "value": []}`)
server := newActionsServer(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {

View File

@@ -127,7 +127,7 @@ func NewServer(opts ...Option) *httptest.Server {
},
// For ListRunners
"/repos/test/valid/actions/runners": config.FixedResponses.ListRunners,
"/repos/test/valid/actions/runners": config.ListRunners,
"/repos/test/invalid/actions/runners": &Handler{
Status: http.StatusNoContent,
Body: "",
@@ -204,10 +204,10 @@ func NewServer(opts ...Option) *httptest.Server {
},
// For auto-scaling based on the number of queued(pending) workflow runs
"/repos/test/valid/actions/runs": config.FixedResponses.ListRepositoryWorkflowRuns,
"/repos/test/valid/actions/runs": config.ListRepositoryWorkflowRuns,
// For auto-scaling based on the number of queued(pending) workflow jobs
"/repos/test/valid/actions/runs/": config.FixedResponses.ListWorkflowJobs,
"/repos/test/valid/actions/runs/": config.ListWorkflowJobs,
}
mux := http.NewServeMux()

View File

@@ -12,7 +12,7 @@ type Option func(*ServerConfig)
func WithListRepositoryWorkflowRunsResponse(status int, body, queued, in_progress string) Option {
return func(c *ServerConfig) {
c.FixedResponses.ListRepositoryWorkflowRuns = &Handler{
c.ListRepositoryWorkflowRuns = &Handler{
Status: status,
Body: body,
Statuses: map[string]string{
@@ -25,7 +25,7 @@ func WithListRepositoryWorkflowRunsResponse(status int, body, queued, in_progres
func WithListWorkflowJobsResponse(status int, bodies map[int]string) Option {
return func(c *ServerConfig) {
c.FixedResponses.ListWorkflowJobs = &MapHandler{
c.ListWorkflowJobs = &MapHandler{
Status: status,
Bodies: bodies,
}
@@ -34,7 +34,7 @@ func WithListWorkflowJobsResponse(status int, bodies map[int]string) Option {
func WithListRunnersResponse(status int, body string) Option {
return func(c *ServerConfig) {
c.FixedResponses.ListRunners = &ListRunnersHandler{
c.ListRunners = &ListRunnersHandler{
Status: status,
Body: body,
}
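
Both testserver hunks above drop the FixedResponses selector and assign straight to fields on the config. The struct definitions sit outside this diff, so the exact mechanism is an assumption, but the edits are consistent with FixedResponses being embedded in ServerConfig (or its fields hoisted into it), which promotes the fields to the outer type. A sketch under the embedding reading, with all types assumed:

package testserver

// Assumed layout; the real definitions are not shown in these hunks.
type ListRunnersHandler struct {
	Status int
	Body   string
}

type FixedResponses struct {
	ListRunners *ListRunnersHandler
}

type ServerConfig struct {
	FixedResponses // embedded: its fields are promoted to ServerConfig
}

type Option func(*ServerConfig)

func WithListRunnersResponse(status int, body string) Option {
	return func(c *ServerConfig) {
		// c.ListRunners and c.FixedResponses.ListRunners name the same field.
		c.ListRunners = &ListRunnersHandler{Status: status, Body: body}
	}
}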

View File

@@ -290,7 +290,7 @@ func (c *Client) ListRunnerGroupRepositoryAccesses(ctx context.Context, org stri
opts := github.ListOptions{PerPage: 100}
for {
list, res, err := c.Client.Actions.ListRepositoryAccessRunnerGroup(ctx, org, runnerGroupId, &opts)
list, res, err := c.Actions.ListRepositoryAccessRunnerGroup(ctx, org, runnerGroupId, &opts)
if err != nil {
return nil, fmt.Errorf("failed to list repository access for runner group: %w", err)
}
@@ -323,32 +323,32 @@ func (c *Client) cleanup() {
func (c *Client) createRegistrationToken(ctx context.Context, enterprise, org, repo string) (*github.RegistrationToken, *github.Response, error) {
if len(repo) > 0 {
return c.Client.Actions.CreateRegistrationToken(ctx, org, repo)
return c.Actions.CreateRegistrationToken(ctx, org, repo)
}
if len(org) > 0 {
return c.Client.Actions.CreateOrganizationRegistrationToken(ctx, org)
return c.Actions.CreateOrganizationRegistrationToken(ctx, org)
}
return c.Client.Enterprise.CreateRegistrationToken(ctx, enterprise)
return c.Enterprise.CreateRegistrationToken(ctx, enterprise)
}
func (c *Client) removeRunner(ctx context.Context, enterprise, org, repo string, runnerID int64) (*github.Response, error) {
if len(repo) > 0 {
return c.Client.Actions.RemoveRunner(ctx, org, repo, runnerID)
return c.Actions.RemoveRunner(ctx, org, repo, runnerID)
}
if len(org) > 0 {
return c.Client.Actions.RemoveOrganizationRunner(ctx, org, runnerID)
return c.Actions.RemoveOrganizationRunner(ctx, org, runnerID)
}
return c.Client.Enterprise.RemoveRunner(ctx, enterprise, runnerID)
return c.Enterprise.RemoveRunner(ctx, enterprise, runnerID)
}
func (c *Client) listRunners(ctx context.Context, enterprise, org, repo string, opts *github.ListOptions) (*github.Runners, *github.Response, error) {
if len(repo) > 0 {
return c.Client.Actions.ListRunners(ctx, org, repo, opts)
return c.Actions.ListRunners(ctx, org, repo, opts)
}
if len(org) > 0 {
return c.Client.Actions.ListOrganizationRunners(ctx, org, opts)
return c.Actions.ListOrganizationRunners(ctx, org, opts)
}
return c.Client.Enterprise.ListRunners(ctx, enterprise, opts)
return c.Enterprise.ListRunners(ctx, enterprise, opts)
}
func (c *Client) ListRepositoryWorkflowRuns(ctx context.Context, user string, repoName string) ([]*github.WorkflowRun, error) {
@@ -381,7 +381,7 @@ func (c *Client) listRepositoryWorkflowRuns(ctx context.Context, user string, re
}
for {
list, res, err := c.Client.Actions.ListRepositoryWorkflowRuns(ctx, user, repoName, &opts)
list, res, err := c.Actions.ListRepositoryWorkflowRuns(ctx, user, repoName, &opts)
if err != nil {
return workflowRuns, fmt.Errorf("failed to list workflow runs: %v", err)
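
The c.Client.Actions -> c.Actions rewrites in this file (and the client.Client.BaseURL -> client.BaseURL change in the test below) all rely on the same Go feature: when a struct embeds another type, the embedded type's fields and methods are promoted, so the inner selector is optional. Assuming Client embeds the go-github client, which these hunks imply but do not show, a self-contained analogue:

package main

import "fmt"

type ActionsService struct{}

func (s *ActionsService) ListRunners() string { return "runners" }

// GitHubClient plays the role of the embedded go-github client.
type GitHubClient struct {
	Actions *ActionsService
}

// Client embeds *GitHubClient, so Actions (and everything else on
// GitHubClient) is promoted to Client.
type Client struct {
	*GitHubClient
}

func main() {
	c := &Client{GitHubClient: &GitHubClient{Actions: &ActionsService{}}}
	fmt.Println(c.GitHubClient.Actions.ListRunners()) // long form, pre-change
	fmt.Println(c.Actions.ListRunners())              // promoted form, post-change
}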

View File

@@ -26,7 +26,7 @@ func newTestClient() *Client {
if err != nil {
panic(err)
}
client.Client.BaseURL = baseURL
client.BaseURL = baseURL
return client
}

15
go.mod
View File

@@ -1,7 +1,6 @@
module github.com/actions/actions-runner-controller
go 1.24.0
require (
github.com/bradleyfalzon/ghinstallation/v2 v2.14.0
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc
@@ -17,23 +16,23 @@ require (
github.com/hashicorp/go-retryablehttp v0.7.7
github.com/kelseyhightower/envconfig v1.4.0
github.com/onsi/ginkgo v1.16.5
github.com/onsi/ginkgo/v2 v2.23.0
github.com/onsi/gomega v1.36.2
github.com/onsi/ginkgo/v2 v2.23.3
github.com/onsi/gomega v1.36.3
github.com/prometheus/client_golang v1.21.1
github.com/stretchr/testify v1.10.0
github.com/teambition/rrule-go v1.8.2
go.uber.org/multierr v1.11.0
go.uber.org/zap v1.27.0
golang.org/x/net v0.37.0
golang.org/x/net v0.38.0
golang.org/x/oauth2 v0.28.0
golang.org/x/sync v0.12.0
gomodules.xyz/jsonpatch/v2 v2.5.0
gopkg.in/yaml.v2 v2.4.0
k8s.io/api v0.32.2
k8s.io/apimachinery v0.32.2
k8s.io/client-go v0.32.2
k8s.io/api v0.32.3
k8s.io/apimachinery v0.32.3
k8s.io/client-go v0.32.3
k8s.io/utils v0.0.0-20241210054802-24370beab758
sigs.k8s.io/controller-runtime v0.20.3
sigs.k8s.io/controller-runtime v0.20.4
sigs.k8s.io/yaml v1.4.0
)

28
go.sum
View File

@@ -263,12 +263,12 @@ github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
github.com/onsi/ginkgo/v2 v2.23.0 h1:FA1xjp8ieYDzlgS5ABTpdUDB7wtngggONc8a7ku2NqQ=
github.com/onsi/ginkgo/v2 v2.23.0/go.mod h1:zXTP6xIp3U8aVuXN8ENK9IXRaTjFnpVB9mGmaSRvxnM=
github.com/onsi/ginkgo/v2 v2.23.3 h1:edHxnszytJ4lD9D5Jjc4tiDkPBZ3siDeJJkUZJJVkp0=
github.com/onsi/ginkgo/v2 v2.23.3/go.mod h1:zXTP6xIp3U8aVuXN8ENK9IXRaTjFnpVB9mGmaSRvxnM=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8=
github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY=
github.com/onsi/gomega v1.36.3 h1:hID7cr8t3Wp26+cYnfcjR6HpJ00fdogN6dqZ1t6IylU=
github.com/onsi/gomega v1.36.3/go.mod h1:8D9+Txp43QWKhM24yyOBEdpkzN8FvJyAwecBgsU4KU0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
@@ -336,8 +336,8 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.37.0 h1:1zLorHbz+LYj7MQlSf1+2tPIIgibq2eL5xkrGk6f+2c=
golang.org/x/net v0.37.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8=
golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
golang.org/x/oauth2 v0.28.0 h1:CrgCKl8PPAVtLnU3c+EDw6x11699EWlsDeWNWKdIOkc=
golang.org/x/oauth2 v0.28.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -405,22 +405,22 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
k8s.io/api v0.32.2 h1:bZrMLEkgizC24G9eViHGOPbW+aRo9duEISRIJKfdJuw=
k8s.io/api v0.32.2/go.mod h1:hKlhk4x1sJyYnHENsrdCWw31FEmCijNGPJO5WzHiJ6Y=
k8s.io/api v0.32.3 h1:Hw7KqxRusq+6QSplE3NYG4MBxZw1BZnq4aP4cJVINls=
k8s.io/api v0.32.3/go.mod h1:2wEDTXADtm/HA7CCMD8D8bK4yuBUptzaRhYcYEEYA3k=
k8s.io/apiextensions-apiserver v0.32.2 h1:2YMk285jWMk2188V2AERy5yDwBYrjgWYggscghPCvV4=
k8s.io/apiextensions-apiserver v0.32.2/go.mod h1:GPwf8sph7YlJT3H6aKUWtd0E+oyShk/YHWQHf/OOgCA=
k8s.io/apimachinery v0.32.2 h1:yoQBR9ZGkA6Rgmhbp/yuT9/g+4lxtsGYwW6dR6BDPLQ=
k8s.io/apimachinery v0.32.2/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE=
k8s.io/client-go v0.32.2 h1:4dYCD4Nz+9RApM2b/3BtVvBHw54QjMFUl1OLcJG5yOA=
k8s.io/client-go v0.32.2/go.mod h1:fpZ4oJXclZ3r2nDOv+Ux3XcJutfrwjKTCHz2H3sww94=
k8s.io/apimachinery v0.32.3 h1:JmDuDarhDmA/Li7j3aPrwhpNBA94Nvk5zLeOge9HH1U=
k8s.io/apimachinery v0.32.3/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE=
k8s.io/client-go v0.32.3 h1:RKPVltzopkSgHS7aS98QdscAgtgah/+zmpAogooIqVU=
k8s.io/client-go v0.32.3/go.mod h1:3v0+3k4IcT9bXTc4V2rt+d2ZPPG700Xy6Oi0Gdl2PaY=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/kube-openapi v0.0.0-20250304201544-e5f78fe3ede9 h1:t0huyHnz6HsokckRxAF1bY0cqPFwzINKCL7yltEjZQc=
k8s.io/kube-openapi v0.0.0-20250304201544-e5f78fe3ede9/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8=
k8s.io/utils v0.0.0-20241210054802-24370beab758 h1:sdbE21q2nlQtFh65saZY+rRM6x6aJJI8IUa1AmH/qa0=
k8s.io/utils v0.0.0-20241210054802-24370beab758/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
sigs.k8s.io/controller-runtime v0.20.3 h1:I6Ln8JfQjHH7JbtCD2HCYHoIzajoRxPNuvhvcDbZgkI=
sigs.k8s.io/controller-runtime v0.20.3/go.mod h1:xg2XB0K5ShQzAgsoujxuKN4LNXR2LfwwHsPj7Iaw+XY=
sigs.k8s.io/controller-runtime v0.20.4 h1:X3c+Odnxz+iPTRobG4tp092+CvBU9UK0t/bRf+n0DGU=
sigs.k8s.io/controller-runtime v0.20.4/go.mod h1:xg2XB0K5ShQzAgsoujxuKN4LNXR2LfwwHsPj7Iaw+XY=
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE=
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=

View File

@@ -7,7 +7,7 @@ OS_IMAGE ?= ubuntu-22.04
TARGETPLATFORM ?= $(shell arch)
RUNNER_VERSION ?= 2.323.0
RUNNER_CONTAINER_HOOKS_VERSION ?= 0.6.2
RUNNER_CONTAINER_HOOKS_VERSION ?= 0.7.0
DOCKER_VERSION ?= 24.0.7
# default list of platforms for which multiarch image is built

View File

@@ -1,2 +1,2 @@
RUNNER_VERSION=2.323.0
RUNNER_CONTAINER_HOOKS_VERSION=0.6.2
RUNNER_CONTAINER_HOOKS_VERSION=0.7.0

View File

@@ -37,7 +37,7 @@ var (
testResultCMNamePrefix = "test-result-"
RunnerVersion = "2.323.0"
RunnerContainerHooksVersion = "0.6.2"
RunnerContainerHooksVersion = "0.7.0"
)
// If you're willing to run this test via VS Code "run test" or "debug test",
@@ -598,9 +598,9 @@ func initTestEnv(t *testing.T, k8sMinorVer string, vars vars) *env {
}
e.Kind = testing.StartKind(t, k8sMinorVer, testing.Preload(images...))
e.Env.Kubeconfig = e.Kind.Kubeconfig()
e.Kubeconfig = e.Kind.Kubeconfig()
} else {
e.Env.Kubeconfig = e.remoteKubeconfig
e.Kubeconfig = e.remoteKubeconfig
// Kind automatically installs https://github.com/rancher/local-path-provisioner for PVs.
// But assuming the remote cluster isn't a kind Kubernetes cluster,
@@ -1181,7 +1181,7 @@ func installActionsWorkflow(t *testing.T, testName, runnerLabel, testResultCMNam
steps = append(steps,
testing.Step{
Name: "Set up Docker Buildx",
Uses: "docker/setup-buildx-action@v1",
Uses: "docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2",
With: setupBuildXActionWith,
},
testing.Step{
@@ -1193,7 +1193,7 @@ func installActionsWorkflow(t *testing.T, testName, runnerLabel, testResultCMNam
Run: "docker run --rm test1",
},
testing.Step{
Uses: "addnab/docker-run-action@v3",
Uses: "addnab/docker-run-action@4f65fabd2431ebc8d299f8e5a018d79a769ae185",
With: &testing.With{
Image: "test1",
Run: "hello",
@@ -1234,7 +1234,7 @@ func installActionsWorkflow(t *testing.T, testName, runnerLabel, testResultCMNam
steps = append(steps,
testing.Step{
Uses: "azure/setup-kubectl@v1",
Uses: "azure/setup-kubectl@3e0aec4d80787158d308d7b364cb1b702e7feb7f",
With: &testing.With{
Version: "v1.24.0",
},