mirror of https://github.com/actions/actions-runner-controller.git
synced 2025-12-10 11:41:27 +00:00

Compare commits: nikola-jok ... gha-runner

23 Commits
| Author | SHA1 | Date |
|---|---|---|
| | d9826e5244 | |
| | 6f3882c482 | |
| | e46c929241 | |
| | d4af75d82e | |
| | e335f53037 | |
| | c359d14e69 | |
| | 9d8c59aeb3 | |
| | eef57e1a77 | |
| | 97697e80b4 | |
| | 27b292bdd3 | |
| | 1dbb88cb9e | |
| | 43f1cd0dac | |
| | 389d842a30 | |
| | f6f42dd4c1 | |
| | 20e157fa72 | |
| | cae7efa2c6 | |
| | d6e2790db5 | |
| | a1a8dc5606 | |
| | 16304b5ce7 | |
| | 32f19acc66 | |
| | 46ee5cf9a2 | |
| | f832b0b254 | |
| | a33d34a036 | |
28  .github/actions/setup-arc-e2e/action.yaml (vendored)

@@ -1,9 +1,9 @@
-name: 'Setup ARC E2E Test Action'
-description: 'Build controller image, create kind cluster, load the image, and exchange ARC configure token.'
+name: "Setup ARC E2E Test Action"
+description: "Build controller image, create kind cluster, load the image, and exchange ARC configure token."
 
 inputs:
   app-id:
-    description: 'GitHub App Id for exchange access token'
+    description: "GitHub App Id for exchange access token"
     required: true
   app-pk:
     description: "GitHub App private key for exchange access token"
@@ -20,30 +20,31 @@ inputs:
 
 outputs:
   token:
-    description: 'Token to use for configure ARC'
+    description: "Token to use for configure ARC"
     value: ${{steps.config-token.outputs.token}}
 
 runs:
   using: "composite"
   steps:
     - name: Set up Docker Buildx
-      uses: docker/setup-buildx-action@v3
+      uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2
       with:
         # Pinning v0.9.1 for Buildx and BuildKit v0.10.6
         # BuildKit v0.11 which has a bug causing intermittent
         # failures pushing images to GHCR
         version: v0.9.1
        driver-opts: image=moby/buildkit:v0.10.6
 
     - name: Build controller image
-      uses: docker/build-push-action@v5
+      # https://github.com/docker/build-push-action/releases/tag/v6.15.0
+      uses: docker/build-push-action@471d1dc4e07e5cdedd4c2171150001c434f0b7a4
       with:
         file: Dockerfile
         platforms: linux/amd64
         load: true
         build-args: |
           DOCKER_IMAGE_NAME=${{inputs.image-name}}
           VERSION=${{inputs.image-tag}}
         tags: |
           ${{inputs.image-name}}:${{inputs.image-tag}}
         no-cache: true
@@ -56,8 +57,9 @@ runs:
 
     - name: Get configure token
       id: config-token
+      # https://github.com/peter-murray/workflow-application-token-action/releases/tag/v3.0.0
       uses: peter-murray/workflow-application-token-action@dc0413987a085fa17d19df9e47d4677cf81ffef3
       with:
         application_id: ${{ inputs.app-id }}
         application_private_key: ${{ inputs.app-pk }}
         organization: ${{ inputs.target-org}}

@@ -24,23 +24,27 @@ runs:
       shell: bash
 
     - name: Set up QEMU
-      uses: docker/setup-qemu-action@v3
+      # https://github.com/docker/setup-qemu-action/releases/tag/v3.6.0
+      uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392
 
     - name: Set up Docker Buildx
-      uses: docker/setup-buildx-action@v3
+      # https://github.com/docker/setup-buildx-action/releases/tag/v3.10.0
+      uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2
       with:
         version: latest
 
     - name: Login to DockerHub
       if: ${{ github.event_name == 'release' || github.event_name == 'push' && github.ref == 'refs/heads/master' && inputs.password != '' }}
-      uses: docker/login-action@v3
+      # https://github.com/docker/login-action/releases/tag/v3.4.0
+      uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772
       with:
         username: ${{ inputs.username }}
         password: ${{ inputs.password }}
 
     - name: Login to GitHub Container Registry
       if: ${{ github.event_name == 'release' || github.event_name == 'push' && github.ref == 'refs/heads/master' && inputs.ghcr_password != '' }}
-      uses: docker/login-action@v3
+      # https://github.com/docker/login-action/releases/tag/v3.4.0
+      uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772
       with:
         registry: ghcr.io
         username: ${{ inputs.ghcr_username }}
286  .github/workflows/arc-publish-chart.yaml (vendored)

@@ -5,18 +5,18 @@ name: Publish ARC Helm Charts
 on:
   push:
     branches:
       - master
     paths:
-      - 'charts/**'
-      - '.github/workflows/arc-publish-chart.yaml'
-      - '!charts/actions-runner-controller/docs/**'
-      - '!charts/gha-runner-scale-set-controller/**'
-      - '!charts/gha-runner-scale-set/**'
-      - '!**.md'
+      - "charts/**"
+      - ".github/workflows/arc-publish-chart.yaml"
+      - "!charts/actions-runner-controller/docs/**"
+      - "!charts/gha-runner-scale-set-controller/**"
+      - "!charts/gha-runner-scale-set/**"
+      - "!**.md"
   workflow_dispatch:
     inputs:
       force:
-        description: 'Force publish even if the chart version is not bumped'
+        description: "Force publish even if the chart version is not bumped"
         type: boolean
         required: true
         default: false
@@ -39,86 +39,89 @@ jobs:
     outputs:
       publish-chart: ${{ steps.publish-chart-step.outputs.publish }}
     steps:
       - name: Checkout
         uses: actions/checkout@v4
         with:
           fetch-depth: 0
 
       - name: Set up Helm
+        # Using https://github.com/Azure/setup-helm/releases/tag/v4.2.0
         uses: azure/setup-helm@fe7b79cd5ee1e45176fcad797de68ecaf3ca4814
         with:
           version: ${{ env.HELM_VERSION }}
 
       - name: Set up kube-score
         run: |
           wget https://github.com/zegl/kube-score/releases/download/v${{ env.KUBE_SCORE_VERSION }}/kube-score_${{ env.KUBE_SCORE_VERSION }}_linux_amd64 -O kube-score
           chmod 755 kube-score
 
       - name: Kube-score generated manifests
         run: helm template --values charts/.ci/values-kube-score.yaml charts/* | ./kube-score score - --ignore-test pod-networkpolicy --ignore-test deployment-has-poddisruptionbudget --ignore-test deployment-has-host-podantiaffinity --ignore-test container-security-context --ignore-test pod-probes --ignore-test container-image-tag --enable-optional-test container-security-context-privileged --enable-optional-test container-security-context-readonlyrootfilesystem
 
       # python is a requirement for the chart-testing action below (supports yamllint among other tests)
       - uses: actions/setup-python@v5
         with:
-          python-version: '3.11'
+          python-version: "3.11"
 
       - name: Set up chart-testing
-        uses: helm/chart-testing-action@v2.6.0
+        # https://github.com/helm/chart-testing-action/releases/tag/v2.7.0
+        uses: helm/chart-testing-action@0d28d3144d3a25ea2cc349d6e59901c4ff469b3b
 
       - name: Run chart-testing (list-changed)
         id: list-changed
         run: |
           changed=$(ct list-changed --config charts/.ci/ct-config.yaml)
           if [[ -n "$changed" ]]; then
             echo "changed=true" >> $GITHUB_OUTPUT
           fi
 
       - name: Run chart-testing (lint)
         run: |
           ct lint --config charts/.ci/ct-config.yaml
 
       - name: Create kind cluster
         if: steps.list-changed.outputs.changed == 'true'
-        uses: helm/kind-action@v1.4.0
+        # https://github.com/helm/kind-action/releases/tag/v1.12.0
+        uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3
 
       # We need cert-manager already installed in the cluster because we assume the CRDs exist
       - name: Install cert-manager
         if: steps.list-changed.outputs.changed == 'true'
         run: |
           helm repo add jetstack https://charts.jetstack.io --force-update
           helm install cert-manager jetstack/cert-manager --set installCRDs=true --wait
 
       - name: Run chart-testing (install)
         if: steps.list-changed.outputs.changed == 'true'
         run: ct install --config charts/.ci/ct-config.yaml
 
       # WARNING: This relies on the latest release being at the top of the JSON from GitHub and a clean chart.yaml
       - name: Check if Chart Publish is Needed
         id: publish-chart-step
         run: |
           CHART_TEXT=$(curl -fs https://raw.githubusercontent.com/${{ github.repository }}/master/charts/actions-runner-controller/Chart.yaml)
           NEW_CHART_VERSION=$(echo "$CHART_TEXT" | grep version: | cut -d ' ' -f 2)
           RELEASE_LIST=$(curl -fs https://api.github.com/repos/${{ github.repository }}/releases | jq .[].tag_name | grep actions-runner-controller | cut -d '"' -f 2 | cut -d '-' -f 4)
           LATEST_RELEASED_CHART_VERSION=$(echo $RELEASE_LIST | cut -d ' ' -f 1)
 
           echo "CHART_VERSION_IN_MASTER=$NEW_CHART_VERSION" >> $GITHUB_ENV
           echo "LATEST_CHART_VERSION=$LATEST_RELEASED_CHART_VERSION" >> $GITHUB_ENV
 
           # Always publish if force is true
           if [[ $NEW_CHART_VERSION != $LATEST_RELEASED_CHART_VERSION || "${{ inputs.force }}" == "true" ]]; then
             echo "publish=true" >> $GITHUB_OUTPUT
           else
             echo "publish=false" >> $GITHUB_OUTPUT
           fi
 
       - name: Job summary
         run: |
           echo "Chart linting has been completed." >> $GITHUB_STEP_SUMMARY
           echo "" >> $GITHUB_STEP_SUMMARY
           echo "**Status:**" >> $GITHUB_STEP_SUMMARY
           echo "- chart version in master: ${{ env.CHART_VERSION_IN_MASTER }}" >> $GITHUB_STEP_SUMMARY
           echo "- latest chart version: ${{ env.LATEST_CHART_VERSION }}" >> $GITHUB_STEP_SUMMARY
           echo "- publish new chart: ${{ steps.publish-chart-step.outputs.publish }}" >> $GITHUB_STEP_SUMMARY
 
   publish-chart:
     if: needs.lint-chart.outputs.publish-chart == 'true'
@@ -133,80 +136,81 @@ jobs:
       CHART_TARGET_BRANCH: master
 
     steps:
       - name: Checkout
         uses: actions/checkout@v4
         with:
           fetch-depth: 0
 
       - name: Configure Git
         run: |
           git config user.name "$GITHUB_ACTOR"
           git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
 
       - name: Get Token
         id: get_workflow_token
+        # https://github.com/peter-murray/workflow-application-token-action/releases/tag/v3.0.0
         uses: peter-murray/workflow-application-token-action@dc0413987a085fa17d19df9e47d4677cf81ffef3
         with:
           application_id: ${{ secrets.ACTIONS_ACCESS_APP_ID }}
           application_private_key: ${{ secrets.ACTIONS_ACCESS_PK }}
           organization: ${{ env.CHART_TARGET_ORG }}
 
       - name: Install chart-releaser
-        uses: helm/chart-releaser-action@v1.4.1
+        uses: helm/chart-releaser-action@cae68fefc6b5f367a0275617c9f83181ba54714f
         with:
           install_only: true
           install_dir: ${{ github.workspace }}/bin
 
       - name: Package and upload release assets
         run: |
           cr package \
             ${{ github.workspace }}/charts/actions-runner-controller/ \
             --package-path .cr-release-packages
 
           cr upload \
             --owner "$(echo ${{ github.repository }} | cut -d '/' -f 1)" \
             --git-repo "$(echo ${{ github.repository }} | cut -d '/' -f 2)" \
             --package-path .cr-release-packages \
             --token ${{ secrets.GITHUB_TOKEN }}
 
       - name: Generate updated index.yaml
         run: |
           cr index \
             --owner "$(echo ${{ github.repository }} | cut -d '/' -f 1)" \
             --git-repo "$(echo ${{ github.repository }} | cut -d '/' -f 2)" \
             --index-path ${{ github.workspace }}/index.yaml \
             --token ${{ secrets.GITHUB_TOKEN }} \
             --push \
             --pages-branch 'gh-pages' \
             --pages-index-path 'index.yaml'
 
       # Chart Release was never intended to publish to a different repo
       # this workaround is intended to move the index.yaml to the target repo
       # where the github pages are hosted
       - name: Checkout target repository
         uses: actions/checkout@v4
         with:
           repository: ${{ env.CHART_TARGET_ORG }}/${{ env.CHART_TARGET_REPO }}
           path: ${{ env.CHART_TARGET_REPO }}
           ref: ${{ env.CHART_TARGET_BRANCH }}
           token: ${{ steps.get_workflow_token.outputs.token }}
 
       - name: Copy index.yaml
         run: |
           cp ${{ github.workspace }}/index.yaml ${{ env.CHART_TARGET_REPO }}/actions-runner-controller/index.yaml
 
       - name: Commit and push to target repository
         run: |
           git config user.name "$GITHUB_ACTOR"
           git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
           git add .
           git commit -m "Update index.yaml"
           git push
         working-directory: ${{ github.workspace }}/${{ env.CHART_TARGET_REPO }}
 
       - name: Job summary
         run: |
           echo "New helm chart has been published" >> $GITHUB_STEP_SUMMARY
           echo "" >> $GITHUB_STEP_SUMMARY
           echo "**Status:**" >> $GITHUB_STEP_SUMMARY
           echo "- New [index.yaml](https://github.com/${{ env.CHART_TARGET_ORG }}/${{ env.CHART_TARGET_REPO }}/tree/master/actions-runner-controller) pushed" >> $GITHUB_STEP_SUMMARY
11  .github/workflows/arc-publish.yaml (vendored)

@@ -9,17 +9,17 @@ on:
 workflow_dispatch:
   inputs:
     release_tag_name:
-      description: 'Tag name of the release to publish'
+      description: "Tag name of the release to publish"
       required: true
     push_to_registries:
-      description: 'Push images to registries'
+      description: "Push images to registries"
       required: true
       type: boolean
       default: false
 
 permissions:
   contents: write
   packages: write
 
 env:
   TARGET_ORG: actions-runner-controller
@@ -43,7 +43,7 @@ jobs:
 
   - uses: actions/setup-go@v5
     with:
-      go-version-file: 'go.mod'
+      go-version-file: "go.mod"
 
   - name: Install tools
     run: |
@@ -73,6 +73,7 @@ jobs:
 
   - name: Get Token
     id: get_workflow_token
+    # https://github.com/peter-murray/workflow-application-token-action/releases/tag/v3.0.0
     uses: peter-murray/workflow-application-token-action@dc0413987a085fa17d19df9e47d4677cf81ffef3
     with:
       application_id: ${{ secrets.ACTIONS_ACCESS_APP_ID }}
7  .github/workflows/arc-release-runners.yaml (vendored)

@@ -7,10 +7,10 @@ on:
 # are available to the workflow run
 push:
   branches:
-    - 'master'
+    - "master"
   paths:
-    - 'runner/VERSION'
-    - '.github/workflows/arc-release-runners.yaml'
+    - "runner/VERSION"
+    - ".github/workflows/arc-release-runners.yaml"
 
 env:
   # Safeguard to prevent pushing images to registeries after build
@@ -39,6 +39,7 @@ jobs:
 
   - name: Get Token
     id: get_workflow_token
+    # https://github.com/peter-murray/workflow-application-token-action/releases/tag/v3.0.0
     uses: peter-murray/workflow-application-token-action@dc0413987a085fa17d19df9e47d4677cf81ffef3
     with:
       application_id: ${{ secrets.ACTIONS_ACCESS_APP_ID }}
50  .github/workflows/arc-validate-chart.yaml (vendored)

@@ -5,20 +5,20 @@ on:
   branches:
     - master
   paths:
-    - 'charts/**'
-    - '.github/workflows/arc-validate-chart.yaml'
-    - '!charts/actions-runner-controller/docs/**'
-    - '!**.md'
-    - '!charts/gha-runner-scale-set-controller/**'
-    - '!charts/gha-runner-scale-set/**'
+    - "charts/**"
+    - ".github/workflows/arc-validate-chart.yaml"
+    - "!charts/actions-runner-controller/docs/**"
+    - "!**.md"
+    - "!charts/gha-runner-scale-set-controller/**"
+    - "!charts/gha-runner-scale-set/**"
   push:
     paths:
-      - 'charts/**'
-      - '.github/workflows/arc-validate-chart.yaml'
-      - '!charts/actions-runner-controller/docs/**'
-      - '!**.md'
-      - '!charts/gha-runner-scale-set-controller/**'
-      - '!charts/gha-runner-scale-set/**'
+      - "charts/**"
+      - ".github/workflows/arc-validate-chart.yaml"
+      - "!charts/actions-runner-controller/docs/**"
+      - "!**.md"
+      - "!charts/gha-runner-scale-set-controller/**"
+      - "!charts/gha-runner-scale-set/**"
   workflow_dispatch:
 env:
   KUBE_SCORE_VERSION: 1.10.0
@@ -45,34 +45,19 @@ jobs:
       fetch-depth: 0
 
   - name: Set up Helm
-    # Using https://github.com/Azure/setup-helm/releases/tag/v4.2
+    # Using https://github.com/Azure/setup-helm/releases/tag/v4.2.0
     uses: azure/setup-helm@fe7b79cd5ee1e45176fcad797de68ecaf3ca4814
     with:
       version: ${{ env.HELM_VERSION }}
 
-  - name: Set up kube-score
-    run: |
-      wget https://github.com/zegl/kube-score/releases/download/v${{ env.KUBE_SCORE_VERSION }}/kube-score_${{ env.KUBE_SCORE_VERSION }}_linux_amd64 -O kube-score
-      chmod 755 kube-score
-
-  - name: Kube-score generated manifests
-    run: helm template --values charts/.ci/values-kube-score.yaml charts/* | ./kube-score score -
-      --ignore-test pod-networkpolicy
-      --ignore-test deployment-has-poddisruptionbudget
-      --ignore-test deployment-has-host-podantiaffinity
-      --ignore-test container-security-context
-      --ignore-test pod-probes
-      --ignore-test container-image-tag
-      --enable-optional-test container-security-context-privileged
-      --enable-optional-test container-security-context-readonlyrootfilesystem
-
   # python is a requirement for the chart-testing action below (supports yamllint among other tests)
   - uses: actions/setup-python@v5
     with:
-      python-version: '3.11'
+      python-version: "3.11"
 
   - name: Set up chart-testing
-    uses: helm/chart-testing-action@v2.6.0
+    # https://github.com/helm/chart-testing-action/releases/tag/v2.7.0
+    uses: helm/chart-testing-action@0d28d3144d3a25ea2cc349d6e59901c4ff469b3b
 
   - name: Run chart-testing (list-changed)
     id: list-changed
@@ -87,7 +72,8 @@ jobs:
     ct lint --config charts/.ci/ct-config.yaml
 
   - name: Create kind cluster
-    uses: helm/kind-action@v1.4.0
+    # https://github.com/helm/kind-action/releases/tag/v1.12.0
+    uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3
     if: steps.list-changed.outputs.changed == 'true'
 
   # We need cert-manager already installed in the cluster because we assume the CRDs exist
38  .github/workflows/arc-validate-runners.yaml (vendored)

@@ -3,17 +3,17 @@ name: Validate ARC Runners
 on:
   pull_request:
     branches:
-      - '**'
+      - "**"
     paths:
-      - 'runner/**'
-      - 'test/startup/**'
-      - '!**.md'
+      - "runner/**"
+      - "test/startup/**"
+      - "!**.md"
 
 permissions:
   contents: read
 
 concurrency:
   # This will make sure we only apply the concurrency limits on pull requests
   # but not pushes to master branch by making the concurrency group name unique
   # for pushes
   group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
@@ -25,28 +25,16 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v4
-      - name: shellcheck
-        uses: reviewdog/action-shellcheck@v1
-        with:
-          github_token: ${{ secrets.GITHUB_TOKEN }}
-          path: "./runner"
-          pattern: |
-            *.sh
-            *.bash
-            update-status
-          # Make this consistent with `make shellsheck`
-          shellcheck_flags: "--shell bash --source-path runner"
-          exclude: "./.git/*"
-          check_all_files_with_shebangs: "false"
-          # Set this to "true" once we addressed all the shellcheck findings
-          fail_on_error: "false"
+      - name: "Run shellcheck"
+        run: make shellcheck
 
   test-runner-entrypoint:
     name: Test entrypoint
     runs-on: ubuntu-latest
     steps:
       - name: Checkout
         uses: actions/checkout@v4
 
       - name: Run tests
         run: |
           make acceptance/runner/startup
2  .github/workflows/gha-e2e-tests.yaml (vendored)

@@ -16,7 +16,7 @@ env:
 TARGET_ORG: actions-runner-controller
 TARGET_REPO: arc_e2e_test_dummy
 IMAGE_NAME: "arc-test-image"
-IMAGE_VERSION: "0.11.0"
+IMAGE_VERSION: "0.12.0"
 
 concurrency:
   # This will make sure we only apply the concurrency limits on pull requests
29  .github/workflows/gha-publish-chart.yaml (vendored)

@@ -4,27 +4,27 @@ on:
 workflow_dispatch:
   inputs:
     ref:
-      description: 'The branch, tag or SHA to cut a release from'
+      description: "The branch, tag or SHA to cut a release from"
       required: false
       type: string
-      default: ''
+      default: ""
     release_tag_name:
-      description: 'The name to tag the controller image with'
+      description: "The name to tag the controller image with"
       required: true
       type: string
-      default: 'canary'
+      default: "canary"
     push_to_registries:
-      description: 'Push images to registries'
+      description: "Push images to registries"
       required: true
       type: boolean
       default: false
     publish_gha_runner_scale_set_controller_chart:
-      description: 'Publish new helm chart for gha-runner-scale-set-controller'
+      description: "Publish new helm chart for gha-runner-scale-set-controller"
       required: true
       type: boolean
       default: false
     publish_gha_runner_scale_set_chart:
-      description: 'Publish new helm chart for gha-runner-scale-set'
+      description: "Publish new helm chart for gha-runner-scale-set"
       required: true
       type: boolean
       default: false
@@ -72,10 +72,11 @@ jobs:
   echo "repository_owner=$(echo ${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT
 
 - name: Set up QEMU
-  uses: docker/setup-qemu-action@v3
+  # https://github.com/docker/setup-qemu-action/releases/tag/v3.6.0
+  uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392
 
 - name: Set up Docker Buildx
-  uses: docker/setup-buildx-action@v3
+  uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2
   with:
     # Pinning v0.9.1 for Buildx and BuildKit v0.10.6
     # BuildKit v0.11 which has a bug causing intermittent
@@ -84,14 +85,16 @@ jobs:
     driver-opts: image=moby/buildkit:v0.10.6
 
 - name: Login to GitHub Container Registry
-  uses: docker/login-action@v3
+  # https://github.com/docker/login-action/releases/tag/v3.4.0
+  uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772
   with:
     registry: ghcr.io
     username: ${{ github.actor }}
     password: ${{ secrets.GITHUB_TOKEN }}
 
 - name: Build & push controller image
-  uses: docker/build-push-action@v5
+  # https://github.com/docker/build-push-action/releases/tag/v6.15.0
+  uses: docker/build-push-action@471d1dc4e07e5cdedd4c2171150001c434f0b7a4
   with:
     file: Dockerfile
     platforms: linux/amd64,linux/arm64
@@ -140,7 +143,7 @@ jobs:
   echo "repository_owner=$(echo ${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT
 
 - name: Set up Helm
-  # Using https://github.com/Azure/setup-helm/releases/tag/v4.2
+  # Using https://github.com/Azure/setup-helm/releases/tag/v4.2.0
   uses: azure/setup-helm@fe7b79cd5ee1e45176fcad797de68ecaf3ca4814
   with:
     version: ${{ env.HELM_VERSION }}
@@ -188,7 +191,7 @@ jobs:
   echo "repository_owner=$(echo ${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT
 
 - name: Set up Helm
-  # Using https://github.com/Azure/setup-helm/releases/tag/v4.2
+  # Using https://github.com/Azure/setup-helm/releases/tag/v4.2.0
   uses: azure/setup-helm@fe7b79cd5ee1e45176fcad797de68ecaf3ca4814
   with:
     version: ${{ env.HELM_VERSION }}
41  .github/workflows/gha-validate-chart.yaml (vendored)

@@ -5,16 +5,16 @@ on:
   branches:
     - master
   paths:
-    - 'charts/**'
-    - '.github/workflows/gha-validate-chart.yaml'
-    - '!charts/actions-runner-controller/**'
-    - '!**.md'
+    - "charts/**"
+    - ".github/workflows/gha-validate-chart.yaml"
+    - "!charts/actions-runner-controller/**"
+    - "!**.md"
   push:
     paths:
-      - 'charts/**'
-      - '.github/workflows/gha-validate-chart.yaml'
-      - '!charts/actions-runner-controller/**'
-      - '!**.md'
+      - "charts/**"
+      - ".github/workflows/gha-validate-chart.yaml"
+      - "!charts/actions-runner-controller/**"
+      - "!**.md"
   workflow_dispatch:
 env:
   KUBE_SCORE_VERSION: 1.16.1
@@ -41,7 +41,7 @@ jobs:
   fetch-depth: 0
 
 - name: Set up Helm
-  # Using https://github.com/Azure/setup-helm/releases/tag/v4.2
+  # Using https://github.com/Azure/setup-helm/releases/tag/v4.2.0
   uses: azure/setup-helm@fe7b79cd5ee1e45176fcad797de68ecaf3ca4814
   with:
     version: ${{ env.HELM_VERSION }}
@@ -49,10 +49,11 @@ jobs:
 # python is a requirement for the chart-testing action below (supports yamllint among other tests)
 - uses: actions/setup-python@v5
   with:
-    python-version: '3.11'
+    python-version: "3.11"
 
 - name: Set up chart-testing
-  uses: helm/chart-testing-action@v2.6.0
+  # https://github.com/helm/chart-testing-action/releases/tag/v2.7.0
+  uses: helm/chart-testing-action@0d28d3144d3a25ea2cc349d6e59901c4ff469b3b
 
 - name: Run chart-testing (list-changed)
   id: list-changed
@@ -68,13 +69,14 @@ jobs:
   ct lint --config charts/.ci/ct-config-gha.yaml
 
 - name: Set up docker buildx
-  uses: docker/setup-buildx-action@v3
+  uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2
   if: steps.list-changed.outputs.changed == 'true'
   with:
     version: latest
 
 - name: Build controller image
-  uses: docker/build-push-action@v5
+  # https://github.com/docker/build-push-action/releases/tag/v6.15.0
+  uses: docker/build-push-action@471d1dc4e07e5cdedd4c2171150001c434f0b7a4
   if: steps.list-changed.outputs.changed == 'true'
   with:
     file: Dockerfile
@@ -89,7 +91,8 @@ jobs:
   cache-to: type=gha,mode=max
 
 - name: Create kind cluster
-  uses: helm/kind-action@v1.4.0
+  # https://github.com/helm/kind-action/releases/tag/v1.12.0
+  uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3
   if: steps.list-changed.outputs.changed == 'true'
   with:
     cluster_name: chart-testing
@@ -97,11 +100,11 @@ jobs:
 - name: Load image into cluster
   if: steps.list-changed.outputs.changed == 'true'
   run: |
     export DOCKER_IMAGE_NAME=test-arc
     export VERSION=dev
     export IMG_RESULT=load
     make docker-buildx
     kind load docker-image test-arc:dev --name chart-testing
 
 - name: Run chart-testing (install)
   if: steps.list-changed.outputs.changed == 'true'
61  .github/workflows/global-publish-canary.yaml (vendored)

@@ -7,30 +7,30 @@ on:
 branches:
   - master
 paths-ignore:
-  - '**.md'
-  - '.github/actions/**'
-  - '.github/ISSUE_TEMPLATE/**'
-  - '.github/workflows/e2e-test-dispatch-workflow.yaml'
-  - '.github/workflows/gha-e2e-tests.yaml'
-  - '.github/workflows/arc-publish.yaml'
-  - '.github/workflows/arc-publish-chart.yaml'
-  - '.github/workflows/gha-publish-chart.yaml'
-  - '.github/workflows/arc-release-runners.yaml'
-  - '.github/workflows/global-run-codeql.yaml'
-  - '.github/workflows/global-run-first-interaction.yaml'
-  - '.github/workflows/global-run-stale.yaml'
-  - '.github/workflows/arc-update-runners-scheduled.yaml'
-  - '.github/workflows/validate-arc.yaml'
-  - '.github/workflows/arc-validate-chart.yaml'
-  - '.github/workflows/gha-validate-chart.yaml'
-  - '.github/workflows/arc-validate-runners.yaml'
-  - '.github/dependabot.yml'
-  - '.github/RELEASE_NOTE_TEMPLATE.md'
-  - 'runner/**'
-  - '.gitignore'
-  - 'PROJECT'
-  - 'LICENSE'
-  - 'Makefile'
+  - "**.md"
+  - ".github/actions/**"
+  - ".github/ISSUE_TEMPLATE/**"
+  - ".github/workflows/e2e-test-dispatch-workflow.yaml"
+  - ".github/workflows/gha-e2e-tests.yaml"
+  - ".github/workflows/arc-publish.yaml"
+  - ".github/workflows/arc-publish-chart.yaml"
+  - ".github/workflows/gha-publish-chart.yaml"
+  - ".github/workflows/arc-release-runners.yaml"
+  - ".github/workflows/global-run-codeql.yaml"
+  - ".github/workflows/global-run-first-interaction.yaml"
+  - ".github/workflows/global-run-stale.yaml"
+  - ".github/workflows/arc-update-runners-scheduled.yaml"
+  - ".github/workflows/validate-arc.yaml"
+  - ".github/workflows/arc-validate-chart.yaml"
+  - ".github/workflows/gha-validate-chart.yaml"
+  - ".github/workflows/arc-validate-runners.yaml"
+  - ".github/dependabot.yml"
+  - ".github/RELEASE_NOTE_TEMPLATE.md"
+  - "runner/**"
+  - ".gitignore"
+  - "PROJECT"
+  - "LICENSE"
+  - "Makefile"
 
 # https://docs.github.com/en/rest/overview/permissions-required-for-github-apps
 permissions:
@@ -59,6 +59,7 @@ jobs:
 
 - name: Get Token
   id: get_workflow_token
+  # https://github.com/peter-murray/workflow-application-token-action/releases/tag/v3.0.0
   uses: peter-murray/workflow-application-token-action@dc0413987a085fa17d19df9e47d4677cf81ffef3
   with:
     application_id: ${{ secrets.ACTIONS_ACCESS_APP_ID }}
@@ -93,7 +94,8 @@ jobs:
   uses: actions/checkout@v4
 
 - name: Login to GitHub Container Registry
-  uses: docker/login-action@v3
+  # https://github.com/docker/login-action/releases/tag/v3.4.0
+  uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772
   with:
     registry: ghcr.io
     username: ${{ github.actor }}
@@ -110,16 +112,19 @@ jobs:
   echo "repository_owner=$(echo ${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT
 
 - name: Set up QEMU
-  uses: docker/setup-qemu-action@v3
+  # https://github.com/docker/setup-qemu-action/releases/tag/v3.6.0
+  uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392
 
 - name: Set up Docker Buildx
-  uses: docker/setup-buildx-action@v3
+  # https://github.com/docker/setup-buildx-action/releases/tag/v3.10.0
+  uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2
   with:
     version: latest
 
 # Unstable builds - run at your own risk
 - name: Build and Push
-  uses: docker/build-push-action@v5
+  # https://github.com/docker/build-push-action/releases/tag/v6.15.0
+  uses: docker/build-push-action@471d1dc4e07e5cdedd4c2171150001c434f0b7a4
   with:
     context: .
     file: ./Dockerfile
29  .github/workflows/go.yaml (vendored)

@@ -4,16 +4,16 @@ on:
 branches:
   - master
 paths:
-  - '.github/workflows/go.yaml'
-  - '**.go'
-  - 'go.mod'
-  - 'go.sum'
+  - ".github/workflows/go.yaml"
+  - "**.go"
+  - "go.mod"
+  - "go.sum"
 pull_request:
   paths:
-    - '.github/workflows/go.yaml'
-    - '**.go'
-    - 'go.mod'
-    - 'go.sum'
+    - ".github/workflows/go.yaml"
+    - "**.go"
+    - "go.mod"
+    - "go.sum"
 
 permissions:
   contents: read
@@ -32,7 +32,7 @@ jobs:
 - uses: actions/checkout@v4
 - uses: actions/setup-go@v5
   with:
-    go-version-file: 'go.mod'
+    go-version-file: "go.mod"
     cache: false
 - name: fmt
   run: go fmt ./...
@@ -45,13 +45,14 @@ jobs:
 - uses: actions/checkout@v4
 - uses: actions/setup-go@v5
   with:
-    go-version-file: 'go.mod'
+    go-version-file: "go.mod"
     cache: false
 - name: golangci-lint
-  uses: golangci/golangci-lint-action@v6
+  # https://github.com/golangci/golangci-lint-action/releases/tag/v7.0.0
+  uses: golangci/golangci-lint-action@1481404843c368bc19ca9406f87d6e0fc97bdcfd
   with:
     only-new-issues: true
-    version: v1.55.2
+    version: v2.1.2
 
 generate:
   runs-on: ubuntu-latest
@@ -59,7 +60,7 @@ jobs:
 - uses: actions/checkout@v4
 - uses: actions/setup-go@v5
   with:
-    go-version-file: 'go.mod'
+    go-version-file: "go.mod"
     cache: false
 - name: Generate
   run: make generate
@@ -72,7 +73,7 @@ jobs:
 - uses: actions/checkout@v4
 - uses: actions/setup-go@v5
   with:
-    go-version-file: 'go.mod'
+    go-version-file: "go.mod"
 - run: make manifests
 - name: Check diff
   run: git diff --exit-code
@@ -1,19 +1,14 @@
+version: "2"
 run:
-  timeout: 3m
-output:
-  formats:
-    - format: github-actions
-      path: stdout
-linters-settings:
-  errcheck:
-    exclude-functions:
-      - (net/http.ResponseWriter).Write
-      - (*net/http.Server).Shutdown
-      - (*github.com/actions/actions-runner-controller/simulator.VisibleRunnerGroups).Add
-      - (*github.com/actions/actions-runner-controller/testing.Kind).Stop
-issues:
-  exclude-rules:
-    - path: controllers/suite_test.go
-      linters:
-        - staticcheck
-      text: "SA1019"
+  timeout: 5m
+linters:
+  settings:
+    errcheck:
+      exclude-functions:
+        - (net/http.ResponseWriter).Write
+        - (*net/http.Server).Shutdown
+        - (*github.com/actions/actions-runner-controller/simulator.VisibleRunnerGroups).Add
+        - (*github.com/actions/actions-runner-controller/testing.Kind).Stop
+  exclusions:
+    presets:
+      - std-error-handling
@@ -1,5 +1,5 @@
 # Build the manager binary
-FROM --platform=$BUILDPLATFORM golang:1.24.0 as builder
+FROM --platform=$BUILDPLATFORM golang:1.24.3 AS builder
 
 WORKDIR /workspace
 
@@ -30,7 +30,7 @@ ARG TARGETPLATFORM TARGETOS TARGETARCH TARGETVARIANT VERSION=dev COMMIT_SHA=dev
 # to avoid https://github.com/moby/buildkit/issues/2334
 # We can use docker layer cache so the build is fast enogh anyway
 # We also use per-platform GOCACHE for the same reason.
-ENV GOCACHE /build/${TARGETPLATFORM}/root/.cache/go-build
+ENV GOCACHE="/build/${TARGETPLATFORM}/root/.cache/go-build"
 
 # Build
 RUN --mount=target=. \
10  Makefile

@@ -6,7 +6,7 @@ endif
 DOCKER_USER ?= $(shell echo ${DOCKER_IMAGE_NAME} | cut -d / -f1)
 VERSION ?= dev
 COMMIT_SHA = $(shell git rev-parse HEAD)
-RUNNER_VERSION ?= 2.323.0
+RUNNER_VERSION ?= 2.325.0
 TARGETPLATFORM ?= $(shell arch)
 RUNNER_NAME ?= ${DOCKER_USER}/actions-runner
 RUNNER_TAG ?= ${VERSION}
@@ -20,7 +20,7 @@ KUBECONTEXT ?= kind-acceptance
 CLUSTER ?= acceptance
 CERT_MANAGER_VERSION ?= v1.1.1
 KUBE_RBAC_PROXY_VERSION ?= v0.11.0
-SHELLCHECK_VERSION ?= 0.8.0
+SHELLCHECK_VERSION ?= 0.10.0
 
 # Produce CRDs that work back to Kubernetes 1.11 (no version conversion)
 CRD_OPTIONS ?= "crd:generateEmbeddedObjectMeta=true,allowDangerousTypes=true"
@@ -68,7 +68,7 @@ endif
 all: manager
 
 lint:
-	docker run --rm -v $(PWD):/app -w /app golangci/golangci-lint:v1.57.2 golangci-lint run
+	docker run --rm -v $(PWD):/app -w /app golangci/golangci-lint:v2.1.2 golangci-lint run
 
 GO_TEST_ARGS ?= -short
 
@@ -204,7 +204,7 @@ generate: controller-gen
 
 # Run shellcheck on runner scripts
 shellcheck: shellcheck-install
-	$(TOOLS_PATH)/shellcheck --shell bash --source-path runner runner/*.sh hack/*.sh
+	$(TOOLS_PATH)/shellcheck --shell bash --source-path runner runner/*.sh runner/update-status hack/*.sh
 
 docker-buildx:
	export DOCKER_CLI_EXPERIMENTAL=enabled ;\
@@ -310,7 +310,7 @@ github-release: release
 # Otherwise we get errors like the below:
 # Error: failed to install CRD crds/actions.summerwind.dev_runnersets.yaml: CustomResourceDefinition.apiextensions.k8s.io "runnersets.actions.summerwind.dev" is invalid: [spec.validation.openAPIV3Schema.properties[spec].properties[template].properties[spec].properties[containers].items.properties[ports].items.properties[protocol].default: Required value: this property is in x-kubernetes-list-map-keys, so it must have a default or be a required property, spec.validation.openAPIV3Schema.properties[spec].properties[template].properties[spec].properties[initContainers].items.properties[ports].items.properties[protocol].default: Required value: this property is in x-kubernetes-list-map-keys, so it must have a default or be a required property]
 #
-# Note that controller-gen newer than 0.6.2 is needed due to https://github.com/kubernetes-sigs/controller-tools/issues/448
+# Note that controller-gen newer than 0.7.0 is needed due to https://github.com/kubernetes-sigs/controller-tools/issues/448
 # Otherwise ObjectMeta embedded in Spec results in empty on the storage.
 controller-gen:
 ifeq (, $(shell which controller-gen))
@@ -5,22 +5,23 @@ on:
 
 env:
   IRSA_ROLE_ARN:
   ASSUME_ROLE_ARN:
   AWS_REGION:
 
 jobs:
   assume-role-in-runner-test:
-    runs-on: ['self-hosted', 'Linux']
+    runs-on: ["self-hosted", "Linux"]
     steps:
       - name: Test aws-actions/configure-aws-credentials Action
-        uses: aws-actions/configure-aws-credentials@v1
+        # https://github.com/aws-actions/configure-aws-credentials/releases/tag/v4.1.0
+        uses: aws-actions/configure-aws-credentials@ececac1a45f3b08a01d2dd070d28d111c5fe6722
         with:
           aws-region: ${{ env.AWS_REGION }}
           role-to-assume: ${{ env.ASSUME_ROLE_ARN }}
           role-duration-seconds: 900
   assume-role-in-container-test:
-    runs-on: ['self-hosted', 'Linux']
+    runs-on: ["self-hosted", "Linux"]
     container:
       image: amazon/aws-cli
       env:
         AWS_WEB_IDENTITY_TOKEN_FILE: /var/run/secrets/eks.amazonaws.com/serviceaccount/token
@@ -29,7 +30,8 @@ jobs:
       - /var/run/secrets/eks.amazonaws.com/serviceaccount/token:/var/run/secrets/eks.amazonaws.com/serviceaccount/token
     steps:
       - name: Test aws-actions/configure-aws-credentials Action in container
-        uses: aws-actions/configure-aws-credentials@v1
+        # https://github.com/aws-actions/configure-aws-credentials/releases/tag/v4.1.0
+        uses: aws-actions/configure-aws-credentials@ececac1a45f3b08a01d2dd070d28d111c5fe6722
         with:
           aws-region: ${{ env.AWS_REGION }}
           role-to-assume: ${{ env.ASSUME_ROLE_ARN }}
@@ -8,8 +8,8 @@ env:
 
 jobs:
   run-step-in-container-test:
-    runs-on: ['self-hosted', 'Linux']
+    runs-on: ["self-hosted", "Linux"]
     container:
       image: alpine
     steps:
       - name: Test we are working in the container
@@ -21,7 +21,7 @@ jobs:
         exit 1
       fi
   setup-python-test:
-    runs-on: ['self-hosted', 'Linux']
+    runs-on: ["self-hosted", "Linux"]
     steps:
       - name: Print native Python environment
         run: |
@@ -41,12 +41,12 @@ jobs:
         echo "Python version detected : $(python --version 2>&1)"
       fi
   setup-node-test:
-    runs-on: ['self-hosted', 'Linux']
+    runs-on: ["self-hosted", "Linux"]
     steps:
       - uses: actions/setup-node@v2
         with:
-          node-version: '12'
+          node-version: "12"
       - name: Test actions/setup-node works
         run: |
           VERSION=$(node --version | cut -c 2- | cut -d '.' -f1)
           if [[ $VERSION != '12' ]]; then
@@ -57,13 +57,14 @@ jobs:
         echo "Node version detected : $(node --version 2>&1)"
       fi
   setup-ruby-test:
-    runs-on: ['self-hosted', 'Linux']
+    runs-on: ["self-hosted", "Linux"]
     steps:
-      - uses: ruby/setup-ruby@v1
+      # https://github.com/ruby/setup-ruby/releases/tag/v1.227.0
+      - uses: ruby/setup-ruby@1a615958ad9d422dd932dc1d5823942ee002799f
         with:
           ruby-version: 3.0
           bundler-cache: true
       - name: Test ruby/setup-ruby works
         run: |
           VERSION=$(ruby --version | cut -d ' ' -f2 | cut -d '.' -f1-2)
           if [[ $VERSION != '3.0' ]]; then
@@ -74,8 +75,8 @@ jobs:
         echo "Ruby version detected : $(ruby --version 2>&1)"
       fi
   python-shell-test:
-    runs-on: ['self-hosted', 'Linux']
+    runs-on: ["self-hosted", "Linux"]
     steps:
       - name: Test Python shell works
         run: |
           import os
89  apis/actions.github.com/v1alpha1/appconfig/appconfig.go (new file)

@@ -0,0 +1,89 @@
+package appconfig
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"strconv"
+
+	corev1 "k8s.io/api/core/v1"
+)
+
+type AppConfig struct {
+	AppID             string `json:"github_app_id"`
+	AppInstallationID int64  `json:"github_app_installation_id"`
+	AppPrivateKey     string `json:"github_app_private_key"`
+
+	Token string `json:"github_token"`
+}
+
+func (c *AppConfig) tidy() *AppConfig {
+	if len(c.Token) > 0 {
+		return &AppConfig{
+			Token: c.Token,
+		}
+	}
+
+	return &AppConfig{
+		AppID:             c.AppID,
+		AppInstallationID: c.AppInstallationID,
+		AppPrivateKey:     c.AppPrivateKey,
+	}
+}
+
+func (c *AppConfig) Validate() error {
+	if c == nil {
+		return fmt.Errorf("missing app config")
+	}
+	hasToken := len(c.Token) > 0
+	hasGitHubAppAuth := c.hasGitHubAppAuth()
+	if hasToken && hasGitHubAppAuth {
+		return fmt.Errorf("both PAT and GitHub App credentials provided. should only provide one")
+	}
+	if !hasToken && !hasGitHubAppAuth {
+		return fmt.Errorf("no credentials provided: either a PAT or GitHub App credentials should be provided")
+	}
+
+	return nil
+}
+
+func (c *AppConfig) hasGitHubAppAuth() bool {
+	return len(c.AppID) > 0 && c.AppInstallationID > 0 && len(c.AppPrivateKey) > 0
+}
+
+func FromSecret(secret *corev1.Secret) (*AppConfig, error) {
+	var appInstallationID int64
+	if v := string(secret.Data["github_app_installation_id"]); v != "" {
+		val, err := strconv.ParseInt(v, 10, 64)
+		if err != nil {
+			return nil, err
+		}
+		appInstallationID = val
+	}
+
+	cfg := &AppConfig{
+		Token:             string(secret.Data["github_token"]),
+		AppID:             string(secret.Data["github_app_id"]),
+		AppInstallationID: appInstallationID,
+		AppPrivateKey:     string(secret.Data["github_app_private_key"]),
+	}
+
+	if err := cfg.Validate(); err != nil {
+		return nil, fmt.Errorf("failed to validate config: %v", err)
+	}
+
+	return cfg.tidy(), nil
+}
+
+func FromJSONString(v string) (*AppConfig, error) {
+	var appConfig AppConfig
+	if err := json.NewDecoder(bytes.NewBufferString(v)).Decode(&appConfig); err != nil {
+		return nil, err
+	}
+
+	if err := appConfig.Validate(); err != nil {
+		return nil, fmt.Errorf("failed to validate app config decoded from string: %w", err)
+	}
+
+	return appConfig.tidy(), nil
+}
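A minimal sketch of how this new helper might be consumed, assuming only the exported API shown in the diff (FromSecret, FromJSONString, Validate). The secret keys mirror the github_* data keys read above; the surrounding main function is purely illustrative, not code from this changeset:

```go
package main

import (
	"fmt"

	"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1/appconfig"
	corev1 "k8s.io/api/core/v1"
)

func main() {
	// A Secret carrying GitHub App credentials; FromSecret reads the
	// github_* keys from Data, validates them, and tidy() strips the
	// unused auth mode (a token alone wins when one is present).
	secret := &corev1.Secret{
		Data: map[string][]byte{
			"github_app_id":              []byte("1"),
			"github_app_installation_id": []byte("2"),
			"github_app_private_key":     []byte("private key"),
		},
	}

	cfg, err := appconfig.FromSecret(secret)
	if err != nil {
		panic(err)
	}
	fmt.Printf("app id=%s installation=%d\n", cfg.AppID, cfg.AppInstallationID)

	// The same config can round-trip through JSON, matching the
	// TestAppConfigFromString_valid table test below.
	fromJSON, err := appconfig.FromJSONString(`{"github_token":"token"}`)
	if err != nil {
		panic(err)
	}
	fmt.Println("token set:", len(fromJSON.Token) > 0)
}
```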
152  apis/actions.github.com/v1alpha1/appconfig/appconfig_test.go (new file)

@@ -0,0 +1,152 @@
+package appconfig
+
+import (
+	"encoding/json"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+	corev1 "k8s.io/api/core/v1"
+)
+
+func TestAppConfigValidate_invalid(t *testing.T) {
+	tt := map[string]*AppConfig{
+		"empty": {},
+		"token and app config": {
+			AppID:             "1",
+			AppInstallationID: 2,
+			AppPrivateKey:     "private key",
+			Token:             "token",
+		},
+		"app id not set": {
+			AppInstallationID: 2,
+			AppPrivateKey:     "private key",
+		},
+		"app installation id not set": {
+			AppID:         "2",
+			AppPrivateKey: "private key",
+		},
+		"private key empty": {
+			AppID:             "2",
+			AppInstallationID: 1,
+			AppPrivateKey:     "",
+		},
+	}
+
+	for name, cfg := range tt {
+		t.Run(name, func(t *testing.T) {
+			err := cfg.Validate()
+			require.Error(t, err)
+		})
+	}
+}
+
+func TestAppConfigValidate_valid(t *testing.T) {
+	tt := map[string]*AppConfig{
+		"token": {
+			Token: "token",
+		},
+		"app ID": {
+			AppID:             "1",
+			AppInstallationID: 2,
+			AppPrivateKey:     "private key",
+		},
+	}
+
+	for name, cfg := range tt {
+		t.Run(name, func(t *testing.T) {
+			err := cfg.Validate()
+			require.NoError(t, err)
+		})
+	}
+}
+
+func TestAppConfigFromSecret_invalid(t *testing.T) {
+	tt := map[string]map[string]string{
+		"empty": {},
+		"token and app provided": {
+			"github_token":              "token",
+			"github_app_id":             "2",
+			"githu_app_installation_id": "3",
+			"github_app_private_key":    "private key",
+		},
+		"invalid app id": {
+			"github_app_id":             "abc",
+			"githu_app_installation_id": "3",
+			"github_app_private_key":    "private key",
+		},
+		"invalid app installation_id": {
+			"github_app_id":             "1",
+			"githu_app_installation_id": "abc",
+			"github_app_private_key":    "private key",
+		},
+		"empty private key": {
+			"github_app_id":             "1",
+			"githu_app_installation_id": "2",
+			"github_app_private_key":    "",
+		},
+	}
+
+	for name, data := range tt {
+		t.Run(name, func(t *testing.T) {
+			secret := &corev1.Secret{
+				StringData: data,
+			}
+
+			appConfig, err := FromSecret(secret)
+			assert.Error(t, err)
+			assert.Nil(t, appConfig)
+		})
+	}
+}
+
+func TestAppConfigFromSecret_valid(t *testing.T) {
+	tt := map[string]map[string]string{
+		"with token": {
+			"github_token": "token",
+		},
+		"app config": {
+			"github_app_id":             "2",
+			"githu_app_installation_id": "3",
+			"github_app_private_key":    "private key",
+		},
+	}
+
+	for name, data := range tt {
+		t.Run(name, func(t *testing.T) {
+			secret := &corev1.Secret{
+				StringData: data,
+			}
+
+			appConfig, err := FromSecret(secret)
+			assert.Error(t, err)
+			assert.Nil(t, appConfig)
+		})
+	}
+}
+
+func TestAppConfigFromString_valid(t *testing.T) {
+	tt := map[string]*AppConfig{
+		"token": {
+			Token: "token",
+		},
+		"app ID": {
+			AppID:             "1",
+			AppInstallationID: 2,
+			AppPrivateKey:     "private key",
+		},
+	}
+
+	for name, cfg := range tt {
+		t.Run(name, func(t *testing.T) {
+			bytes, err := json.Marshal(cfg)
+			require.NoError(t, err)
+
+			got, err := FromJSONString(string(bytes))
+			require.NoError(t, err)
+
+			want := cfg.tidy()
+			assert.Equal(t, want, got)
+		})
+	}
+}
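A side note on why TestAppConfigFromSecret_valid above still asserts an error: FromSecret reads secret.Data, while the fixtures populate secret.StringData, which is a write-only convenience field that only the Kubernetes API server merges into Data. A short sketch, assuming the same in-package test layout (the test name is hypothetical):

```go
package appconfig

import (
	"testing"

	"github.com/stretchr/testify/assert"
	corev1 "k8s.io/api/core/v1"
)

// FromSecret only consults Data; a Secret built in memory with StringData
// therefore looks credential-less and fails validation, whereas the same
// key supplied via Data parses cleanly.
func TestFromSecretReadsDataNotStringData(t *testing.T) {
	cfg, err := FromSecret(&corev1.Secret{
		StringData: map[string]string{"github_token": "token"},
	})
	assert.Error(t, err) // StringData is never merged client-side
	assert.Nil(t, cfg)

	cfg, err = FromSecret(&corev1.Secret{
		Data: map[string][]byte{"github_token": []byte("token")},
	})
	assert.NoError(t, err)
	assert.Equal(t, "token", cfg.Token)
}
```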
@@ -59,7 +59,10 @@ type AutoscalingListenerSpec struct {
 	Proxy *ProxyConfig `json:"proxy,omitempty"`
 
 	// +optional
-	GitHubServerTLS *GitHubServerTLSConfig `json:"githubServerTLS,omitempty"`
+	GitHubServerTLS *TLSConfig `json:"githubServerTLS,omitempty"`
+
+	// +optional
+	VaultConfig *VaultConfig `json:"vaultConfig,omitempty"`
 
 	// +optional
 	Metrics *MetricsConfig `json:"metrics,omitempty"`
@@ -87,7 +90,6 @@ type AutoscalingListener struct {
 }
 
 // +kubebuilder:object:root=true
-
 // AutoscalingListenerList contains a list of AutoscalingListener
 type AutoscalingListenerList struct {
 	metav1.TypeMeta `json:",inline"`
@@ -24,6 +24,7 @@ import (
	"strings"

	"github.com/actions/actions-runner-controller/hash"
	"github.com/actions/actions-runner-controller/vault"
	"golang.org/x/net/http/httpproxy"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -69,7 +70,10 @@ type AutoscalingRunnerSetSpec struct {
	Proxy *ProxyConfig `json:"proxy,omitempty"`

	// +optional
	GitHubServerTLS *GitHubServerTLSConfig `json:"githubServerTLS,omitempty"`
	GitHubServerTLS *TLSConfig `json:"githubServerTLS,omitempty"`

	// +optional
	VaultConfig *VaultConfig `json:"vaultConfig,omitempty"`

	// Required
	Template corev1.PodTemplateSpec `json:"template,omitempty"`
@@ -89,12 +93,12 @@ type AutoscalingRunnerSetSpec struct {
	MinRunners *int `json:"minRunners,omitempty"`
}

type GitHubServerTLSConfig struct {
type TLSConfig struct {
	// Required
	CertificateFrom *TLSCertificateSource `json:"certificateFrom,omitempty"`
}

func (c *GitHubServerTLSConfig) ToCertPool(keyFetcher func(name, key string) ([]byte, error)) (*x509.CertPool, error) {
func (c *TLSConfig) ToCertPool(keyFetcher func(name, key string) ([]byte, error)) (*x509.CertPool, error) {
	if c.CertificateFrom == nil {
		return nil, fmt.Errorf("certificateFrom not specified")
	}
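The rename to TLSConfig keeps ToCertPool's fetch-callback shape. As an illustration only, a minimal sketch of wiring it to a controller-runtime client; the client, context, namespace, and the "ca-certs"/"ca.crt" names below are assumptions, not part of this change:

	// Sketch: building a cert pool from a ConfigMap-backed TLSConfig.
	// k8sClient, ctx, and namespace are assumed to be in scope; the
	// ConfigMap name and key are placeholders.
	tlsCfg := &v1alpha1.TLSConfig{
		CertificateFrom: &v1alpha1.TLSCertificateSource{
			ConfigMapKeyRef: &corev1.ConfigMapKeySelector{
				LocalObjectReference: corev1.LocalObjectReference{Name: "ca-certs"},
				Key:                  "ca.crt",
			},
		},
	}

	pool, err := tlsCfg.ToCertPool(func(name, key string) ([]byte, error) {
		var cm corev1.ConfigMap
		if err := k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: name}, &cm); err != nil {
			return nil, err
		}
		return []byte(cm.Data[key]), nil
	})
	if err != nil {
		return fmt.Errorf("failed to build cert pool: %w", err)
	}
	_ = pool // e.g. tls.Config{RootCAs: pool}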
@@ -142,7 +146,7 @@ type ProxyConfig struct {
	NoProxy []string `json:"noProxy,omitempty"`
}

func (c *ProxyConfig) toHTTPProxyConfig(secretFetcher func(string) (*corev1.Secret, error)) (*httpproxy.Config, error) {
func (c *ProxyConfig) ToHTTPProxyConfig(secretFetcher func(string) (*corev1.Secret, error)) (*httpproxy.Config, error) {
	config := &httpproxy.Config{
		NoProxy: strings.Join(c.NoProxy, ","),
	}
@@ -201,7 +205,7 @@ func (c *ProxyConfig) toHTTPProxyConfig(secretFetcher func(string) (*corev1.Secr
}

func (c *ProxyConfig) ToSecretData(secretFetcher func(string) (*corev1.Secret, error)) (map[string][]byte, error) {
	config, err := c.toHTTPProxyConfig(secretFetcher)
	config, err := c.ToHTTPProxyConfig(secretFetcher)
	if err != nil {
		return nil, err
	}
@@ -215,7 +219,7 @@ func (c *ProxyConfig) ToSecretData(secretFetcher func(string) (*corev1.Secret, e
}

func (c *ProxyConfig) ProxyFunc(secretFetcher func(string) (*corev1.Secret, error)) (func(*http.Request) (*url.URL, error), error) {
	config, err := c.toHTTPProxyConfig(secretFetcher)
	config, err := c.ToHTTPProxyConfig(secretFetcher)
	if err != nil {
		return nil, err
	}
@@ -235,6 +239,26 @@ type ProxyServerConfig struct {
	CredentialSecretRef string `json:"credentialSecretRef,omitempty"`
}

type VaultConfig struct {
	// +optional
	Type vault.VaultType `json:"type,omitempty"`
	// +optional
	AzureKeyVault *AzureKeyVaultConfig `json:"azureKeyVault,omitempty"`
	// +optional
	Proxy *ProxyConfig `json:"proxy,omitempty"`
}

type AzureKeyVaultConfig struct {
	// +required
	URL string `json:"url,omitempty"`
	// +required
	TenantID string `json:"tenantId,omitempty"`
	// +required
	ClientID string `json:"clientId,omitempty"`
	// +required
	CertificatePath string `json:"certificatePath,omitempty"`
}

// MetricsConfig holds configuration parameters for each metric type
type MetricsConfig struct {
	// +optional
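With toHTTPProxyConfig exported as ToHTTPProxyConfig, callers outside the package can derive proxy settings directly. A hedged sketch of plugging ProxyFunc into an http.Transport; the proxyCfg variable and fetchSecret helper are illustrative assumptions:

	// Sketch: using ProxyConfig.ProxyFunc as an http.Transport proxy resolver.
	// fetchSecret stands in for however the caller loads the credential Secret.
	proxyFunc, err := proxyCfg.ProxyFunc(func(name string) (*corev1.Secret, error) {
		return fetchSecret(name) // assumed helper
	})
	if err != nil {
		return nil, err
	}

	httpClient := &http.Client{
		Transport: &http.Transport{Proxy: proxyFunc},
	}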
@@ -279,10 +303,37 @@ type AutoscalingRunnerSetStatus struct {
	FailedEphemeralRunners int `json:"failedEphemeralRunners"`
}

func (ars *AutoscalingRunnerSet) ListenerSpecHash(githubSecret *corev1.Secret) string {
func (ars *AutoscalingRunnerSet) ListenerSpecHash() string {
	arsSpec := ars.Spec.DeepCopy()
	secret := githubSecret.DeepCopy()
	return hash.ComputeCombinedObjectsHash(&arsSpec, &secret)
	spec := arsSpec
	return hash.ComputeTemplateHash(&spec)
}

func (ars *AutoscalingRunnerSet) GitHubConfigSecret() string {
	return ars.Spec.GitHubConfigSecret
}

func (ars *AutoscalingRunnerSet) GitHubConfigUrl() string {
	return ars.Spec.GitHubConfigUrl
}

func (ars *AutoscalingRunnerSet) GitHubProxy() *ProxyConfig {
	return ars.Spec.Proxy
}

func (ars *AutoscalingRunnerSet) GitHubServerTLS() *TLSConfig {
	return ars.Spec.GitHubServerTLS
}

func (ars *AutoscalingRunnerSet) VaultConfig() *VaultConfig {
	return ars.Spec.VaultConfig
}

func (ars *AutoscalingRunnerSet) VaultProxy() *ProxyConfig {
	if ars.Spec.VaultConfig != nil {
		return ars.Spec.VaultConfig.Proxy
	}
	return nil
}

func (ars *AutoscalingRunnerSet) RunnerSetSpecHash() string {
@@ -292,7 +343,7 @@ func (ars *AutoscalingRunnerSet) RunnerSetSpecHash() string {
		RunnerGroup string
		RunnerScaleSetName string
		Proxy *ProxyConfig
		GitHubServerTLS *GitHubServerTLSConfig
		GitHubServerTLS *TLSConfig
		Template corev1.PodTemplateSpec
	}
	spec := &runnerSetSpec{
@@ -67,6 +67,33 @@ func (er *EphemeralRunner) HasContainerHookConfigured() bool {
	return false
}

func (er *EphemeralRunner) GitHubConfigSecret() string {
	return er.Spec.GitHubConfigSecret
}

func (er *EphemeralRunner) GitHubConfigUrl() string {
	return er.Spec.GitHubConfigUrl
}

func (er *EphemeralRunner) GitHubProxy() *ProxyConfig {
	return er.Spec.Proxy
}

func (er *EphemeralRunner) GitHubServerTLS() *TLSConfig {
	return er.Spec.GitHubServerTLS
}

func (er *EphemeralRunner) VaultConfig() *VaultConfig {
	return er.Spec.VaultConfig
}

func (er *EphemeralRunner) VaultProxy() *ProxyConfig {
	if er.Spec.VaultConfig != nil {
		return er.Spec.VaultConfig.Proxy
	}
	return nil
}

// EphemeralRunnerSpec defines the desired state of EphemeralRunner
type EphemeralRunnerSpec struct {
	// +required
@@ -75,6 +102,9 @@ type EphemeralRunnerSpec struct {
	// +required
	GitHubConfigSecret string `json:"githubConfigSecret,omitempty"`

	// +optional
	GitHubServerTLS *TLSConfig `json:"githubServerTLS,omitempty"`

	// +required
	RunnerScaleSetId int `json:"runnerScaleSetId,omitempty"`

@@ -85,7 +115,7 @@ type EphemeralRunnerSpec struct {
	ProxySecretRef string `json:"proxySecretRef,omitempty"`

	// +optional
	GitHubServerTLS *GitHubServerTLSConfig `json:"githubServerTLS,omitempty"`
	VaultConfig *VaultConfig `json:"vaultConfig,omitempty"`

	corev1.PodTemplateSpec `json:",inline"`
}
@@ -119,7 +149,7 @@ type EphemeralRunnerStatus struct {
	RunnerJITConfig string `json:"runnerJITConfig,omitempty"`

	// +optional
	Failures map[string]bool `json:"failures,omitempty"`
	Failures map[string]metav1.Time `json:"failures,omitempty"`

	// +optional
	JobRequestId int64 `json:"jobRequestId,omitempty"`
@@ -137,6 +167,20 @@ type EphemeralRunnerStatus struct {
	JobDisplayName string `json:"jobDisplayName,omitempty"`
}

func (s *EphemeralRunnerStatus) LastFailure() metav1.Time {
	var maxTime metav1.Time
	if len(s.Failures) == 0 {
		return maxTime
	}

	for _, ts := range s.Failures {
		if ts.After(maxTime.Time) {
			maxTime = ts
		}
	}
	return maxTime
}

// +kubebuilder:object:root=true

// EphemeralRunnerList contains a list of EphemeralRunner
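Recording failures as metav1.Time instead of bool lets callers reason about when the runner last failed, not just how often. A speculative sketch of how a reconciler might derive a requeue delay from LastFailure; the 30-second window and the helper itself are assumptions, not part of this change:

	// Sketch: delaying requeue based on the most recent failure timestamp.
	// The 30-second backoff window is an arbitrary illustrative choice.
	func requeueAfter(status *v1alpha1.EphemeralRunnerStatus, now time.Time) time.Duration {
		last := status.LastFailure()
		if last.IsZero() {
			return 0 // no failures recorded, reconcile immediately
		}
		if wait := last.Time.Add(30 * time.Second).Sub(now); wait > 0 {
			return wait
		}
		return 0
	}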
@@ -60,9 +60,35 @@ type EphemeralRunnerSet struct {
	Status EphemeralRunnerSetStatus `json:"status,omitempty"`
}

// +kubebuilder:object:root=true
func (ers *EphemeralRunnerSet) GitHubConfigSecret() string {
	return ers.Spec.EphemeralRunnerSpec.GitHubConfigSecret
}

func (ers *EphemeralRunnerSet) GitHubConfigUrl() string {
	return ers.Spec.EphemeralRunnerSpec.GitHubConfigUrl
}

func (ers *EphemeralRunnerSet) GitHubProxy() *ProxyConfig {
	return ers.Spec.EphemeralRunnerSpec.Proxy
}

func (ers *EphemeralRunnerSet) GitHubServerTLS() *TLSConfig {
	return ers.Spec.EphemeralRunnerSpec.GitHubServerTLS
}

func (ers *EphemeralRunnerSet) VaultConfig() *VaultConfig {
	return ers.Spec.EphemeralRunnerSpec.VaultConfig
}

func (ers *EphemeralRunnerSet) VaultProxy() *ProxyConfig {
	if ers.Spec.EphemeralRunnerSpec.VaultConfig != nil {
		return ers.Spec.EphemeralRunnerSpec.VaultConfig.Proxy
	}
	return nil
}

// EphemeralRunnerSetList contains a list of EphemeralRunnerSet
// +kubebuilder:object:root=true
type EphemeralRunnerSetList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`

@@ -17,7 +17,7 @@ import (

func TestGitHubServerTLSConfig_ToCertPool(t *testing.T) {
	t.Run("returns an error if CertificateFrom not specified", func(t *testing.T) {
		c := &v1alpha1.GitHubServerTLSConfig{
		c := &v1alpha1.TLSConfig{
			CertificateFrom: nil,
		}

@@ -29,7 +29,7 @@ func TestGitHubServerTLSConfig_ToCertPool(t *testing.T) {
	})

	t.Run("returns an error if CertificateFrom.ConfigMapKeyRef not specified", func(t *testing.T) {
		c := &v1alpha1.GitHubServerTLSConfig{
		c := &v1alpha1.TLSConfig{
			CertificateFrom: &v1alpha1.TLSCertificateSource{},
		}

@@ -41,7 +41,7 @@ func TestGitHubServerTLSConfig_ToCertPool(t *testing.T) {
	})

	t.Run("returns a valid cert pool with correct configuration", func(t *testing.T) {
		c := &v1alpha1.GitHubServerTLSConfig{
		c := &v1alpha1.TLSConfig{
			CertificateFrom: &v1alpha1.TLSCertificateSource{
				ConfigMapKeyRef: &v1.ConfigMapKeySelector{
					LocalObjectReference: v1.LocalObjectReference{
72 apis/actions.github.com/v1alpha1/version.go Normal file
@@ -0,0 +1,72 @@
package v1alpha1

import "strings"

func IsVersionAllowed(resourceVersion, buildVersion string) bool {
	if buildVersion == "dev" || resourceVersion == buildVersion || strings.HasPrefix(buildVersion, "canary-") {
		return true
	}

	rv, ok := parseSemver(resourceVersion)
	if !ok {
		return false
	}
	bv, ok := parseSemver(buildVersion)
	if !ok {
		return false
	}
	return rv.major == bv.major && rv.minor == bv.minor
}

type semver struct {
	major string
	minor string
}

func parseSemver(v string) (p semver, ok bool) {
	if v == "" {
		return
	}
	p.major, v, ok = parseInt(v)
	if !ok {
		return p, false
	}
	if v == "" {
		p.minor = "0"
		return p, true
	}
	if v[0] != '.' {
		return p, false
	}
	p.minor, v, ok = parseInt(v[1:])
	if !ok {
		return p, false
	}
	if v == "" {
		return p, true
	}
	if v[0] != '.' {
		return p, false
	}
	if _, _, ok = parseInt(v[1:]); !ok {
		return p, false
	}
	return p, true
}

func parseInt(v string) (t, rest string, ok bool) {
	if v == "" {
		return
	}
	if v[0] < '0' || '9' < v[0] {
		return
	}
	i := 1
	for i < len(v) && '0' <= v[i] && v[i] <= '9' {
		i++
	}
	if v[0] == '0' && i != 1 {
		return
	}
	return v[:i], v[i:], true
}
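IsVersionAllowed effectively pins resources and controller to the same major.minor while letting "dev" and "canary-" builds through. A hedged sketch of how a controller might gate on it; the annotation key and error handling below are assumptions, not shown in this change:

	// Sketch: rejecting a resource whose recorded version does not match the
	// running build. build.Version exists in this repo; the annotation key
	// "actions.github.com/version" is a hypothetical example.
	resourceVersion := ars.Annotations["actions.github.com/version"]
	if !v1alpha1.IsVersionAllowed(resourceVersion, build.Version) {
		return fmt.Errorf("resource version %q is not compatible with controller build %q", resourceVersion, build.Version)
	}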
60 apis/actions.github.com/v1alpha1/version_test.go Normal file
@@ -0,0 +1,60 @@
package v1alpha1_test

import (
	"testing"

	"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
	"github.com/stretchr/testify/assert"
)

func TestIsVersionAllowed(t *testing.T) {
	t.Parallel()
	tt := map[string]struct {
		resourceVersion string
		buildVersion    string
		want            bool
	}{
		"dev should always be allowed": {
			resourceVersion: "0.11.0",
			buildVersion:    "dev",
			want:            true,
		},
		"resourceVersion is not semver": {
			resourceVersion: "dev",
			buildVersion:    "0.11.0",
			want:            false,
		},
		"buildVersion is not semver": {
			resourceVersion: "0.11.0",
			buildVersion:    "NA",
			want:            false,
		},
		"major version mismatch": {
			resourceVersion: "0.11.0",
			buildVersion:    "1.11.0",
			want:            false,
		},
		"minor version mismatch": {
			resourceVersion: "0.11.0",
			buildVersion:    "0.10.0",
			want:            false,
		},
		"patch version mismatch": {
			resourceVersion: "0.11.1",
			buildVersion:    "0.11.0",
			want:            true,
		},
		"arbitrary version match": {
			resourceVersion: "abc",
			buildVersion:    "abc",
			want:            true,
		},
	}

	for name, tc := range tt {
		t.Run(name, func(t *testing.T) {
			got := v1alpha1.IsVersionAllowed(tc.resourceVersion, tc.buildVersion)
			assert.Equal(t, tc.want, got)
		})
	}
}
@@ -22,6 +22,7 @@ package v1alpha1

import (
	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	runtime "k8s.io/apimachinery/pkg/runtime"
)

@@ -99,7 +100,12 @@ func (in *AutoscalingListenerSpec) DeepCopyInto(out *AutoscalingListenerSpec) {
	}
	if in.GitHubServerTLS != nil {
		in, out := &in.GitHubServerTLS, &out.GitHubServerTLS
		*out = new(GitHubServerTLSConfig)
		*out = new(TLSConfig)
		(*in).DeepCopyInto(*out)
	}
	if in.VaultConfig != nil {
		in, out := &in.VaultConfig, &out.VaultConfig
		*out = new(VaultConfig)
		(*in).DeepCopyInto(*out)
	}
	if in.Metrics != nil {
@@ -208,7 +214,12 @@ func (in *AutoscalingRunnerSetSpec) DeepCopyInto(out *AutoscalingRunnerSetSpec)
	}
	if in.GitHubServerTLS != nil {
		in, out := &in.GitHubServerTLS, &out.GitHubServerTLS
		*out = new(GitHubServerTLSConfig)
		*out = new(TLSConfig)
		(*in).DeepCopyInto(*out)
	}
	if in.VaultConfig != nil {
		in, out := &in.VaultConfig, &out.VaultConfig
		*out = new(VaultConfig)
		(*in).DeepCopyInto(*out)
	}
	in.Template.DeepCopyInto(&out.Template)
@@ -259,6 +270,21 @@ func (in *AutoscalingRunnerSetStatus) DeepCopy() *AutoscalingRunnerSetStatus {
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AzureKeyVaultConfig) DeepCopyInto(out *AzureKeyVaultConfig) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureKeyVaultConfig.
func (in *AzureKeyVaultConfig) DeepCopy() *AzureKeyVaultConfig {
	if in == nil {
		return nil
	}
	out := new(AzureKeyVaultConfig)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CounterMetric) DeepCopyInto(out *CounterMetric) {
	*out = *in
@@ -431,14 +457,19 @@ func (in *EphemeralRunnerSetStatus) DeepCopy() *EphemeralRunnerSetStatus {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EphemeralRunnerSpec) DeepCopyInto(out *EphemeralRunnerSpec) {
	*out = *in
	if in.GitHubServerTLS != nil {
		in, out := &in.GitHubServerTLS, &out.GitHubServerTLS
		*out = new(TLSConfig)
		(*in).DeepCopyInto(*out)
	}
	if in.Proxy != nil {
		in, out := &in.Proxy, &out.Proxy
		*out = new(ProxyConfig)
		(*in).DeepCopyInto(*out)
	}
	if in.GitHubServerTLS != nil {
		in, out := &in.GitHubServerTLS, &out.GitHubServerTLS
		*out = new(GitHubServerTLSConfig)
	if in.VaultConfig != nil {
		in, out := &in.VaultConfig, &out.VaultConfig
		*out = new(VaultConfig)
		(*in).DeepCopyInto(*out)
	}
	in.PodTemplateSpec.DeepCopyInto(&out.PodTemplateSpec)
@@ -459,9 +490,9 @@ func (in *EphemeralRunnerStatus) DeepCopyInto(out *EphemeralRunnerStatus) {
	*out = *in
	if in.Failures != nil {
		in, out := &in.Failures, &out.Failures
		*out = make(map[string]bool, len(*in))
		*out = make(map[string]metav1.Time, len(*in))
		for key, val := range *in {
			(*out)[key] = val
			(*out)[key] = *val.DeepCopy()
		}
	}
}
@@ -496,26 +527,6 @@ func (in *GaugeMetric) DeepCopy() *GaugeMetric {
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GitHubServerTLSConfig) DeepCopyInto(out *GitHubServerTLSConfig) {
	*out = *in
	if in.CertificateFrom != nil {
		in, out := &in.CertificateFrom, &out.CertificateFrom
		*out = new(TLSCertificateSource)
		(*in).DeepCopyInto(*out)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitHubServerTLSConfig.
func (in *GitHubServerTLSConfig) DeepCopy() *GitHubServerTLSConfig {
	if in == nil {
		return nil
	}
	out := new(GitHubServerTLSConfig)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HistogramMetric) DeepCopyInto(out *HistogramMetric) {
	*out = *in
@@ -668,3 +679,48 @@ func (in *TLSCertificateSource) DeepCopy() *TLSCertificateSource {
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TLSConfig) DeepCopyInto(out *TLSConfig) {
	*out = *in
	if in.CertificateFrom != nil {
		in, out := &in.CertificateFrom, &out.CertificateFrom
		*out = new(TLSCertificateSource)
		(*in).DeepCopyInto(*out)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSConfig.
func (in *TLSConfig) DeepCopy() *TLSConfig {
	if in == nil {
		return nil
	}
	out := new(TLSConfig)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VaultConfig) DeepCopyInto(out *VaultConfig) {
	*out = *in
	if in.AzureKeyVault != nil {
		in, out := &in.AzureKeyVault, &out.AzureKeyVault
		*out = new(AzureKeyVaultConfig)
		**out = **in
	}
	if in.Proxy != nil {
		in, out := &in.Proxy, &out.Proxy
		*out = new(ProxyConfig)
		(*in).DeepCopyInto(*out)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VaultConfig.
func (in *VaultConfig) DeepCopy() *VaultConfig {
	if in == nil {
		return nil
	}
	out := new(VaultConfig)
	in.DeepCopyInto(out)
	return out
}
@@ -215,10 +215,10 @@ func (rs *RunnerSpec) validateRepository() error {
		foundCount += 1
	}
	if foundCount == 0 {
		return errors.New("Spec needs enterprise, organization or repository")
		return errors.New("spec needs enterprise, organization or repository")
	}
	if foundCount > 1 {
		return errors.New("Spec cannot have many fields defined enterprise, organization and repository")
		return errors.New("spec cannot have many fields defined enterprise, organization and repository")
	}

	return nil
@@ -1,9 +1,11 @@
# This file defines the config for "ct" (chart tester) used by the helm linting GitHub workflow
remote: origin
target-branch: master
lint-conf: charts/.ci/lint-config.yaml
chart-repos:
  - jetstack=https://charts.jetstack.io
check-version-increment: false # Disable checking that the chart version has been bumped
charts:
  - charts/gha-runner-scale-set-controller
  - charts/gha-runner-scale-set
  - charts/gha-runner-scale-set-controller
  - charts/gha-runner-scale-set
skip-clean-up: true
@@ -1,7 +1,9 @@
# This file defines the config for "ct" (chart tester) used by the helm linting GitHub workflow
remote: origin
target-branch: master
lint-conf: charts/.ci/lint-config.yaml
chart-repos:
  - jetstack=https://charts.jetstack.io
check-version-increment: false # Disable checking that the chart version has been bumped
charts:
  - charts/actions-runner-controller
  - charts/actions-runner-controller
@@ -1,6 +1,5 @@
#!/bin/bash


for chart in `ls charts`;
do
helm template --values charts/$chart/ci/ci-values.yaml charts/$chart | kube-score score - \
@@ -12,4 +11,4 @@ helm template --values charts/$chart/ci/ci-values.yaml charts/$chart | kube-scor
  --enable-optional-test container-security-context-privileged \
  --enable-optional-test container-security-context-readonlyrootfilesystem \
  --ignore-test container-security-context
done
done
@@ -44,7 +44,7 @@ All additional docs are kept in the `docs/` folder, this README is solely for do
| `image.pullPolicy` | The pull policy of the controller image | IfNotPresent |
| `metrics.serviceMonitor.enable` | Deploy a serviceMonitor kind for use with prometheus-operator CRDs | false |
| `metrics.serviceMonitor.interval` | Configure the interval at which Prometheus should scrape the controller's metrics | 1m |
| `metrics.serviceMonitor.namespace | Namespace which Prometheus is running in | `Release.Namespace` (the default namespace of the helm chart). |
| `metrics.serviceMonitor.namespace` | Namespace which Prometheus is running in | `Release.Namespace` (the default namespace of the helm chart). |
| `metrics.serviceMonitor.timeout` | Configure the timeout of Prometheus scraping | 30s |
| `metrics.serviceAnnotations` | Set annotations for the provisioned metrics service resource | |
| `metrics.port` | Set port of metrics service | 8443 |
@@ -15,13 +15,13 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.11.0
version: 0.12.0

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "0.11.0"
appVersion: "0.12.0"

home: https://github.com/actions/actions-runner-controller
@@ -7863,6 +7863,53 @@ spec:
                - containers
                type: object
              type: object
            vaultConfig:
              properties:
                azureKeyVault:
                  properties:
                    certificatePath:
                      type: string
                    clientId:
                      type: string
                    tenantId:
                      type: string
                    url:
                      type: string
                  required:
                  - certificatePath
                  - clientId
                  - tenantId
                  - url
                  type: object
                proxy:
                  properties:
                    http:
                      properties:
                        credentialSecretRef:
                          type: string
                        url:
                          description: Required
                          type: string
                      type: object
                    https:
                      properties:
                        credentialSecretRef:
                          type: string
                        url:
                          description: Required
                          type: string
                      type: object
                    noProxy:
                      items:
                        type: string
                      type: array
                  type: object
                type:
                  description: |-
                    VaultType represents the type of vault that can be used in the application.
                    It is used to identify which vault integration should be used to resolve secrets.
                  type: string
              type: object
          type: object
        status:
          description: AutoscalingListenerStatus defines the observed state of AutoscalingListener
@@ -15504,6 +15504,53 @@ spec:
                - containers
                type: object
              type: object
            vaultConfig:
              properties:
                azureKeyVault:
                  properties:
                    certificatePath:
                      type: string
                    clientId:
                      type: string
                    tenantId:
                      type: string
                    url:
                      type: string
                  required:
                  - certificatePath
                  - clientId
                  - tenantId
                  - url
                  type: object
                proxy:
                  properties:
                    http:
                      properties:
                        credentialSecretRef:
                          type: string
                        url:
                          description: Required
                          type: string
                      type: object
                    https:
                      properties:
                        credentialSecretRef:
                          type: string
                        url:
                          description: Required
                          type: string
                      type: object
                    noProxy:
                      items:
                        type: string
                      type: array
                  type: object
                type:
                  description: |-
                    VaultType represents the type of vault that can be used in the application.
                    It is used to identify which vault integration should be used to resolve secrets.
                  type: string
              type: object
          type: object
        status:
          description: AutoscalingRunnerSetStatus defines the observed state of AutoscalingRunnerSet
@@ -7784,6 +7784,53 @@ spec:
              required:
              - containers
              type: object
            vaultConfig:
              properties:
                azureKeyVault:
                  properties:
                    certificatePath:
                      type: string
                    clientId:
                      type: string
                    tenantId:
                      type: string
                    url:
                      type: string
                  required:
                  - certificatePath
                  - clientId
                  - tenantId
                  - url
                  type: object
                proxy:
                  properties:
                    http:
                      properties:
                        credentialSecretRef:
                          type: string
                        url:
                          description: Required
                          type: string
                      type: object
                    https:
                      properties:
                        credentialSecretRef:
                          type: string
                        url:
                          description: Required
                          type: string
                      type: object
                    noProxy:
                      items:
                        type: string
                      type: array
                  type: object
                type:
                  description: |-
                    VaultType represents the type of vault that can be used in the application.
                    It is used to identify which vault integration should be used to resolve secrets.
                  type: string
              type: object
            required:
            - githubConfigSecret
            - githubConfigUrl
@@ -7794,7 +7841,8 @@ spec:
          properties:
            failures:
              additionalProperties:
                type: boolean
                format: date-time
                type: string
              type: object
            jobDisplayName:
              type: string

@@ -7778,6 +7778,53 @@ spec:
              required:
              - containers
              type: object
            vaultConfig:
              properties:
                azureKeyVault:
                  properties:
                    certificatePath:
                      type: string
                    clientId:
                      type: string
                    tenantId:
                      type: string
                    url:
                      type: string
                  required:
                  - certificatePath
                  - clientId
                  - tenantId
                  - url
                  type: object
                proxy:
                  properties:
                    http:
                      properties:
                        credentialSecretRef:
                          type: string
                        url:
                          description: Required
                          type: string
                      type: object
                    https:
                      properties:
                        credentialSecretRef:
                          type: string
                        url:
                          description: Required
                          type: string
                      type: object
                    noProxy:
                      items:
                        type: string
                      type: array
                  type: object
                type:
                  description: |-
                    VaultType represents the type of vault that can be used in the application.
                    It is used to identify which vault integration should be used to resolve secrets.
                  type: string
              type: object
            required:
            - githubConfigSecret
            - githubConfigUrl
@@ -15,13 +15,13 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.11.0
version: 0.12.0

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "0.11.0"
appVersion: "0.12.0"

home: https://github.com/actions/actions-runner-controller
@@ -106,6 +106,17 @@ env:
    value: "123"
securityContext:
  privileged: true
{{- if (ge (.Capabilities.KubeVersion.Minor | int) 29) }}
restartPolicy: Always
startupProbe:
  exec:
    command:
      - docker
      - info
  initialDelaySeconds: 0
  failureThreshold: 24
  periodSeconds: 5
{{- end }}
volumeMounts:
  - name: work
    mountPath: /home/runner/_work
@@ -45,6 +45,7 @@ metadata:
  {{- if and (ne $containerMode.type "kubernetes") (not .Values.template.spec.serviceAccountName) }}
    actions.github.com/cleanup-no-permission-service-account-name: {{ include "gha-runner-scale-set.noPermissionServiceAccountName" . }}
  {{- end }}

spec:
  githubConfigUrl: {{ required ".Values.githubConfigUrl is required" (trimSuffix "/" .Values.githubConfigUrl) }}
  githubConfigSecret: {{ include "gha-runner-scale-set.githubsecret" . }}
@@ -65,6 +66,24 @@ spec:
  {{- end }}
  {{- end }}

  {{- if and .Values.keyVault .Values.keyVault.type }}
  vaultConfig:
    type: {{ .Values.keyVault.type }}
    {{- if .Values.keyVault.proxy }}
    proxy: {{- toYaml .Values.keyVault.proxy | nindent 6 }}
    {{- end }}
    {{- if eq .Values.keyVault.type "azure_key_vault" }}
    azureKeyVault:
      url: {{ .Values.keyVault.azureKeyVault.url }}
      tenantId: {{ .Values.keyVault.azureKeyVault.tenantId }}
      clientId: {{ .Values.keyVault.azureKeyVault.clientId }}
      certificatePath: {{ .Values.keyVault.azureKeyVault.certificatePath }}
      secretKey: {{ .Values.keyVault.azureKeyVault.secretKey }}
    {{- else }}
    {{- fail "Unsupported keyVault type: " .Values.keyVault.type }}
    {{- end }}
  {{- end }}

  {{- if .Values.proxy }}
  proxy:
    {{- if .Values.proxy.http }}
@@ -149,6 +168,10 @@ spec:
      - name: init-dind-externals
        {{- include "gha-runner-scale-set.dind-init-container" . | nindent 8 }}
      {{- end }}
      {{- if (ge (.Capabilities.KubeVersion.Minor | int) 29) }}
      - name: dind
        {{- include "gha-runner-scale-set.dind-container" . | nindent 8 }}
      {{- end }}
      {{- with .Values.template.spec.initContainers }}
      {{- toYaml . | nindent 6 }}
      {{- end }}
@@ -157,8 +180,10 @@ spec:
    {{- if eq $containerMode.type "dind" }}
      - name: runner
        {{- include "gha-runner-scale-set.dind-runner-container" . | nindent 8 }}
      {{- if not (ge (.Capabilities.KubeVersion.Minor | int) 29) }}
      - name: dind
        {{- include "gha-runner-scale-set.dind-container" . | nindent 8 }}
      {{- end }}
      {{- include "gha-runner-scale-set.non-runner-non-dind-containers" . | nindent 6 }}
    {{- else if eq $containerMode.type "kubernetes" }}
      - name: runner
@@ -728,20 +728,20 @@ func TestTemplateRenderedAutoScalingRunnerSet_DinD_ExtraInitContainers(t *testin
	var ars v1alpha1.AutoscalingRunnerSet
	helm.UnmarshalK8SYaml(t, output, &ars)

	assert.Len(t, ars.Spec.Template.Spec.InitContainers, 3, "InitContainers should be 3")
	assert.Equal(t, "kube-init", ars.Spec.Template.Spec.InitContainers[1].Name, "InitContainers[1] Name should be kube-init")
	assert.Equal(t, "runner-image:latest", ars.Spec.Template.Spec.InitContainers[1].Image, "InitContainers[1] Image should be runner-image:latest")
	assert.Equal(t, "sudo", ars.Spec.Template.Spec.InitContainers[1].Command[0], "InitContainers[1] Command[0] should be sudo")
	assert.Equal(t, "chown", ars.Spec.Template.Spec.InitContainers[1].Command[1], "InitContainers[1] Command[1] should be chown")
	assert.Equal(t, "-R", ars.Spec.Template.Spec.InitContainers[1].Command[2], "InitContainers[1] Command[2] should be -R")
	assert.Equal(t, "1001:123", ars.Spec.Template.Spec.InitContainers[1].Command[3], "InitContainers[1] Command[3] should be 1001:123")
	assert.Equal(t, "/home/runner/_work", ars.Spec.Template.Spec.InitContainers[1].Command[4], "InitContainers[1] Command[4] should be /home/runner/_work")
	assert.Equal(t, "work", ars.Spec.Template.Spec.InitContainers[1].VolumeMounts[0].Name, "InitContainers[1] VolumeMounts[0] Name should be work")
	assert.Equal(t, "/home/runner/_work", ars.Spec.Template.Spec.InitContainers[1].VolumeMounts[0].MountPath, "InitContainers[1] VolumeMounts[0] MountPath should be /home/runner/_work")
	assert.Len(t, ars.Spec.Template.Spec.InitContainers, 4, "InitContainers should be 4")
	assert.Equal(t, "kube-init", ars.Spec.Template.Spec.InitContainers[2].Name, "InitContainers[2] Name should be kube-init")
	assert.Equal(t, "runner-image:latest", ars.Spec.Template.Spec.InitContainers[2].Image, "InitContainers[2] Image should be runner-image:latest")
	assert.Equal(t, "sudo", ars.Spec.Template.Spec.InitContainers[2].Command[0], "InitContainers[2] Command[0] should be sudo")
	assert.Equal(t, "chown", ars.Spec.Template.Spec.InitContainers[2].Command[1], "InitContainers[2] Command[1] should be chown")
	assert.Equal(t, "-R", ars.Spec.Template.Spec.InitContainers[2].Command[2], "InitContainers[2] Command[2] should be -R")
	assert.Equal(t, "1001:123", ars.Spec.Template.Spec.InitContainers[2].Command[3], "InitContainers[2] Command[3] should be 1001:123")
	assert.Equal(t, "/home/runner/_work", ars.Spec.Template.Spec.InitContainers[2].Command[4], "InitContainers[2] Command[4] should be /home/runner/_work")
	assert.Equal(t, "work", ars.Spec.Template.Spec.InitContainers[2].VolumeMounts[0].Name, "InitContainers[2] VolumeMounts[0] Name should be work")
	assert.Equal(t, "/home/runner/_work", ars.Spec.Template.Spec.InitContainers[2].VolumeMounts[0].MountPath, "InitContainers[2] VolumeMounts[0] MountPath should be /home/runner/_work")

	assert.Equal(t, "ls", ars.Spec.Template.Spec.InitContainers[2].Name, "InitContainers[2] Name should be ls")
	assert.Equal(t, "ubuntu:latest", ars.Spec.Template.Spec.InitContainers[2].Image, "InitContainers[2] Image should be ubuntu:latest")
	assert.Equal(t, "ls", ars.Spec.Template.Spec.InitContainers[2].Command[0], "InitContainers[2] Command[0] should be ls")
	assert.Equal(t, "ls", ars.Spec.Template.Spec.InitContainers[3].Name, "InitContainers[3] Name should be ls")
	assert.Equal(t, "ubuntu:latest", ars.Spec.Template.Spec.InitContainers[3].Image, "InitContainers[3] Image should be ubuntu:latest")
	assert.Equal(t, "ls", ars.Spec.Template.Spec.InitContainers[3].Command[0], "InitContainers[3] Command[0] should be ls")
}

func TestTemplateRenderedAutoScalingRunnerSet_DinD_ExtraVolumes(t *testing.T) {
@@ -860,13 +860,26 @@ func TestTemplateRenderedAutoScalingRunnerSet_EnableDinD(t *testing.T) {

	assert.NotNil(t, ars.Spec.Template.Spec, "Template.Spec should not be nil")

	assert.Len(t, ars.Spec.Template.Spec.InitContainers, 1, "Template.Spec should have 1 init container")
	assert.Len(t, ars.Spec.Template.Spec.InitContainers, 2, "Template.Spec should have 2 init containers")
	assert.Equal(t, "init-dind-externals", ars.Spec.Template.Spec.InitContainers[0].Name)
	assert.Equal(t, "ghcr.io/actions/actions-runner:latest", ars.Spec.Template.Spec.InitContainers[0].Image)
	assert.Equal(t, "cp", ars.Spec.Template.Spec.InitContainers[0].Command[0])
	assert.Equal(t, "-r /home/runner/externals/. /home/runner/tmpDir/", strings.Join(ars.Spec.Template.Spec.InitContainers[0].Args, " "))

	assert.Len(t, ars.Spec.Template.Spec.Containers, 2, "Template.Spec should have 2 container")
	assert.Equal(t, "dind", ars.Spec.Template.Spec.InitContainers[1].Name)
	assert.Equal(t, "docker:dind", ars.Spec.Template.Spec.InitContainers[1].Image)
	assert.True(t, *ars.Spec.Template.Spec.InitContainers[1].SecurityContext.Privileged)
	assert.Len(t, ars.Spec.Template.Spec.InitContainers[1].VolumeMounts, 3, "The dind container should have 3 volume mounts, dind-sock, work and externals")
	assert.Equal(t, "work", ars.Spec.Template.Spec.InitContainers[1].VolumeMounts[0].Name)
	assert.Equal(t, "/home/runner/_work", ars.Spec.Template.Spec.InitContainers[1].VolumeMounts[0].MountPath)

	assert.Equal(t, "dind-sock", ars.Spec.Template.Spec.InitContainers[1].VolumeMounts[1].Name)
	assert.Equal(t, "/var/run", ars.Spec.Template.Spec.InitContainers[1].VolumeMounts[1].MountPath)

	assert.Equal(t, "dind-externals", ars.Spec.Template.Spec.InitContainers[1].VolumeMounts[2].Name)
	assert.Equal(t, "/home/runner/externals", ars.Spec.Template.Spec.InitContainers[1].VolumeMounts[2].MountPath)

	assert.Len(t, ars.Spec.Template.Spec.Containers, 1, "Template.Spec should have 1 container")
	assert.Equal(t, "runner", ars.Spec.Template.Spec.Containers[0].Name)
	assert.Equal(t, "ghcr.io/actions/actions-runner:latest", ars.Spec.Template.Spec.Containers[0].Image)
	assert.Len(t, ars.Spec.Template.Spec.Containers[0].Env, 2, "The runner container should have 2 env vars, DOCKER_HOST and RUNNER_WAIT_FOR_DOCKER_IN_SECONDS")
@@ -883,19 +896,6 @@ func TestTemplateRenderedAutoScalingRunnerSet_EnableDinD(t *testing.T) {
	assert.Equal(t, "dind-sock", ars.Spec.Template.Spec.Containers[0].VolumeMounts[1].Name)
	assert.Equal(t, "/var/run", ars.Spec.Template.Spec.Containers[0].VolumeMounts[1].MountPath)

	assert.Equal(t, "dind", ars.Spec.Template.Spec.Containers[1].Name)
	assert.Equal(t, "docker:dind", ars.Spec.Template.Spec.Containers[1].Image)
	assert.True(t, *ars.Spec.Template.Spec.Containers[1].SecurityContext.Privileged)
	assert.Len(t, ars.Spec.Template.Spec.Containers[1].VolumeMounts, 3, "The dind container should have 3 volume mounts, dind-sock, work and externals")
	assert.Equal(t, "work", ars.Spec.Template.Spec.Containers[1].VolumeMounts[0].Name)
	assert.Equal(t, "/home/runner/_work", ars.Spec.Template.Spec.Containers[1].VolumeMounts[0].MountPath)

	assert.Equal(t, "dind-sock", ars.Spec.Template.Spec.Containers[1].VolumeMounts[1].Name)
	assert.Equal(t, "/var/run", ars.Spec.Template.Spec.Containers[1].VolumeMounts[1].MountPath)

	assert.Equal(t, "dind-externals", ars.Spec.Template.Spec.Containers[1].VolumeMounts[2].Name)
	assert.Equal(t, "/home/runner/externals", ars.Spec.Template.Spec.Containers[1].VolumeMounts[2].MountPath)

	assert.Len(t, ars.Spec.Template.Spec.Volumes, 3, "Volumes should be 3")
	assert.Equal(t, "dind-sock", ars.Spec.Template.Spec.Volumes[0].Name, "Volume name should be dind-sock")
	assert.Equal(t, "dind-externals", ars.Spec.Template.Spec.Volumes[1].Name, "Volume name should be dind-externals")
@@ -1158,7 +1158,7 @@ func TestTemplateRenderedWithTLS(t *testing.T) {
	ars := render(t, options)

	require.NotNil(t, ars.Spec.GitHubServerTLS)
	expected := &v1alpha1.GitHubServerTLSConfig{
	expected := &v1alpha1.TLSConfig{
		CertificateFrom: &v1alpha1.TLSCertificateSource{
			ConfigMapKeyRef: &corev1.ConfigMapKeySelector{
				LocalObjectReference: corev1.LocalObjectReference{
@@ -1178,7 +1178,7 @@ func TestTemplateRenderedWithTLS(t *testing.T) {
		}
	}
	require.NotNil(t, volume)
	assert.Equal(t, "certs-configmap", volume.ConfigMap.LocalObjectReference.Name)
	assert.Equal(t, "certs-configmap", volume.ConfigMap.Name)
	assert.Equal(t, "cert.pem", volume.ConfigMap.Items[0].Key)
	assert.Equal(t, "cert.pem", volume.ConfigMap.Items[0].Path)

@@ -1218,7 +1218,7 @@ func TestTemplateRenderedWithTLS(t *testing.T) {
	ars := render(t, options)

	require.NotNil(t, ars.Spec.GitHubServerTLS)
	expected := &v1alpha1.GitHubServerTLSConfig{
	expected := &v1alpha1.TLSConfig{
		CertificateFrom: &v1alpha1.TLSCertificateSource{
			ConfigMapKeyRef: &corev1.ConfigMapKeySelector{
				LocalObjectReference: corev1.LocalObjectReference{
@@ -1238,7 +1238,7 @@ func TestTemplateRenderedWithTLS(t *testing.T) {
		}
	}
	require.NotNil(t, volume)
	assert.Equal(t, "certs-configmap", volume.ConfigMap.LocalObjectReference.Name)
	assert.Equal(t, "certs-configmap", volume.ConfigMap.Name)
	assert.Equal(t, "cert.pem", volume.ConfigMap.Items[0].Key)
	assert.Equal(t, "cert.pem", volume.ConfigMap.Items[0].Path)

@@ -1278,7 +1278,7 @@ func TestTemplateRenderedWithTLS(t *testing.T) {
	ars := render(t, options)

	require.NotNil(t, ars.Spec.GitHubServerTLS)
	expected := &v1alpha1.GitHubServerTLSConfig{
	expected := &v1alpha1.TLSConfig{
		CertificateFrom: &v1alpha1.TLSCertificateSource{
			ConfigMapKeyRef: &corev1.ConfigMapKeySelector{
				LocalObjectReference: corev1.LocalObjectReference{
@@ -1298,7 +1298,7 @@ func TestTemplateRenderedWithTLS(t *testing.T) {
		}
	}
	require.NotNil(t, volume)
	assert.Equal(t, "certs-configmap", volume.ConfigMap.LocalObjectReference.Name)
	assert.Equal(t, "certs-configmap", volume.ConfigMap.Name)
	assert.Equal(t, "cert.pem", volume.ConfigMap.Items[0].Key)
	assert.Equal(t, "cert.pem", volume.ConfigMap.Items[0].Path)

@@ -1338,7 +1338,7 @@ func TestTemplateRenderedWithTLS(t *testing.T) {
	ars := render(t, options)

	require.NotNil(t, ars.Spec.GitHubServerTLS)
	expected := &v1alpha1.GitHubServerTLSConfig{
	expected := &v1alpha1.TLSConfig{
		CertificateFrom: &v1alpha1.TLSCertificateSource{
			ConfigMapKeyRef: &corev1.ConfigMapKeySelector{
				LocalObjectReference: corev1.LocalObjectReference{
@@ -1394,7 +1394,7 @@ func TestTemplateRenderedWithTLS(t *testing.T) {
	ars := render(t, options)

	require.NotNil(t, ars.Spec.GitHubServerTLS)
	expected := &v1alpha1.GitHubServerTLSConfig{
	expected := &v1alpha1.TLSConfig{
		CertificateFrom: &v1alpha1.TLSCertificateSource{
			ConfigMapKeyRef: &corev1.ConfigMapKeySelector{
				LocalObjectReference: corev1.LocalObjectReference{
@@ -1450,7 +1450,7 @@ func TestTemplateRenderedWithTLS(t *testing.T) {
	ars := render(t, options)

	require.NotNil(t, ars.Spec.GitHubServerTLS)
	expected := &v1alpha1.GitHubServerTLSConfig{
	expected := &v1alpha1.TLSConfig{
		CertificateFrom: &v1alpha1.TLSCertificateSource{
			ConfigMapKeyRef: &corev1.ConfigMapKeySelector{
				LocalObjectReference: corev1.LocalObjectReference{
@@ -1826,7 +1826,7 @@ func TestTemplateRenderedAutoScalingRunnerSet_DinDMergePodSpec(t *testing.T) {
	var ars v1alpha1.AutoscalingRunnerSet
	helm.UnmarshalK8SYaml(t, output, &ars)

	assert.Len(t, ars.Spec.Template.Spec.Containers, 2, "There should be 2 containers")
	assert.Len(t, ars.Spec.Template.Spec.Containers, 1, "There should be 1 container")
	assert.Equal(t, "runner", ars.Spec.Template.Spec.Containers[0].Name, "Container name should be runner")
	assert.Equal(t, "250m", ars.Spec.Template.Spec.Containers[0].Resources.Limits.Cpu().String(), "CPU Limit should be set")
	assert.Equal(t, "64Mi", ars.Spec.Template.Spec.Containers[0].Resources.Limits.Memory().String(), "Memory Limit should be set")
@@ -2468,3 +2468,43 @@ func TestNamespaceOverride(t *testing.T) {
		})
	}
}

func TestAutoscalingRunnerSetCustomAnnotationsAndLabelsApplied(t *testing.T) {
	t.Parallel()

	// Path to the helm chart we will test
	helmChartPath, err := filepath.Abs("../../gha-runner-scale-set")
	require.NoError(t, err)

	releaseName := "test-runners"
	namespaceName := "test-" + strings.ToLower(random.UniqueId())

	options := &helm.Options{
		Logger: logger.Discard,
		SetValues: map[string]string{
			"githubConfigUrl":                    "https://github.com/actions",
			"githubConfigSecret.github_token":    "gh_token12345",
			"controllerServiceAccount.name":      "arc",
			"controllerServiceAccount.namespace": "arc-system",
			"annotations.actions\\.github\\.com/vault":                     "azure_key_vault",
			"annotations.actions\\.github\\.com/cleanup-manager-role-name": "not-propagated",
			"labels.custom":                          "custom",
			"labels.app\\.kubernetes\\.io/component": "not-propagated",
		},
		KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
	}

	output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"})

	var autoscalingRunnerSet v1alpha1.AutoscalingRunnerSet
	helm.UnmarshalK8SYaml(t, output, &autoscalingRunnerSet)

	vault := autoscalingRunnerSet.Annotations["actions.github.com/vault"]
	assert.Equal(t, "azure_key_vault", vault)

	custom := autoscalingRunnerSet.Labels["custom"]
	assert.Equal(t, "custom", custom)

	assert.NotEqual(t, "not-propagated", autoscalingRunnerSet.Annotations["actions.github.com/cleanup-manager-role-name"])
	assert.NotEqual(t, "not-propagated", autoscalingRunnerSet.Labels["app.kubernetes.io/component"])
}
@@ -6,7 +6,7 @@ githubConfigUrl: ""
## You can choose to supply:
## A) a PAT token,
## B) a GitHub App, or
## C) a pre-defined Kubernetes secret.
## C) a pre-defined secret.
## The syntax for each of these variations is documented below.
## (Variation A) When using a PAT token, the syntax is as follows:
githubConfigSecret:
@@ -17,6 +17,7 @@ githubConfigSecret:
## (Variation B) When using a GitHub App, the syntax is as follows:
# githubConfigSecret:
#   # NOTE: IDs MUST be strings, use quotes
#   # The github_app_id can be an app_id or the client_id
#   github_app_id: ""
#   github_app_installation_id: ""
#   github_app_private_key: |
@@ -27,8 +28,11 @@ githubConfigSecret:
#     .
#     private key line N
#
## (Variation C) When using a pre-defined Kubernetes secret in the same namespace that the gha-runner-scale-set is going to deploy,
## the syntax is as follows:
## (Variation C) When using a pre-defined secret.
## The secret can be pulled either directly from Kubernetes or from the vault, depending on configuration.
## A Kubernetes secret must live in the same namespace that the gha-runner-scale-set is going to be deployed to.
## If a vault is configured, the secret name is instead used to fetch the app configuration from the vault.
## The syntax is as follows:
# githubConfigSecret: pre-defined-secret
## Notes on using pre-defined Kubernetes secrets:
## You need to make sure your predefined secret has all the required secret data set properly.
@@ -84,6 +88,26 @@ githubConfigSecret:
#     key: ca.crt
#     runnerMountPath: /usr/local/share/ca-certificates/

# keyVault:
#   # Available values: "azure_key_vault"
#   type: ""
#   # Configuration related to azure key vault
#   azure_key_vault:
#     url: ""
#     client_id: ""
#     tenant_id: ""
#     certificate_path: ""
#   proxy:
#     http:
#       url: http://proxy.com:1234
#       credentialSecretRef: proxy-auth # a secret with `username` and `password` keys
#     https:
#       url: http://proxy.com:1234
#       credentialSecretRef: proxy-auth # a secret with `username` and `password` keys
#     noProxy:
#       - example.com
#       - example.org

## Container mode is an object that provides out-of-box configuration
## for dind and kubernetes mode. Template will be modified as documented under the
## template object.
@@ -130,7 +154,7 @@ githubConfigSecret:
#   counters:
#     gha_started_jobs_total:
#       labels:
#         ["repository", "organization", "enterprise", "job_name", "event_name"]
#         ["repository", "organization", "enterprise", "job_name", "event_name", "job_workflow_ref"]
#     gha_completed_jobs_total:
#       labels:
#         [
@@ -140,6 +164,7 @@ githubConfigSecret:
#           "job_name",
#           "event_name",
#           "job_result",
#           "job_workflow_ref",
#         ]
#   gauges:
#     gha_assigned_jobs:
@@ -161,7 +186,7 @@ githubConfigSecret:
#   histograms:
#     gha_job_startup_duration_seconds:
#       labels:
#         ["repository", "organization", "enterprise", "job_name", "event_name"]
#         ["repository", "organization", "enterprise", "job_name", "event_name", "job_workflow_ref"]
#       buckets:
#         [
#           0.01,
@@ -219,6 +244,7 @@ githubConfigSecret:
#           "job_name",
#           "event_name",
#           "job_result",
#           "job_workflow_ref"
#         ]
#       buckets:
#         [
@@ -283,18 +309,6 @@ template:
  ##     volumeMounts:
  ##       - name: dind-externals
  ##         mountPath: /home/runner/tmpDir
  ##   containers:
  ##     - name: runner
  ##       image: ghcr.io/actions/actions-runner:latest
  ##       command: ["/home/runner/run.sh"]
  ##       env:
  ##         - name: DOCKER_HOST
  ##           value: unix:///var/run/docker.sock
  ##       volumeMounts:
  ##         - name: work
  ##           mountPath: /home/runner/_work
  ##         - name: dind-sock
  ##           mountPath: /var/run
  ##     - name: dind
  ##       image: docker:dind
  ##       args:
@@ -306,13 +320,29 @@ template:
  ##           value: "123"
  ##       securityContext:
  ##         privileged: true
  ##       restartPolicy: Always
  ##       startupProbe:
  ##         exec:
  ##           command:
  ##             - docker
  ##             - info
  ##         initialDelaySeconds: 0
  ##         failureThreshold: 24
  ##         periodSeconds: 5
  ##   containers:
  ##     - name: runner
  ##       image: ghcr.io/actions/actions-runner:latest
  ##       command: ["/home/runner/run.sh"]
  ##       env:
  ##         - name: DOCKER_HOST
  ##           value: unix:///var/run/docker.sock
  ##         - name: RUNNER_WAIT_FOR_DOCKER_IN_SECONDS
  ##           value: "120"
  ##       volumeMounts:
  ##         - name: work
  ##           mountPath: /home/runner/_work
  ##         - name: dind-sock
  ##           mountPath: /var/run
  ##         - name: dind-externals
  ##           mountPath: /home/runner/externals
  ## volumes:
  ##   - name: work
  ##     emptyDir: {}
@@ -17,7 +17,7 @@ import (
// App is responsible for initializing required components and running the app.
type App struct {
	// configured fields
	config config.Config
	config *config.Config
	logger logr.Logger

	// initialized fields
@@ -38,8 +38,12 @@ type Worker interface {
}

func New(config config.Config) (*App, error) {
	if err := config.Validate(); err != nil {
		return nil, fmt.Errorf("failed to validate config: %w", err)
	}

	app := &App{
		config: config,
		config: &config,
	}

	ghConfig, err := actions.ParseGitHubConfigFromURL(config.ConfigureUrl)
@@ -69,8 +73,8 @@ func New(config config.Config) (*App, error) {
		Repository:     ghConfig.Repository,
		ServerAddr:     config.MetricsAddr,
		ServerEndpoint: config.MetricsEndpoint,
		Metrics:        config.Metrics,
		Logger:         app.logger.WithName("metrics exporter"),
		Metrics:        *config.Metrics,
	})
}
@@ -1,6 +1,7 @@
package config

import (
	"context"
	"crypto/x509"
	"encoding/json"
	"fmt"
@@ -9,19 +10,26 @@ import (
	"os"

	"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
	"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1/appconfig"
	"github.com/actions/actions-runner-controller/build"
	"github.com/actions/actions-runner-controller/github/actions"
	"github.com/actions/actions-runner-controller/logging"
	"github.com/actions/actions-runner-controller/vault"
	"github.com/actions/actions-runner-controller/vault/azurekeyvault"
	"github.com/go-logr/logr"
	"golang.org/x/net/http/httpproxy"
)

type Config struct {
	ConfigureUrl string `json:"configure_url"`
	AppID int64 `json:"app_id"`
	AppInstallationID int64 `json:"app_installation_id"`
	AppPrivateKey string `json:"app_private_key"`
	Token string `json:"token"`
	ConfigureUrl string `json:"configure_url"`
	VaultType vault.VaultType `json:"vault_type"`
	VaultLookupKey string `json:"vault_lookup_key"`
	// If the VaultType is set to "azure_key_vault", this field must be populated.
	AzureKeyVaultConfig *azurekeyvault.Config `json:"azure_key_vault,omitempty"`
	// AppConfig contains the GitHub App configuration.
	// It is initially set to nil if VaultType is set.
	// Otherwise, it is populated with the GitHub App credentials from the GitHub secret.
	*appconfig.AppConfig
	EphemeralRunnerSetNamespace string `json:"ephemeral_runner_set_namespace"`
	EphemeralRunnerSetName string `json:"ephemeral_runner_set_name"`
	MaxRunners int `json:"max_runners"`
@@ -36,23 +44,58 @@ type Config struct {
	Metrics *v1alpha1.MetricsConfig `json:"metrics"`
}
func Read(path string) (Config, error) {
|
||||
f, err := os.Open(path)
|
||||
func Read(ctx context.Context, configPath string) (*Config, error) {
|
||||
f, err := os.Open(configPath)
|
||||
if err != nil {
|
||||
return Config{}, err
|
||||
return nil, err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
var config Config
|
||||
if err := json.NewDecoder(f).Decode(&config); err != nil {
|
||||
return Config{}, fmt.Errorf("failed to decode config: %w", err)
|
||||
return nil, fmt.Errorf("failed to decode config: %w", err)
|
||||
}
|
||||
|
||||
var vault vault.Vault
|
||||
switch config.VaultType {
|
||||
case "":
|
||||
if err := config.Validate(); err != nil {
|
||||
return nil, fmt.Errorf("failed to validate configuration: %v", err)
|
||||
}
|
||||
|
||||
return &config, nil
|
||||
case "azure_key_vault":
|
||||
akv, err := azurekeyvault.New(*config.AzureKeyVaultConfig)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create Azure Key Vault client: %w", err)
|
||||
}
|
||||
|
||||
vault = akv
|
||||
default:
|
||||
return nil, fmt.Errorf("unsupported vault type: %s", config.VaultType)
|
||||
}
|
||||
|
||||
appConfigRaw, err := vault.GetSecret(ctx, config.VaultLookupKey)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get app config from vault: %w", err)
|
||||
}
|
||||
|
||||
appConfig, err := appconfig.FromJSONString(appConfigRaw)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read app config from string: %v", err)
|
||||
}
|
||||
|
||||
config.AppConfig = appConfig
|
||||
|
||||
if err := config.Validate(); err != nil {
|
||||
return Config{}, fmt.Errorf("failed to validate config: %w", err)
|
||||
return nil, fmt.Errorf("config validation failed: %w", err)
|
||||
}
|
||||
|
||||
return config, nil
|
||||
if ctx.Err() != nil {
|
||||
return nil, ctx.Err()
|
||||
}
|
||||
|
||||
return &config, nil
|
||||
}
|
||||
|
||||
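
Read above now has two exits: with an empty vault_type it validates the decoded file and returns it directly, while a configured vault resolves the GitHub App credentials at startup via GetSecret. A minimal self-contained sketch of that shape, using only the standard library — secretStore, listenerConfig, and readConfig are illustrative stand-ins for the repository's vault.Vault and config types, not the real API:

package main

import (
    "context"
    "encoding/json"
    "fmt"
    "os"
)

// secretStore stands in for the repository's vault.Vault interface.
type secretStore interface {
    GetSecret(ctx context.Context, key string) (string, error)
}

type listenerConfig struct {
    VaultType      string `json:"vault_type"`
    VaultLookupKey string `json:"vault_lookup_key"`
    Token          string `json:"token"`
}

func readConfig(ctx context.Context, path string, store secretStore) (*listenerConfig, error) {
    f, err := os.Open(path)
    if err != nil {
        return nil, err
    }
    defer f.Close()

    var cfg listenerConfig
    if err := json.NewDecoder(f).Decode(&cfg); err != nil {
        return nil, fmt.Errorf("failed to decode config: %w", err)
    }

    // No vault configured: credentials must already be inline in the file.
    if cfg.VaultType == "" {
        return &cfg, nil
    }

    // Vault configured: the lookup key resolves the credentials at startup.
    raw, err := store.GetSecret(ctx, cfg.VaultLookupKey)
    if err != nil {
        return nil, fmt.Errorf("failed to get app config from vault: %w", err)
    }
    cfg.Token = raw // the real Read parses an appconfig.AppConfig here instead
    return &cfg, nil
}

// envStore is a toy resolver so the sketch runs on its own.
type envStore struct{}

func (envStore) GetSecret(_ context.Context, key string) (string, error) {
    return os.Getenv(key), nil
}

func main() {
    cfg, err := readConfig(context.Background(), "config.json", envStore{})
    fmt.Println(cfg, err)
}
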
// Validate checks the configuration for errors.
@@ -62,26 +105,30 @@ func (c *Config) Validate() error {
    }

    if len(c.EphemeralRunnerSetNamespace) == 0 || len(c.EphemeralRunnerSetName) == 0 {
        return fmt.Errorf("EphemeralRunnerSetNamespace '%s' or EphemeralRunnerSetName '%s' is missing", c.EphemeralRunnerSetNamespace, c.EphemeralRunnerSetName)
        return fmt.Errorf("EphemeralRunnerSetNamespace %q or EphemeralRunnerSetName %q is missing", c.EphemeralRunnerSetNamespace, c.EphemeralRunnerSetName)
    }

    if c.RunnerScaleSetId == 0 {
        return fmt.Errorf("RunnerScaleSetId '%d' is missing", c.RunnerScaleSetId)
        return fmt.Errorf(`RunnerScaleSetId "%d" is missing`, c.RunnerScaleSetId)
    }

    if c.MaxRunners < c.MinRunners {
        return fmt.Errorf("MinRunners '%d' cannot be greater than MaxRunners '%d'", c.MinRunners, c.MaxRunners)
        return fmt.Errorf(`MinRunners "%d" cannot be greater than MaxRunners "%d"`, c.MinRunners, c.MaxRunners)
    }

    hasToken := len(c.Token) > 0
    hasPrivateKeyConfig := c.AppID > 0 && c.AppPrivateKey != ""

    if !hasToken && !hasPrivateKeyConfig {
        return fmt.Errorf("GitHub auth credential is missing, token length: '%d', appId: '%d', installationId: '%d', private key length: '%d", len(c.Token), c.AppID, c.AppInstallationID, len(c.AppPrivateKey))
    if c.VaultType != "" {
        if err := c.VaultType.Validate(); err != nil {
            return fmt.Errorf("VaultType validation failed: %w", err)
        }
        if c.VaultLookupKey == "" {
            return fmt.Errorf("VaultLookupKey is required when VaultType is set to %q", c.VaultType)
        }
    }

    if hasToken && hasPrivateKeyConfig {
        return fmt.Errorf("only one GitHub auth method supported at a time. Have both PAT and App auth: token length: '%d', appId: '%d', installationId: '%d', private key length: '%d", len(c.Token), c.AppID, c.AppInstallationID, len(c.AppPrivateKey))
    if c.VaultType == "" && c.VaultLookupKey == "" {
        if err := c.AppConfig.Validate(); err != nil {
            return fmt.Errorf("AppConfig validation failed: %w", err)
        }
    }

    return nil
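
The rewritten Validate splits credential checking into two regimes: when a vault is configured it only checks that the vault settings are coherent (the credentials arrive later), and only when no vault is involved does it require the inline AppConfig to be valid. A condensed, standalone restatement of just that branching, with plain parameters in place of the Config fields:

package main

import "fmt"

func validateAuth(vaultType, lookupKey string, hasValidAppConfig bool) error {
    // A configured vault must come with a lookup key; credentials come later.
    if vaultType != "" && lookupKey == "" {
        return fmt.Errorf("VaultLookupKey is required when VaultType is set to %q", vaultType)
    }
    // Only when no vault is in play must the inline app config already be valid.
    if vaultType == "" && lookupKey == "" && !hasValidAppConfig {
        return fmt.Errorf("AppConfig validation failed: missing app config")
    }
    return nil
}

func main() {
    fmt.Println(validateAuth("azure_key_vault", "", false)) // lookup key missing
    fmt.Println(validateAuth("", "", true))                 // inline credentials are fine
}
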
@@ -9,6 +9,7 @@ import (
    "path/filepath"
    "testing"

    "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1/appconfig"
    "github.com/actions/actions-runner-controller/cmd/ghalistener/config"
    "github.com/actions/actions-runner-controller/github/actions"
    "github.com/actions/actions-runner-controller/github/actions/testserver"
@@ -53,7 +54,9 @@ func TestCustomerServerRootCA(t *testing.T) {
    config := config.Config{
        ConfigureUrl: server.ConfigURLForOrg("myorg"),
        ServerRootCA: certsString,
        Token: "token",
        AppConfig: &appconfig.AppConfig{
            Token: "token",
        },
    }

    client, err := config.ActionsClient(logr.Discard())
@@ -80,7 +83,9 @@ func TestProxySettings(t *testing.T) {

    config := config.Config{
        ConfigureUrl: "https://github.com/org/repo",
        Token: "token",
        AppConfig: &appconfig.AppConfig{
            Token: "token",
        },
    }

    client, err := config.ActionsClient(logr.Discard())
@@ -110,7 +115,9 @@ func TestProxySettings(t *testing.T) {

    config := config.Config{
        ConfigureUrl: "https://github.com/org/repo",
        Token: "token",
        AppConfig: &appconfig.AppConfig{
            Token: "token",
        },
    }

    client, err := config.ActionsClient(logr.Discard(), actions.WithRetryMax(0))
@@ -145,7 +152,9 @@ func TestProxySettings(t *testing.T) {

    config := config.Config{
        ConfigureUrl: "https://github.com/org/repo",
        Token: "token",
        AppConfig: &appconfig.AppConfig{
            Token: "token",
        },
    }

    client, err := config.ActionsClient(logr.Discard())

@@ -1,92 +0,0 @@
package config

import (
    "fmt"
    "testing"

    "github.com/stretchr/testify/assert"
)

func TestConfigValidationMinMax(t *testing.T) {
    config := &Config{
        ConfigureUrl: "github.com/some_org/some_repo",
        EphemeralRunnerSetNamespace: "namespace",
        EphemeralRunnerSetName: "deployment",
        RunnerScaleSetId: 1,
        MinRunners: 5,
        MaxRunners: 2,
        Token: "token",
    }
    err := config.Validate()
    assert.ErrorContains(t, err, "MinRunners '5' cannot be greater than MaxRunners '2", "Expected error about MinRunners > MaxRunners")
}

func TestConfigValidationMissingToken(t *testing.T) {
    config := &Config{
        ConfigureUrl: "github.com/some_org/some_repo",
        EphemeralRunnerSetNamespace: "namespace",
        EphemeralRunnerSetName: "deployment",
        RunnerScaleSetId: 1,
    }
    err := config.Validate()
    expectedError := fmt.Sprintf("GitHub auth credential is missing, token length: '%d', appId: '%d', installationId: '%d', private key length: '%d", len(config.Token), config.AppID, config.AppInstallationID, len(config.AppPrivateKey))
    assert.ErrorContains(t, err, expectedError, "Expected error about missing auth")
}

func TestConfigValidationAppKey(t *testing.T) {
    config := &Config{
        AppID: 1,
        AppInstallationID: 10,
        ConfigureUrl: "github.com/some_org/some_repo",
        EphemeralRunnerSetNamespace: "namespace",
        EphemeralRunnerSetName: "deployment",
        RunnerScaleSetId: 1,
    }
    err := config.Validate()
    expectedError := fmt.Sprintf("GitHub auth credential is missing, token length: '%d', appId: '%d', installationId: '%d', private key length: '%d", len(config.Token), config.AppID, config.AppInstallationID, len(config.AppPrivateKey))
    assert.ErrorContains(t, err, expectedError, "Expected error about missing auth")
}

func TestConfigValidationOnlyOneTypeOfCredentials(t *testing.T) {
    config := &Config{
        AppID: 1,
        AppInstallationID: 10,
        AppPrivateKey: "asdf",
        Token: "asdf",
        ConfigureUrl: "github.com/some_org/some_repo",
        EphemeralRunnerSetNamespace: "namespace",
        EphemeralRunnerSetName: "deployment",
        RunnerScaleSetId: 1,
    }
    err := config.Validate()
    expectedError := fmt.Sprintf("only one GitHub auth method supported at a time. Have both PAT and App auth: token length: '%d', appId: '%d', installationId: '%d', private key length: '%d", len(config.Token), config.AppID, config.AppInstallationID, len(config.AppPrivateKey))
    assert.ErrorContains(t, err, expectedError, "Expected error about missing auth")
}

func TestConfigValidation(t *testing.T) {
    config := &Config{
        ConfigureUrl: "https://github.com/actions",
        EphemeralRunnerSetNamespace: "namespace",
        EphemeralRunnerSetName: "deployment",
        RunnerScaleSetId: 1,
        MinRunners: 1,
        MaxRunners: 5,
        Token: "asdf",
    }

    err := config.Validate()

    assert.NoError(t, err, "Expected no error")
}

func TestConfigValidationConfigUrl(t *testing.T) {
    config := &Config{
        EphemeralRunnerSetNamespace: "namespace",
        EphemeralRunnerSetName: "deployment",
        RunnerScaleSetId: 1,
    }

    err := config.Validate()

    assert.ErrorContains(t, err, "GitHubConfigUrl is not provided", "Expected error about missing ConfigureUrl")
}
170
cmd/ghalistener/config/config_validation_test.go
Normal file
@@ -0,0 +1,170 @@
package config

import (
    "testing"

    "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1/appconfig"
    "github.com/actions/actions-runner-controller/vault"
    "github.com/stretchr/testify/assert"
)

func TestConfigValidationMinMax(t *testing.T) {
    config := &Config{
        ConfigureUrl: "github.com/some_org/some_repo",
        EphemeralRunnerSetNamespace: "namespace",
        EphemeralRunnerSetName: "deployment",
        RunnerScaleSetId: 1,
        MinRunners: 5,
        MaxRunners: 2,
        AppConfig: &appconfig.AppConfig{
            Token: "token",
        },
    }
    err := config.Validate()
    assert.ErrorContains(t, err, `MinRunners "5" cannot be greater than MaxRunners "2"`, "Expected error about MinRunners > MaxRunners")
}

func TestConfigValidationMissingToken(t *testing.T) {
    config := &Config{
        ConfigureUrl: "github.com/some_org/some_repo",
        EphemeralRunnerSetNamespace: "namespace",
        EphemeralRunnerSetName: "deployment",
        RunnerScaleSetId: 1,
    }
    err := config.Validate()
    expectedError := "AppConfig validation failed: missing app config"
    assert.ErrorContains(t, err, expectedError, "Expected error about missing auth")
}

func TestConfigValidationAppKey(t *testing.T) {
    t.Parallel()

    t.Run("app id integer", func(t *testing.T) {
        t.Parallel()
        config := &Config{
            AppConfig: &appconfig.AppConfig{
                AppID: "1",
                AppInstallationID: 10,
            },
            ConfigureUrl: "github.com/some_org/some_repo",
            EphemeralRunnerSetNamespace: "namespace",
            EphemeralRunnerSetName: "deployment",
            RunnerScaleSetId: 1,
        }
        err := config.Validate()
        expectedError := "AppConfig validation failed: no credentials provided: either a PAT or GitHub App credentials should be provided"
        assert.ErrorContains(t, err, expectedError, "Expected error about missing auth")
    })

    t.Run("app id as client id", func(t *testing.T) {
        t.Parallel()
        config := &Config{
            AppConfig: &appconfig.AppConfig{
                AppID: "Iv23f8doAlphaNumer1c",
                AppInstallationID: 10,
            },
            ConfigureUrl: "github.com/some_org/some_repo",
            EphemeralRunnerSetNamespace: "namespace",
            EphemeralRunnerSetName: "deployment",
            RunnerScaleSetId: 1,
        }
        err := config.Validate()
        expectedError := "AppConfig validation failed: no credentials provided: either a PAT or GitHub App credentials should be provided"
        assert.ErrorContains(t, err, expectedError, "Expected error about missing auth")
    })
}

func TestConfigValidationOnlyOneTypeOfCredentials(t *testing.T) {
    config := &Config{
        AppConfig: &appconfig.AppConfig{
            AppID: "1",
            AppInstallationID: 10,
            AppPrivateKey: "asdf",
            Token: "asdf",
        },
        ConfigureUrl: "github.com/some_org/some_repo",
        EphemeralRunnerSetNamespace: "namespace",
        EphemeralRunnerSetName: "deployment",
        RunnerScaleSetId: 1,
    }
    err := config.Validate()
    expectedError := "AppConfig validation failed: both PAT and GitHub App credentials provided. should only provide one"
    assert.ErrorContains(t, err, expectedError, "Expected error about missing auth")
}

func TestConfigValidation(t *testing.T) {
    config := &Config{
        ConfigureUrl: "https://github.com/actions",
        EphemeralRunnerSetNamespace: "namespace",
        EphemeralRunnerSetName: "deployment",
        RunnerScaleSetId: 1,
        MinRunners: 1,
        MaxRunners: 5,
        AppConfig: &appconfig.AppConfig{
            Token: "asdf",
        },
    }

    err := config.Validate()

    assert.NoError(t, err, "Expected no error")
}

func TestConfigValidationConfigUrl(t *testing.T) {
    config := &Config{
        EphemeralRunnerSetNamespace: "namespace",
        EphemeralRunnerSetName: "deployment",
        RunnerScaleSetId: 1,
    }

    err := config.Validate()

    assert.ErrorContains(t, err, "GitHubConfigUrl is not provided", "Expected error about missing ConfigureUrl")
}

func TestConfigValidationWithVaultConfig(t *testing.T) {
    t.Run("valid", func(t *testing.T) {
        config := &Config{
            ConfigureUrl: "https://github.com/actions",
            EphemeralRunnerSetNamespace: "namespace",
            EphemeralRunnerSetName: "deployment",
            RunnerScaleSetId: 1,
            MinRunners: 1,
            MaxRunners: 5,
            VaultType: vault.VaultTypeAzureKeyVault,
            VaultLookupKey: "testkey",
        }
        err := config.Validate()
        assert.NoError(t, err, "Expected no error for valid vault type")
    })

    t.Run("invalid vault type", func(t *testing.T) {
        config := &Config{
            ConfigureUrl: "https://github.com/actions",
            EphemeralRunnerSetNamespace: "namespace",
            EphemeralRunnerSetName: "deployment",
            RunnerScaleSetId: 1,
            MinRunners: 1,
            MaxRunners: 5,
            VaultType: vault.VaultType("invalid_vault_type"),
            VaultLookupKey: "testkey",
        }
        err := config.Validate()
        assert.ErrorContains(t, err, `unknown vault type: "invalid_vault_type"`, "Expected error for invalid vault type")
    })

    t.Run("vault type set without lookup key", func(t *testing.T) {
        config := &Config{
            ConfigureUrl: "https://github.com/actions",
            EphemeralRunnerSetNamespace: "namespace",
            EphemeralRunnerSetName: "deployment",
            RunnerScaleSetId: 1,
            MinRunners: 1,
            MaxRunners: 5,
            VaultType: vault.VaultTypeAzureKeyVault,
            VaultLookupKey: "",
        }
        err := config.Validate()
        assert.ErrorContains(t, err, `VaultLookupKey is required when VaultType is set to "azure_key_vault"`, "Expected error for vault type without lookup key")
    })
}
@@ -13,26 +13,27 @@ import (
)

func main() {
    ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
    defer stop()

    configPath, ok := os.LookupEnv("LISTENER_CONFIG_PATH")
    if !ok {
        fmt.Fprintf(os.Stderr, "Error: LISTENER_CONFIG_PATH environment variable is not set\n")
        os.Exit(1)
    }
    config, err := config.Read(configPath)

    config, err := config.Read(ctx, configPath)
    if err != nil {
        log.Printf("Failed to read config: %v", err)
        os.Exit(1)
    }

    app, err := app.New(config)
    app, err := app.New(*config)
    if err != nil {
        log.Printf("Failed to initialize app: %v", err)
        os.Exit(1)
    }

    ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
    defer stop()

    if err := app.Run(ctx); err != nil {
        log.Printf("Application returned an error: %v", err)
        os.Exit(1)
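
The reordering in main follows from the new Read signature: the signal-aware context has to exist before config.Read, because reading the config can now block on a vault round trip and should be cancellable. The shape of the new startup, with readConfig and runApp as hypothetical stand-ins so the sketch compiles on its own:

package main

import (
    "context"
    "log"
    "os"
    "os/signal"
    "syscall"
)

// Hypothetical stand-ins so the sketch is self-contained.
func readConfig(ctx context.Context, path string) (map[string]string, error) {
    return map[string]string{"path": path}, nil // would hit the vault in the real code
}

func runApp(ctx context.Context, cfg map[string]string) error { return nil }

func main() {
    // Create the cancellable context first: config loading may now do I/O
    // against a vault and should be interruptible by SIGINT/SIGTERM.
    ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
    defer stop()

    path, ok := os.LookupEnv("LISTENER_CONFIG_PATH")
    if !ok {
        log.Fatal("LISTENER_CONFIG_PATH environment variable is not set")
    }

    cfg, err := readConfig(ctx, path)
    if err != nil {
        log.Fatalf("Failed to read config: %v", err)
    }

    if err := runApp(ctx, cfg); err != nil {
        log.Fatalf("Application returned an error: %v", err)
    }
}
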
@@ -21,6 +21,7 @@ const (
    labelKeyOrganization = "organization"
    labelKeyRepository = "repository"
    labelKeyJobName = "job_name"
    labelKeyJobWorkflowRef = "job_workflow_ref"
    labelKeyEventName = "event_name"
    labelKeyJobResult = "job_result"
)
@@ -75,11 +76,12 @@ var metricsHelp = metricsHelpRegistry{

func (e *exporter) jobLabels(jobBase *actions.JobMessageBase) prometheus.Labels {
    return prometheus.Labels{
        labelKeyEnterprise: e.scaleSetLabels[labelKeyEnterprise],
        labelKeyOrganization: jobBase.OwnerName,
        labelKeyRepository: jobBase.RepositoryName,
        labelKeyJobName: jobBase.JobDisplayName,
        labelKeyEventName: jobBase.EventName,
        labelKeyEnterprise: e.scaleSetLabels[labelKeyEnterprise],
        labelKeyOrganization: jobBase.OwnerName,
        labelKeyRepository: jobBase.RepositoryName,
        labelKeyJobName: jobBase.JobDisplayName,
        labelKeyJobWorkflowRef: jobBase.JobWorkflowRef,
        labelKeyEventName: jobBase.EventName,
    }
}

@@ -152,13 +154,148 @@ type ExporterConfig struct {
    ServerAddr string
    ServerEndpoint string
    Logger logr.Logger
    Metrics v1alpha1.MetricsConfig
    Metrics *v1alpha1.MetricsConfig
}

var defaultMetrics = v1alpha1.MetricsConfig{
    Counters: map[string]*v1alpha1.CounterMetric{
        MetricStartedJobsTotal: {
            Labels: []string{
                labelKeyEnterprise,
                labelKeyOrganization,
                labelKeyRepository,
                labelKeyJobName,
                labelKeyEventName,
            },
        },
        MetricCompletedJobsTotal: {
            Labels: []string{
                labelKeyEnterprise,
                labelKeyOrganization,
                labelKeyRepository,
                labelKeyJobName,
                labelKeyEventName,
                labelKeyJobResult,
            },
        },
    },
    Gauges: map[string]*v1alpha1.GaugeMetric{
        MetricAssignedJobs: {
            Labels: []string{
                labelKeyEnterprise,
                labelKeyOrganization,
                labelKeyRepository,
                labelKeyRunnerScaleSetName,
                labelKeyRunnerScaleSetNamespace,
            },
        },
        MetricRunningJobs: {
            Labels: []string{
                labelKeyEnterprise,
                labelKeyOrganization,
                labelKeyRepository,
                labelKeyRunnerScaleSetName,
                labelKeyRunnerScaleSetNamespace,
            },
        },
        MetricRegisteredRunners: {
            Labels: []string{
                labelKeyEnterprise,
                labelKeyOrganization,
                labelKeyRepository,
                labelKeyRunnerScaleSetName,
                labelKeyRunnerScaleSetNamespace,
            },
        },
        MetricBusyRunners: {
            Labels: []string{
                labelKeyEnterprise,
                labelKeyOrganization,
                labelKeyRepository,
                labelKeyRunnerScaleSetName,
                labelKeyRunnerScaleSetNamespace,
            },
        },
        MetricMinRunners: {
            Labels: []string{
                labelKeyEnterprise,
                labelKeyOrganization,
                labelKeyRepository,
                labelKeyRunnerScaleSetName,
                labelKeyRunnerScaleSetNamespace,
            },
        },
        MetricMaxRunners: {
            Labels: []string{
                labelKeyEnterprise,
                labelKeyOrganization,
                labelKeyRepository,
                labelKeyRunnerScaleSetName,
                labelKeyRunnerScaleSetNamespace,
            },
        },
        MetricDesiredRunners: {
            Labels: []string{
                labelKeyEnterprise,
                labelKeyOrganization,
                labelKeyRepository,
                labelKeyRunnerScaleSetName,
                labelKeyRunnerScaleSetNamespace,
            },
        },
        MetricIdleRunners: {
            Labels: []string{
                labelKeyEnterprise,
                labelKeyOrganization,
                labelKeyRepository,
                labelKeyRunnerScaleSetName,
                labelKeyRunnerScaleSetNamespace,
            },
        },
    },
    Histograms: map[string]*v1alpha1.HistogramMetric{
        MetricJobStartupDurationSeconds: {
            Labels: []string{
                labelKeyEnterprise,
                labelKeyOrganization,
                labelKeyRepository,
                labelKeyJobName,
                labelKeyEventName,
            },
            Buckets: defaultRuntimeBuckets,
        },
        MetricJobExecutionDurationSeconds: {
            Labels: []string{
                labelKeyEnterprise,
                labelKeyOrganization,
                labelKeyRepository,
                labelKeyJobName,
                labelKeyEventName,
                labelKeyJobResult,
            },
            Buckets: defaultRuntimeBuckets,
        },
    },
}

func (e *ExporterConfig) defaults() {
    if e.ServerAddr == "" {
        e.ServerAddr = ":8080"
    }
    if e.ServerEndpoint == "" {
        e.ServerEndpoint = "/metrics"
    }
    if e.Metrics == nil {
        defaultMetrics := defaultMetrics
        e.Metrics = &defaultMetrics
    }
}

func NewExporter(config ExporterConfig) ServerExporter {
    config.defaults()
    reg := prometheus.NewRegistry()

    metrics := installMetrics(config.Metrics, reg, config.Logger)
    metrics := installMetrics(*config.Metrics, reg, config.Logger)

    mux := http.NewServeMux()
    mux.Handle(
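
One subtlety worth calling out in defaults() above: `defaultMetrics := defaultMetrics` shadows the package-level variable with a local copy before taking its address, so the ExporterConfig never aliases the shared default directly. A toy demonstration of why the copy matters — field writes land on the copy, though map-typed fields inside a copied struct would still share storage:

package main

import "fmt"

type metricsConfig struct{ BucketCount int }

var defaultMetrics = metricsConfig{BucketCount: 10}

type exporterConfig struct{ Metrics *metricsConfig }

func (e *exporterConfig) defaults() {
    if e.Metrics == nil {
        d := defaultMetrics // copy the package default first...
        e.Metrics = &d      // ...then point at the copy, not the shared variable
    }
}

func main() {
    var cfg exporterConfig
    cfg.defaults()
    cfg.Metrics.BucketCount = 99            // mutates only the caller's copy
    fmt.Println(defaultMetrics.BucketCount) // still 10
}
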
@@ -287,7 +424,7 @@ func (e *exporter) ListenAndServe(ctx context.Context) error {
}

func (e *exporter) setGauge(name string, allLabels prometheus.Labels, val float64) {
    m, ok := e.metrics.gauges[name]
    m, ok := e.gauges[name]
    if !ok {
        return
    }
@@ -299,7 +436,7 @@ func (e *exporter) setGauge(name string, allLabels prometheus.Labels, val float6
}

func (e *exporter) incCounter(name string, allLabels prometheus.Labels) {
    m, ok := e.metrics.counters[name]
    m, ok := e.counters[name]
    if !ok {
        return
    }
@@ -311,7 +448,7 @@ func (e *exporter) incCounter(name string, allLabels prometheus.Labels) {
}

func (e *exporter) observeHistogram(name string, allLabels prometheus.Labels, val float64) {
    m, ok := e.metrics.histograms[name]
    m, ok := e.histograms[name]
    if !ok {
        return
    }
@@ -339,7 +476,7 @@ func (e *exporter) PublishJobStarted(msg *actions.JobStarted) {
    l := e.startedJobLabels(msg)
    e.incCounter(MetricStartedJobsTotal, l)

    startupDuration := msg.JobMessageBase.RunnerAssignTime.Unix() - msg.JobMessageBase.ScaleSetAssignTime.Unix()
    startupDuration := msg.RunnerAssignTime.Unix() - msg.ScaleSetAssignTime.Unix()
    e.observeHistogram(MetricJobStartupDurationSeconds, l, float64(startupDuration))
}

@@ -347,7 +484,7 @@ func (e *exporter) PublishJobCompleted(msg *actions.JobCompleted) {
    l := e.completedJobLabels(msg)
    e.incCounter(MetricCompletedJobsTotal, l)

    executionDuration := msg.JobMessageBase.FinishTime.Unix() - msg.JobMessageBase.RunnerAssignTime.Unix()
    executionDuration := msg.FinishTime.Unix() - msg.RunnerAssignTime.Unix()
    e.observeHistogram(MetricJobExecutionDurationSeconds, l, float64(executionDuration))
}

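
The mechanical `e.metrics.gauges` to `e.gauges` and `msg.JobMessageBase.RunnerAssignTime` to `msg.RunnerAssignTime` rewrites both read like field promotion: embedding a struct lets its fields be reached without the intermediate selector. A toy version of that refactor (illustrative names, not the repository's types):

package main

import "fmt"

type metrics struct {
    gauges map[string]float64
}

// exporter embeds metrics, so e.gauges and e.metrics.gauges are the same field.
type exporter struct {
    metrics
}

type jobMessageBase struct{ RunnerName string }

// jobStarted embeds jobMessageBase, promoting RunnerName to msg.RunnerName.
type jobStarted struct {
    jobMessageBase
}

func main() {
    e := exporter{metrics{gauges: map[string]float64{"idle_runners": 3}}}
    msg := jobStarted{jobMessageBase{RunnerName: "runner-1"}}
    fmt.Println(e.gauges["idle_runners"], e.metrics.gauges["idle_runners"]) // same field
    fmt.Println(msg.RunnerName, msg.jobMessageBase.RunnerName)              // same field
}
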
@@ -7,6 +7,7 @@ import (
    "github.com/go-logr/logr"
    "github.com/prometheus/client_golang/prometheus"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

func TestInstallMetrics(t *testing.T) {
@@ -86,3 +87,179 @@ func TestInstallMetrics(t *testing.T) {
    assert.Equal(t, duration.config.Labels, metricsConfig.Histograms[MetricJobStartupDurationSeconds].Labels)
    assert.Equal(t, duration.config.Buckets, defaultRuntimeBuckets)
}

func TestNewExporter(t *testing.T) {
    t.Run("with defaults metrics applied", func(t *testing.T) {
        config := ExporterConfig{
            ScaleSetName: "test-scale-set",
            ScaleSetNamespace: "test-namespace",
            Enterprise: "",
            Organization: "org",
            Repository: "repo",
            ServerAddr: ":6060",
            ServerEndpoint: "/metrics",
            Logger: logr.Discard(),
            Metrics: nil, // when metrics is nil, all default metrics should be registered
        }

        exporter, ok := NewExporter(config).(*exporter)
        require.True(t, ok, "expected exporter to be of type *exporter")
        require.NotNil(t, exporter)

        reg := prometheus.NewRegistry()
        wantMetrics := installMetrics(defaultMetrics, reg, config.Logger)

        assert.Equal(t, len(wantMetrics.counters), len(exporter.counters))
        for k, v := range wantMetrics.counters {
            assert.Contains(t, exporter.counters, k)
            assert.Equal(t, v.config, exporter.counters[k].config)
        }

        assert.Equal(t, len(wantMetrics.gauges), len(exporter.gauges))
        for k, v := range wantMetrics.gauges {
            assert.Contains(t, exporter.gauges, k)
            assert.Equal(t, v.config, exporter.gauges[k].config)
        }

        assert.Equal(t, len(wantMetrics.histograms), len(exporter.histograms))
        for k, v := range wantMetrics.histograms {
            assert.Contains(t, exporter.histograms, k)
            assert.Equal(t, v.config, exporter.histograms[k].config)
        }

        require.NotNil(t, exporter.srv)
        assert.Equal(t, config.ServerAddr, exporter.srv.Addr)
    })

    t.Run("with default server URL", func(t *testing.T) {
        config := ExporterConfig{
            ScaleSetName: "test-scale-set",
            ScaleSetNamespace: "test-namespace",
            Enterprise: "",
            Organization: "org",
            Repository: "repo",
            ServerAddr: "", // empty ServerAddr should default to ":8080"
            ServerEndpoint: "",
            Logger: logr.Discard(),
            Metrics: nil, // when metrics is nil, all default metrics should be registered
        }

        exporter, ok := NewExporter(config).(*exporter)
        require.True(t, ok, "expected exporter to be of type *exporter")
        require.NotNil(t, exporter)

        reg := prometheus.NewRegistry()
        wantMetrics := installMetrics(defaultMetrics, reg, config.Logger)

        assert.Equal(t, len(wantMetrics.counters), len(exporter.counters))
        for k, v := range wantMetrics.counters {
            assert.Contains(t, exporter.counters, k)
            assert.Equal(t, v.config, exporter.counters[k].config)
        }

        assert.Equal(t, len(wantMetrics.gauges), len(exporter.gauges))
        for k, v := range wantMetrics.gauges {
            assert.Contains(t, exporter.gauges, k)
            assert.Equal(t, v.config, exporter.gauges[k].config)
        }

        assert.Equal(t, len(wantMetrics.histograms), len(exporter.histograms))
        for k, v := range wantMetrics.histograms {
            assert.Contains(t, exporter.histograms, k)
            assert.Equal(t, v.config, exporter.histograms[k].config)
        }

        require.NotNil(t, exporter.srv)
        assert.Equal(t, exporter.srv.Addr, ":8080")
    })

    t.Run("with metrics configured", func(t *testing.T) {
        metricsConfig := v1alpha1.MetricsConfig{
            Counters: map[string]*v1alpha1.CounterMetric{
                MetricStartedJobsTotal: {
                    Labels: []string{labelKeyRepository},
                },
            },
            Gauges: map[string]*v1alpha1.GaugeMetric{
                MetricAssignedJobs: {
                    Labels: []string{labelKeyRepository},
                },
            },
            Histograms: map[string]*v1alpha1.HistogramMetric{
                MetricJobExecutionDurationSeconds: {
                    Labels: []string{labelKeyRepository},
                    Buckets: []float64{0.1, 1},
                },
            },
        }

        config := ExporterConfig{
            ScaleSetName: "test-scale-set",
            ScaleSetNamespace: "test-namespace",
            Enterprise: "",
            Organization: "org",
            Repository: "repo",
            ServerAddr: ":6060",
            ServerEndpoint: "/metrics",
            Logger: logr.Discard(),
            Metrics: &metricsConfig,
        }

        exporter, ok := NewExporter(config).(*exporter)
        require.True(t, ok, "expected exporter to be of type *exporter")
        require.NotNil(t, exporter)

        reg := prometheus.NewRegistry()
        wantMetrics := installMetrics(metricsConfig, reg, config.Logger)

        assert.Equal(t, len(wantMetrics.counters), len(exporter.counters))
        for k, v := range wantMetrics.counters {
            assert.Contains(t, exporter.counters, k)
            assert.Equal(t, v.config, exporter.counters[k].config)
        }

        assert.Equal(t, len(wantMetrics.gauges), len(exporter.gauges))
        for k, v := range wantMetrics.gauges {
            assert.Contains(t, exporter.gauges, k)
            assert.Equal(t, v.config, exporter.gauges[k].config)
        }

        assert.Equal(t, len(wantMetrics.histograms), len(exporter.histograms))
        for k, v := range wantMetrics.histograms {
            assert.Contains(t, exporter.histograms, k)
            assert.Equal(t, v.config, exporter.histograms[k].config)
        }

        require.NotNil(t, exporter.srv)
        assert.Equal(t, config.ServerAddr, exporter.srv.Addr)
    })
}

func TestExporterConfigDefaults(t *testing.T) {
    config := ExporterConfig{
        ScaleSetName: "test-scale-set",
        ScaleSetNamespace: "test-namespace",
        Enterprise: "",
        Organization: "org",
        Repository: "repo",
        ServerAddr: "",
        ServerEndpoint: "",
        Logger: logr.Discard(),
        Metrics: nil, // when metrics is nil, all default metrics should be registered
    }

    config.defaults()
    want := ExporterConfig{
        ScaleSetName: "test-scale-set",
        ScaleSetNamespace: "test-namespace",
        Enterprise: "",
        Organization: "org",
        Repository: "repo",
        ServerAddr: ":8080", // default server address
        ServerEndpoint: "/metrics", // default server endpoint
        Logger: logr.Discard(),
        Metrics: &defaultMetrics, // when metrics is nil, all default metrics should be registered
    }

    assert.Equal(t, want, config)
}

@@ -7863,6 +7863,53 @@ spec:
                - containers
                type: object
              type: object
            vaultConfig:
              properties:
                azureKeyVault:
                  properties:
                    certificatePath:
                      type: string
                    clientId:
                      type: string
                    tenantId:
                      type: string
                    url:
                      type: string
                  required:
                  - certificatePath
                  - clientId
                  - tenantId
                  - url
                  type: object
                proxy:
                  properties:
                    http:
                      properties:
                        credentialSecretRef:
                          type: string
                        url:
                          description: Required
                          type: string
                      type: object
                    https:
                      properties:
                        credentialSecretRef:
                          type: string
                        url:
                          description: Required
                          type: string
                      type: object
                    noProxy:
                      items:
                        type: string
                      type: array
                  type: object
                type:
                  description: |-
                    VaultType represents the type of vault that can be used in the application.
                    It is used to identify which vault integration should be used to resolve secrets.
                  type: string
              type: object
          type: object
        status:
          description: AutoscalingListenerStatus defines the observed state of AutoscalingListener
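
The vaultConfig schema added above maps onto Go types roughly like the following; this is a reconstruction from the OpenAPI properties and required lists, not a copy of the repository's actual v1alpha1 definitions:

package main

type VaultConfig struct {
    Type          string               `json:"type,omitempty"`
    AzureKeyVault *AzureKeyVaultConfig `json:"azureKeyVault,omitempty"`
    Proxy         *ProxyConfig         `json:"proxy,omitempty"`
}

type AzureKeyVaultConfig struct {
    URL             string `json:"url"`             // required
    TenantID        string `json:"tenantId"`        // required
    ClientID        string `json:"clientId"`        // required
    CertificatePath string `json:"certificatePath"` // required
}

type ProxyConfig struct {
    HTTP    *ProxyServerConfig `json:"http,omitempty"`
    HTTPS   *ProxyServerConfig `json:"https,omitempty"`
    NoProxy []string           `json:"noProxy,omitempty"`
}

type ProxyServerConfig struct {
    URL                 string `json:"url"` // marked Required in the schema description
    CredentialSecretRef string `json:"credentialSecretRef,omitempty"`
}

func main() {}
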
@@ -15504,6 +15504,53 @@ spec:
                - containers
                type: object
              type: object
            vaultConfig:
              properties:
                azureKeyVault:
                  properties:
                    certificatePath:
                      type: string
                    clientId:
                      type: string
                    tenantId:
                      type: string
                    url:
                      type: string
                  required:
                  - certificatePath
                  - clientId
                  - tenantId
                  - url
                  type: object
                proxy:
                  properties:
                    http:
                      properties:
                        credentialSecretRef:
                          type: string
                        url:
                          description: Required
                          type: string
                      type: object
                    https:
                      properties:
                        credentialSecretRef:
                          type: string
                        url:
                          description: Required
                          type: string
                      type: object
                    noProxy:
                      items:
                        type: string
                      type: array
                  type: object
                type:
                  description: |-
                    VaultType represents the type of vault that can be used in the application.
                    It is used to identify which vault integration should be used to resolve secrets.
                  type: string
              type: object
          type: object
        status:
          description: AutoscalingRunnerSetStatus defines the observed state of AutoscalingRunnerSet
@@ -7784,6 +7784,53 @@ spec:
              required:
              - containers
              type: object
            vaultConfig:
              properties:
                azureKeyVault:
                  properties:
                    certificatePath:
                      type: string
                    clientId:
                      type: string
                    tenantId:
                      type: string
                    url:
                      type: string
                  required:
                  - certificatePath
                  - clientId
                  - tenantId
                  - url
                  type: object
                proxy:
                  properties:
                    http:
                      properties:
                        credentialSecretRef:
                          type: string
                        url:
                          description: Required
                          type: string
                      type: object
                    https:
                      properties:
                        credentialSecretRef:
                          type: string
                        url:
                          description: Required
                          type: string
                      type: object
                    noProxy:
                      items:
                        type: string
                      type: array
                  type: object
                type:
                  description: |-
                    VaultType represents the type of vault that can be used in the application.
                    It is used to identify which vault integration should be used to resolve secrets.
                  type: string
              type: object
          required:
          - githubConfigSecret
          - githubConfigUrl
@@ -7794,7 +7841,8 @@ spec:
            properties:
              failures:
                additionalProperties:
                  type: boolean
                  format: date-time
                  type: string
                type: object
              jobDisplayName:
                type: string
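
The failures change in this hunk swaps the map's value schema from a bare boolean to a date-time string: the status apparently moves from recording that a runner failed to recording when it failed, which makes time-based pruning possible. A sketch of the implied shape, with time.Time standing in for what is likely metav1.Time on the Go side, and the key name assumed:

package main

import (
    "fmt"
    "time"
)

// status mirrors the schema change: failure timestamps keyed by an identifier
// (the key semantics are an assumption, not stated in the diff).
type status struct {
    Failures map[string]time.Time `json:"failures,omitempty"`
}

func main() {
    s := status{Failures: map[string]time.Time{}}
    s.Failures["runner-abc"] = time.Now()
    // A timestamp supports expiring old failures, which a bare bool cannot.
    for id, at := range s.Failures {
        fmt.Println(id, "failed at", at.Format(time.RFC3339))
    }
}
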
@@ -7778,6 +7778,53 @@ spec:
              required:
              - containers
              type: object
            vaultConfig:
              properties:
                azureKeyVault:
                  properties:
                    certificatePath:
                      type: string
                    clientId:
                      type: string
                    tenantId:
                      type: string
                    url:
                      type: string
                  required:
                  - certificatePath
                  - clientId
                  - tenantId
                  - url
                  type: object
                proxy:
                  properties:
                    http:
                      properties:
                        credentialSecretRef:
                          type: string
                        url:
                          description: Required
                          type: string
                      type: object
                    https:
                      properties:
                        credentialSecretRef:
                          type: string
                        url:
                          description: Required
                          type: string
                      type: object
                    noProxy:
                      items:
                        type: string
                      type: array
                  type: object
                type:
                  description: |-
                    VaultType represents the type of vault that can be used in the application.
                    It is used to identify which vault integration should be used to resolve secrets.
                  type: string
              type: object
          required:
          - githubConfigSecret
          - githubConfigUrl
@@ -32,6 +32,7 @@ import (
    "sigs.k8s.io/controller-runtime/pkg/reconcile"

    v1alpha1 "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
    "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1/appconfig"
    "github.com/actions/actions-runner-controller/controllers/actions.github.com/metrics"
    "github.com/actions/actions-runner-controller/github/actions"
    hash "github.com/actions/actions-runner-controller/hash"
@@ -77,7 +78,7 @@ func (r *AutoscalingListenerReconciler) Reconcile(ctx context.Context, req ctrl.
        return ctrl.Result{}, client.IgnoreNotFound(err)
    }

    if !autoscalingListener.ObjectMeta.DeletionTimestamp.IsZero() {
    if !autoscalingListener.DeletionTimestamp.IsZero() {
        if !controllerutil.ContainsFinalizer(autoscalingListener, autoscalingListenerFinalizerName) {
            return ctrl.Result{}, nil
        }
@@ -128,20 +129,24 @@ func (r *AutoscalingListenerReconciler) Reconcile(ctx context.Context, req ctrl.
        return ctrl.Result{}, err
    }

    // Check if the GitHub config secret exists
    secret := new(corev1.Secret)
    if err := r.Get(ctx, types.NamespacedName{Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace, Name: autoscalingListener.Spec.GitHubConfigSecret}, secret); err != nil {
        log.Error(err, "Failed to find GitHub config secret.",
            "namespace", autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
            "name", autoscalingListener.Spec.GitHubConfigSecret)
    appConfig, err := r.GetAppConfig(ctx, &autoscalingRunnerSet)
    if err != nil {
        log.Error(
            err,
            "Failed to get app config for AutoscalingRunnerSet.",
            "namespace",
            autoscalingRunnerSet.Namespace,
            "name",
            autoscalingRunnerSet.GitHubConfigSecret,
        )
        return ctrl.Result{}, err
    }

    // Make sure the runner scale set listener service account is created for the listener pod in the controller namespace
    serviceAccount := new(corev1.ServiceAccount)
    if err := r.Get(ctx, types.NamespacedName{Namespace: autoscalingListener.Namespace, Name: scaleSetListenerServiceAccountName(autoscalingListener)}, serviceAccount); err != nil {
    if err := r.Get(ctx, types.NamespacedName{Namespace: autoscalingListener.Namespace, Name: autoscalingListener.Name}, serviceAccount); err != nil {
        if !kerrors.IsNotFound(err) {
            log.Error(err, "Unable to get listener service accounts", "namespace", autoscalingListener.Namespace, "name", scaleSetListenerServiceAccountName(autoscalingListener))
            log.Error(err, "Unable to get listener service accounts", "namespace", autoscalingListener.Namespace, "name", autoscalingListener.Name)
            return ctrl.Result{}, err
        }

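
GetAppConfig replaces the raw secret lookup in the hunk above; its implementation is not part of this diff, so the following is only a plausible shape inferred from the call site and from the SecretResolver wiring that appears in the test changes further down — every name here is a stand-in:

package main

import (
    "context"
    "fmt"
)

type appConfig struct{ Token string }

// resolver is a hypothetical stand-in: the diff only shows that GetAppConfig
// takes the AutoscalingRunnerSet and returns the resolved app configuration.
type resolver interface {
    Resolve(ctx context.Context, secretName string) (*appConfig, error)
}

type reconciler struct{ secrets resolver }

func (r *reconciler) getAppConfig(ctx context.Context, secretName string) (*appConfig, error) {
    // One call hides whether the credentials come from the in-cluster secret
    // directly or from an external vault keyed by the secret's contents.
    cfg, err := r.secrets.Resolve(ctx, secretName)
    if err != nil {
        return nil, fmt.Errorf("failed to get app config: %w", err)
    }
    return cfg, nil
}

type fakeResolver struct{}

func (fakeResolver) Resolve(_ context.Context, name string) (*appConfig, error) {
    return &appConfig{Token: "token-for-" + name}, nil
}

func main() {
    r := reconciler{secrets: fakeResolver{}}
    cfg, _ := r.getAppConfig(context.Background(), "github-config-secret")
    fmt.Println(cfg.Token)
}
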
@@ -154,9 +159,9 @@ func (r *AutoscalingListenerReconciler) Reconcile(ctx context.Context, req ctrl.

    // Make sure the runner scale set listener role is created in the AutoscalingRunnerSet namespace
    listenerRole := new(rbacv1.Role)
    if err := r.Get(ctx, types.NamespacedName{Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace, Name: scaleSetListenerRoleName(autoscalingListener)}, listenerRole); err != nil {
    if err := r.Get(ctx, types.NamespacedName{Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace, Name: autoscalingListener.Name}, listenerRole); err != nil {
        if !kerrors.IsNotFound(err) {
            log.Error(err, "Unable to get listener role", "namespace", autoscalingListener.Spec.AutoscalingRunnerSetNamespace, "name", scaleSetListenerRoleName(autoscalingListener))
            log.Error(err, "Unable to get listener role", "namespace", autoscalingListener.Spec.AutoscalingRunnerSetNamespace, "name", autoscalingListener.Name)
            return ctrl.Result{}, err
        }

@@ -176,9 +181,9 @@ func (r *AutoscalingListenerReconciler) Reconcile(ctx context.Context, req ctrl.

    // Make sure the runner scale set listener role binding is created
    listenerRoleBinding := new(rbacv1.RoleBinding)
    if err := r.Get(ctx, types.NamespacedName{Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace, Name: scaleSetListenerRoleName(autoscalingListener)}, listenerRoleBinding); err != nil {
    if err := r.Get(ctx, types.NamespacedName{Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace, Name: autoscalingListener.Name}, listenerRoleBinding); err != nil {
        if !kerrors.IsNotFound(err) {
            log.Error(err, "Unable to get listener role binding", "namespace", autoscalingListener.Spec.AutoscalingRunnerSetNamespace, "name", scaleSetListenerRoleName(autoscalingListener))
            log.Error(err, "Unable to get listener role binding", "namespace", autoscalingListener.Spec.AutoscalingRunnerSetNamespace, "name", autoscalingListener.Name)
            return ctrl.Result{}, err
        }

@@ -218,7 +223,7 @@ func (r *AutoscalingListenerReconciler) Reconcile(ctx context.Context, req ctrl.

        // Create a listener pod in the controller namespace
        log.Info("Creating a listener pod")
        return r.createListenerPod(ctx, &autoscalingRunnerSet, autoscalingListener, serviceAccount, secret, log)
        return r.createListenerPod(ctx, &autoscalingRunnerSet, autoscalingListener, serviceAccount, appConfig, log)
    }

    cs := listenerContainerStatus(listenerPod)
@@ -239,6 +244,19 @@ func (r *AutoscalingListenerReconciler) Reconcile(ctx context.Context, req ctrl.
            log.Error(err, "Unable to delete the listener pod", "namespace", listenerPod.Namespace, "name", listenerPod.Name)
            return ctrl.Result{}, err
        }

        // delete the listener config secret as well, so it gets recreated when the listener pod is recreated, with any new data if it exists
        var configSecret corev1.Secret
        err := r.Get(ctx, types.NamespacedName{Namespace: autoscalingListener.Namespace, Name: scaleSetListenerConfigName(autoscalingListener)}, &configSecret)
        switch {
        case err == nil && configSecret.DeletionTimestamp.IsZero():
            log.Info("Deleting the listener config secret")
            if err := r.Delete(ctx, &configSecret); err != nil {
                return ctrl.Result{}, fmt.Errorf("failed to delete listener config secret: %w", err)
            }
        case !kerrors.IsNotFound(err):
            return ctrl.Result{}, fmt.Errorf("failed to get the listener config secret: %w", err)
        }
    }
    return ctrl.Result{}, nil
case cs.State.Running != nil:
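
The config-secret cleanup added above follows a small delete-if-present pattern: delete only when the object exists and is not already terminating, treat NotFound as success, and surface every other lookup error. The same pattern in isolation, with fetch and del standing in for r.Get and r.Delete:

package main

import (
    "errors"
    "fmt"
)

var errNotFound = errors.New("not found")

type object struct{ deleting bool }

func deleteIfPresent(fetch func() (*object, error), del func(*object) error) error {
    obj, err := fetch()
    switch {
    case err == nil && !obj.deleting:
        // Exists and not already being deleted: issue the delete.
        return del(obj)
    case err != nil && !errors.Is(err, errNotFound):
        // A real lookup error; NotFound simply means there is nothing to do.
        return fmt.Errorf("failed to get object: %w", err)
    }
    return nil
}

func main() {
    err := deleteIfPresent(
        func() (*object, error) { return nil, errNotFound },
        func(*object) error { return nil },
    )
    fmt.Println(err) // <nil>: a missing object is not an error here
}
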
@@ -260,7 +278,7 @@ func (r *AutoscalingListenerReconciler) cleanupResources(ctx context.Context, au
    err = r.Get(ctx, types.NamespacedName{Name: autoscalingListener.Name, Namespace: autoscalingListener.Namespace}, listenerPod)
    switch {
    case err == nil:
        if listenerPod.ObjectMeta.DeletionTimestamp.IsZero() {
        if listenerPod.DeletionTimestamp.IsZero() {
            logger.Info("Deleting the listener pod")
            if err := r.Delete(ctx, listenerPod); err != nil {
                return false, fmt.Errorf("failed to delete listener pod: %w", err)
@@ -278,7 +296,7 @@ func (r *AutoscalingListenerReconciler) cleanupResources(ctx context.Context, au
    err = r.Get(ctx, types.NamespacedName{Namespace: autoscalingListener.Namespace, Name: scaleSetListenerConfigName(autoscalingListener)}, &secret)
    switch {
    case err == nil:
        if secret.ObjectMeta.DeletionTimestamp.IsZero() {
        if secret.DeletionTimestamp.IsZero() {
            logger.Info("Deleting the listener config secret")
            if err := r.Delete(ctx, &secret); err != nil {
                return false, fmt.Errorf("failed to delete listener config secret: %w", err)
@@ -295,7 +313,7 @@ func (r *AutoscalingListenerReconciler) cleanupResources(ctx context.Context, au
    err = r.Get(ctx, types.NamespacedName{Name: proxyListenerSecretName(autoscalingListener), Namespace: autoscalingListener.Namespace}, proxySecret)
    switch {
    case err == nil:
        if proxySecret.ObjectMeta.DeletionTimestamp.IsZero() {
        if proxySecret.DeletionTimestamp.IsZero() {
            logger.Info("Deleting the listener proxy secret")
            if err := r.Delete(ctx, proxySecret); err != nil {
                return false, fmt.Errorf("failed to delete listener proxy secret: %w", err)
@@ -309,10 +327,10 @@ func (r *AutoscalingListenerReconciler) cleanupResources(ctx context.Context, au
    }

    listenerRoleBinding := new(rbacv1.RoleBinding)
    err = r.Get(ctx, types.NamespacedName{Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace, Name: scaleSetListenerRoleName(autoscalingListener)}, listenerRoleBinding)
    err = r.Get(ctx, types.NamespacedName{Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace, Name: autoscalingListener.Name}, listenerRoleBinding)
    switch {
    case err == nil:
        if listenerRoleBinding.ObjectMeta.DeletionTimestamp.IsZero() {
        if listenerRoleBinding.DeletionTimestamp.IsZero() {
            logger.Info("Deleting the listener role binding")
            if err := r.Delete(ctx, listenerRoleBinding); err != nil {
                return false, fmt.Errorf("failed to delete listener role binding: %w", err)
@@ -325,10 +343,10 @@ func (r *AutoscalingListenerReconciler) cleanupResources(ctx context.Context, au
    logger.Info("Listener role binding is deleted")

    listenerRole := new(rbacv1.Role)
    err = r.Get(ctx, types.NamespacedName{Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace, Name: scaleSetListenerRoleName(autoscalingListener)}, listenerRole)
    err = r.Get(ctx, types.NamespacedName{Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace, Name: autoscalingListener.Name}, listenerRole)
    switch {
    case err == nil:
        if listenerRole.ObjectMeta.DeletionTimestamp.IsZero() {
        if listenerRole.DeletionTimestamp.IsZero() {
            logger.Info("Deleting the listener role")
            if err := r.Delete(ctx, listenerRole); err != nil {
                return false, fmt.Errorf("failed to delete listener role: %w", err)
@@ -342,10 +360,10 @@ func (r *AutoscalingListenerReconciler) cleanupResources(ctx context.Context, au

    logger.Info("Cleaning up the listener service account")
    listenerSa := new(corev1.ServiceAccount)
    err = r.Get(ctx, types.NamespacedName{Name: scaleSetListenerServiceAccountName(autoscalingListener), Namespace: autoscalingListener.Namespace}, listenerSa)
    err = r.Get(ctx, types.NamespacedName{Name: autoscalingListener.Name, Namespace: autoscalingListener.Namespace}, listenerSa)
    switch {
    case err == nil:
        if listenerSa.ObjectMeta.DeletionTimestamp.IsZero() {
        if listenerSa.DeletionTimestamp.IsZero() {
            logger.Info("Deleting the listener service account")
            if err := r.Delete(ctx, listenerSa); err != nil {
                return false, fmt.Errorf("failed to delete listener service account: %w", err)
@@ -361,7 +379,7 @@ func (r *AutoscalingListenerReconciler) cleanupResources(ctx context.Context, au
}

func (r *AutoscalingListenerReconciler) createServiceAccountForListener(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, logger logr.Logger) (ctrl.Result, error) {
    newServiceAccount := r.ResourceBuilder.newScaleSetListenerServiceAccount(autoscalingListener)
    newServiceAccount := r.newScaleSetListenerServiceAccount(autoscalingListener)

    if err := ctrl.SetControllerReference(autoscalingListener, newServiceAccount, r.Scheme); err != nil {
        return ctrl.Result{}, err
@@ -377,7 +395,7 @@ func (r *AutoscalingListenerReconciler) createServiceAccountForListener(ctx cont
    return ctrl.Result{}, nil
}

func (r *AutoscalingListenerReconciler) createListenerPod(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, autoscalingListener *v1alpha1.AutoscalingListener, serviceAccount *corev1.ServiceAccount, secret *corev1.Secret, logger logr.Logger) (ctrl.Result, error) {
func (r *AutoscalingListenerReconciler) createListenerPod(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, autoscalingListener *v1alpha1.AutoscalingListener, serviceAccount *corev1.ServiceAccount, appConfig *appconfig.AppConfig, logger logr.Logger) (ctrl.Result, error) {
    var envs []corev1.EnvVar
    if autoscalingListener.Spec.Proxy != nil {
        httpURL := corev1.EnvVar{
@@ -446,7 +464,7 @@ func (r *AutoscalingListenerReconciler) createListenerPod(ctx context.Context, a

    logger.Info("Creating listener config secret")

    podConfig, err := r.ResourceBuilder.newScaleSetListenerConfig(autoscalingListener, secret, metricsConfig, cert)
    podConfig, err := r.newScaleSetListenerConfig(autoscalingListener, appConfig, metricsConfig, cert)
    if err != nil {
        logger.Error(err, "Failed to build listener config secret")
        return ctrl.Result{}, err
@@ -465,7 +483,7 @@ func (r *AutoscalingListenerReconciler) createListenerPod(ctx context.Context, a
        return ctrl.Result{Requeue: true}, nil
    }

    newPod, err := r.ResourceBuilder.newScaleSetListenerPod(autoscalingListener, &podConfig, serviceAccount, secret, metricsConfig, envs...)
    newPod, err := r.newScaleSetListenerPod(autoscalingListener, &podConfig, serviceAccount, metricsConfig, envs...)
    if err != nil {
        logger.Error(err, "Failed to build listener pod")
        return ctrl.Result{}, err
@@ -524,23 +542,6 @@ func (r *AutoscalingListenerReconciler) certificate(ctx context.Context, autosca
    return certificate, nil
}

func (r *AutoscalingListenerReconciler) createSecretsForListener(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, secret *corev1.Secret, logger logr.Logger) (ctrl.Result, error) {
    newListenerSecret := r.ResourceBuilder.newScaleSetListenerSecretMirror(autoscalingListener, secret)

    if err := ctrl.SetControllerReference(autoscalingListener, newListenerSecret, r.Scheme); err != nil {
        return ctrl.Result{}, err
    }

    logger.Info("Creating listener secret", "namespace", newListenerSecret.Namespace, "name", newListenerSecret.Name)
    if err := r.Create(ctx, newListenerSecret); err != nil {
        logger.Error(err, "Unable to create listener secret", "namespace", newListenerSecret.Namespace, "name", newListenerSecret.Name)
        return ctrl.Result{}, err
    }

    logger.Info("Created listener secret", "namespace", newListenerSecret.Namespace, "name", newListenerSecret.Name)
    return ctrl.Result{Requeue: true}, nil
}

func (r *AutoscalingListenerReconciler) createProxySecret(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, logger logr.Logger) (ctrl.Result, error) {
    data, err := autoscalingListener.Spec.Proxy.ToSecretData(func(s string) (*corev1.Secret, error) {
        var secret corev1.Secret
@@ -581,7 +582,7 @@ func (r *AutoscalingListenerReconciler) createProxySecret(ctx context.Context, a
}

func (r *AutoscalingListenerReconciler) createRoleForListener(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, logger logr.Logger) (ctrl.Result, error) {
    newRole := r.ResourceBuilder.newScaleSetListenerRole(autoscalingListener)
    newRole := r.newScaleSetListenerRole(autoscalingListener)

    logger.Info("Creating listener role", "namespace", newRole.Namespace, "name", newRole.Name, "rules", newRole.Rules)
    if err := r.Create(ctx, newRole); err != nil {
@@ -609,7 +610,7 @@ func (r *AutoscalingListenerReconciler) updateRoleForListener(ctx context.Contex
}

func (r *AutoscalingListenerReconciler) createRoleBindingForListener(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, listenerRole *rbacv1.Role, serviceAccount *corev1.ServiceAccount, logger logr.Logger) (ctrl.Result, error) {
    newRoleBinding := r.ResourceBuilder.newScaleSetListenerRoleBinding(autoscalingListener, listenerRole, serviceAccount)
    newRoleBinding := r.newScaleSetListenerRoleBinding(autoscalingListener, listenerRole, serviceAccount)

    logger.Info("Creating listener role binding",
        "namespace", newRoleBinding.Namespace,

@@ -14,7 +14,8 @@ import (
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
logf "sigs.k8s.io/controller-runtime/pkg/log"
|
||||
|
||||
listenerconfig "github.com/actions/actions-runner-controller/cmd/ghalistener/config"
|
||||
ghalistenerconfig "github.com/actions/actions-runner-controller/cmd/ghalistener/config"
|
||||
"github.com/actions/actions-runner-controller/github/actions/fake"
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
kerrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
@@ -43,10 +44,17 @@ var _ = Describe("Test AutoScalingListener controller", func() {
|
||||
autoscalingNS, mgr = createNamespace(GinkgoT(), k8sClient)
|
||||
configSecret = createDefaultSecret(GinkgoT(), k8sClient, autoscalingNS.Name)
|
||||
|
||||
secretResolver := NewSecretResolver(mgr.GetClient(), fake.NewMultiClient())
|
||||
|
||||
rb := ResourceBuilder{
|
||||
SecretResolver: secretResolver,
|
||||
}
|
||||
|
||||
controller := &AutoscalingListenerReconciler{
|
||||
Client: mgr.GetClient(),
|
||||
Scheme: mgr.GetScheme(),
|
||||
Log: logf.Log,
|
||||
Client: mgr.GetClient(),
|
||||
Scheme: mgr.GetScheme(),
|
||||
Log: logf.Log,
|
||||
ResourceBuilder: rb,
|
||||
}
|
||||
err := controller.SetupWithManager(mgr)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to setup controller")
@@ -104,7 +112,7 @@ var _ = Describe("Test AutoScalingListener controller", func() {
})

Context("When creating a new AutoScalingListener", func() {
It("It should create/add all required resources for a new AutoScalingListener (finalizer, service account, role, rolebinding, config, pod)", func() {
It("It should create/add all required resources for a new AutoScalingListener (finalizer, secret, service account, role, rolebinding, pod)", func() {
config := new(corev1.Secret)
Eventually(
func() error {
@@ -138,20 +146,21 @@ var _ = Describe("Test AutoScalingListener controller", func() {
serviceAccount := new(corev1.ServiceAccount)
Eventually(
func() (string, error) {
err := k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerServiceAccountName(autoscalingListener), Namespace: autoscalingListener.Namespace}, serviceAccount)
err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingListener.Name, Namespace: autoscalingListener.Namespace}, serviceAccount)
if err != nil {
return "", err
}
return serviceAccount.Name, nil
},
autoscalingListenerTestTimeout,
autoscalingListenerTestInterval).Should(BeEquivalentTo(scaleSetListenerServiceAccountName(autoscalingListener)), "Service account should be created")
autoscalingListenerTestInterval,
).Should(BeEquivalentTo(autoscalingListener.Name), "Service account should be created")

// Check if role is created
role := new(rbacv1.Role)
Eventually(
func() ([]rbacv1.PolicyRule, error) {
err := k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerRoleName(autoscalingListener), Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace}, role)
err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingListener.Name, Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace}, role)
if err != nil {
return nil, err
}
@@ -165,7 +174,7 @@ var _ = Describe("Test AutoScalingListener controller", func() {
roleBinding := new(rbacv1.RoleBinding)
Eventually(
func() (string, error) {
err := k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerRoleName(autoscalingListener), Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace}, roleBinding)
err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingListener.Name, Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace}, roleBinding)
if err != nil {
return "", err
}
@@ -173,23 +182,7 @@ var _ = Describe("Test AutoScalingListener controller", func() {
return roleBinding.RoleRef.Name, nil
},
autoscalingListenerTestTimeout,
autoscalingListenerTestInterval).Should(BeEquivalentTo(scaleSetListenerRoleName(autoscalingListener)), "Rolebinding should be created")

listenerConfig := new(corev1.Secret)
Eventually(
func() error {
return k8sClient.Get(
ctx,
client.ObjectKey{
Name: scaleSetListenerConfigName(autoscalingListener),
Namespace: autoscalingListener.Namespace,
},
listenerConfig,
)
},
autoscalingListenerTestTimeout,
autoscalingListenerTestInterval,
).Should(Succeed(), "Listener config should be created")
autoscalingListenerTestInterval).Should(BeEquivalentTo(autoscalingListener.Name), "Rolebinding should be created")

// Check if pod is created
pod := new(corev1.Pod)
@@ -251,7 +244,7 @@ var _ = Describe("Test AutoScalingListener controller", func() {
Eventually(
func() bool {
roleBinding := new(rbacv1.RoleBinding)
err := k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerRoleName(autoscalingListener), Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace}, roleBinding)
err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingListener.Name, Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace}, roleBinding)
return kerrors.IsNotFound(err)
},
autoscalingListenerTestTimeout,
@@ -262,7 +255,7 @@ var _ = Describe("Test AutoScalingListener controller", func() {
Eventually(
func() bool {
role := new(rbacv1.Role)
err := k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerRoleName(autoscalingListener), Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace}, role)
err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingListener.Name, Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace}, role)
return kerrors.IsNotFound(err)
},
autoscalingListenerTestTimeout,
@@ -343,7 +336,7 @@ var _ = Describe("Test AutoScalingListener controller", func() {
role := new(rbacv1.Role)
Eventually(
func() ([]rbacv1.PolicyRule, error) {
err := k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerRoleName(autoscalingListener), Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace}, role)
err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingListener.Name, Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace}, role)
if err != nil {
return nil, err
}
@@ -354,7 +347,7 @@ var _ = Describe("Test AutoScalingListener controller", func() {
autoscalingListenerTestInterval).Should(BeEquivalentTo(rulesForListenerRole([]string{updated.Spec.EphemeralRunnerSetName})), "Role should be updated")
})

It("It should re-create pod whenever listener container is terminated", func() {
It("It should re-create pod and config secret whenever listener container is terminated", func() {
// Wait for the pod to be created
pod := new(corev1.Pod)
Eventually(
@@ -370,7 +363,18 @@ var _ = Describe("Test AutoScalingListener controller", func() {
autoscalingListenerTestInterval,
).Should(BeEquivalentTo(autoscalingListener.Name), "Pod should be created")

secret := new(corev1.Secret)
Eventually(
func() error {
return k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerConfigName(autoscalingListener), Namespace: autoscalingListener.Namespace}, secret)
},
autoscalingListenerTestTimeout,
autoscalingListenerTestInterval,
).Should(Succeed(), "Config secret should be created")

oldPodUID := string(pod.UID)
oldSecretUID := string(secret.UID)

updated := pod.DeepCopy()
updated.Status.ContainerStatuses = []corev1.ContainerStatus{
{
@@ -399,6 +403,21 @@ var _ = Describe("Test AutoScalingListener controller", func() {
autoscalingListenerTestTimeout,
autoscalingListenerTestInterval,
).ShouldNot(BeEquivalentTo(oldPodUID), "Pod should be re-created")

// Check if config secret is re-created
Eventually(
func() (string, error) {
secret := new(corev1.Secret)
err := k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerConfigName(autoscalingListener), Namespace: autoscalingListener.Namespace}, secret)
if err != nil {
return "", err
}

return string(secret.UID), nil
},
autoscalingListenerTestTimeout,
autoscalingListenerTestInterval,
).ShouldNot(BeEquivalentTo(oldSecretUID), "Config secret should be re-created")
})
})
})
@@ -441,10 +460,17 @@ var _ = Describe("Test AutoScalingListener customization", func() {
autoscalingNS, mgr = createNamespace(GinkgoT(), k8sClient)
configSecret = createDefaultSecret(GinkgoT(), k8sClient, autoscalingNS.Name)

secretResolver := NewSecretResolver(mgr.GetClient(), fake.NewMultiClient())

rb := ResourceBuilder{
SecretResolver: secretResolver,
}

controller := &AutoscalingListenerReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
Log: logf.Log,
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
Log: logf.Log,
ResourceBuilder: rb,
}
err := controller.SetupWithManager(mgr)
Expect(err).NotTo(HaveOccurred(), "failed to setup controller")
@@ -714,11 +740,17 @@ var _ = Describe("Test AutoScalingListener controller with proxy", func() {
ctx = context.Background()
autoscalingNS, mgr = createNamespace(GinkgoT(), k8sClient)
configSecret = createDefaultSecret(GinkgoT(), k8sClient, autoscalingNS.Name)
secretResolver := NewSecretResolver(mgr.GetClient(), fake.NewMultiClient())

rb := ResourceBuilder{
SecretResolver: secretResolver,
}

controller := &AutoscalingListenerReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
Log: logf.Log,
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
Log: logf.Log,
ResourceBuilder: rb,
}
err := controller.SetupWithManager(mgr)
Expect(err).NotTo(HaveOccurred(), "failed to setup controller")
@@ -911,10 +943,17 @@ var _ = Describe("Test AutoScalingListener controller with template modification
autoscalingNS, mgr = createNamespace(GinkgoT(), k8sClient)
configSecret = createDefaultSecret(GinkgoT(), k8sClient, autoscalingNS.Name)

secretResolver := NewSecretResolver(mgr.GetClient(), fake.NewMultiClient())

rb := ResourceBuilder{
SecretResolver: secretResolver,
}

controller := &AutoscalingListenerReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
Log: logf.Log,
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
Log: logf.Log,
ResourceBuilder: rb,
}
err := controller.SetupWithManager(mgr)
Expect(err).NotTo(HaveOccurred(), "failed to setup controller")
@@ -1007,6 +1046,12 @@ var _ = Describe("Test GitHub Server TLS configuration", func() {
autoscalingNS, mgr = createNamespace(GinkgoT(), k8sClient)
configSecret = createDefaultSecret(GinkgoT(), k8sClient, autoscalingNS.Name)

secretResolver := NewSecretResolver(mgr.GetClient(), fake.NewMultiClient())

rb := ResourceBuilder{
SecretResolver: secretResolver,
}

cert, err := os.ReadFile(filepath.Join(
"../../",
"github",
@@ -1028,9 +1073,10 @@ var _ = Describe("Test GitHub Server TLS configuration", func() {
Expect(err).NotTo(HaveOccurred(), "failed to create configmap with root CAs")

controller := &AutoscalingListenerReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
Log: logf.Log,
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
Log: logf.Log,
ResourceBuilder: rb,
}
err = controller.SetupWithManager(mgr)
Expect(err).NotTo(HaveOccurred(), "failed to setup controller")
@@ -1045,7 +1091,7 @@ var _ = Describe("Test GitHub Server TLS configuration", func() {
Spec: v1alpha1.AutoscalingRunnerSetSpec{
GitHubConfigUrl: "https://github.com/owner/repo",
GitHubConfigSecret: configSecret.Name,
GitHubServerTLS: &v1alpha1.GitHubServerTLSConfig{
GitHubServerTLS: &v1alpha1.TLSConfig{
CertificateFrom: &v1alpha1.TLSCertificateSource{
ConfigMapKeyRef: &corev1.ConfigMapKeySelector{
LocalObjectReference: corev1.LocalObjectReference{
@@ -1081,7 +1127,7 @@ var _ = Describe("Test GitHub Server TLS configuration", func() {
Spec: v1alpha1.AutoscalingListenerSpec{
GitHubConfigUrl: "https://github.com/owner/repo",
GitHubConfigSecret: configSecret.Name,
GitHubServerTLS: &v1alpha1.GitHubServerTLSConfig{
GitHubServerTLS: &v1alpha1.TLSConfig{
CertificateFrom: &v1alpha1.TLSCertificateSource{
ConfigMapKeyRef: &corev1.ConfigMapKeySelector{
LocalObjectReference: corev1.LocalObjectReference{
@@ -1125,7 +1171,7 @@ var _ = Describe("Test GitHub Server TLS configuration", func() {

g.Expect(config.Data["config.json"]).ToNot(BeEmpty(), "listener configuration file should not be empty")

var listenerConfig listenerconfig.Config
var listenerConfig ghalistenerconfig.Config
err = json.Unmarshal(config.Data["config.json"], &listenerConfig)
g.Expect(err).NotTo(HaveOccurred(), "failed to parse listener configuration file")

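The TLS test above decodes the listener's configuration straight out of the config secret's config.json key. For reference, a minimal standalone sketch of that decode step, assuming a trimmed-down Config struct (the real ghalistenerconfig.Config carries more fields; this is not the controller's actual code):

    package main

    import (
    	"encoding/json"
    	"fmt"
    )

    // Config is a trimmed-down stand-in for ghalistenerconfig.Config;
    // only the one field needed for this sketch is declared.
    type Config struct {
    	ConfigureUrl string `json:"configureUrl"`
    }

    func main() {
    	// secretData stands in for corev1.Secret.Data["config.json"].
    	secretData := []byte(`{"configureUrl":"https://github.com/owner/repo"}`)

    	var cfg Config
    	if err := json.Unmarshal(secretData, &cfg); err != nil {
    		panic(fmt.Errorf("failed to parse listener configuration file: %w", err))
    	}
    	fmt.Println(cfg.ConfigureUrl)
    }

Keeping the listener configuration in a Secret rather than flags means a config change shows up as a new secret UID, which is what the re-creation assertions above key on.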
@@ -99,7 +99,7 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl
return ctrl.Result{}, client.IgnoreNotFound(err)
}

if !autoscalingRunnerSet.ObjectMeta.DeletionTimestamp.IsZero() {
if !autoscalingRunnerSet.DeletionTimestamp.IsZero() {
if !controllerutil.ContainsFinalizer(autoscalingRunnerSet, autoscalingRunnerSetFinalizerName) {
return ctrl.Result{}, nil
}
@@ -151,7 +151,7 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl
return ctrl.Result{}, nil
}

if autoscalingRunnerSet.Labels[LabelKeyKubernetesVersion] != build.Version {
if !v1alpha1.IsVersionAllowed(autoscalingRunnerSet.Labels[LabelKeyKubernetesVersion], build.Version) {
if err := r.Delete(ctx, autoscalingRunnerSet); err != nil {
log.Error(err, "Failed to delete autoscaling runner set on version mismatch",
"buildVersion", build.Version,
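The hunk above relaxes a strict string comparison into v1alpha1.IsVersionAllowed, so a resource is only force-deleted when its recorded version is genuinely incompatible with the running build. A rough sketch of such a gate, written here as an assumption about its intent (the real helper lives in the v1alpha1 package and may differ):

    package main

    import "fmt"

    // isVersionAllowed is a hypothetical stand-in for v1alpha1.IsVersionAllowed:
    // it tolerates unlabeled resources and development builds, and otherwise
    // requires the resource's version label to match the controller build.
    func isVersionAllowed(resourceVersion, buildVersion string) bool {
    	if resourceVersion == "" || buildVersion == "dev" {
    		return true // don't force-delete resources created by dev builds
    	}
    	return resourceVersion == buildVersion
    }

    func main() {
    	fmt.Println(isVersionAllowed("0.9.1", "0.9.1")) // true
    	fmt.Println(isVersionAllowed("0.9.0", "0.9.1")) // false: triggers re-creation
    }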
@@ -207,14 +207,6 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl
return r.updateRunnerScaleSetName(ctx, autoscalingRunnerSet, log)
}

secret := new(corev1.Secret)
if err := r.Get(ctx, types.NamespacedName{Namespace: autoscalingRunnerSet.Namespace, Name: autoscalingRunnerSet.Spec.GitHubConfigSecret}, secret); err != nil {
log.Error(err, "Failed to find GitHub config secret.",
"namespace", autoscalingRunnerSet.Namespace,
"name", autoscalingRunnerSet.Spec.GitHubConfigSecret)
return ctrl.Result{}, err
}

existingRunnerSets, err := r.listEphemeralRunnerSets(ctx, autoscalingRunnerSet)
if err != nil {
log.Error(err, "Failed to list existing ephemeral runner sets")
@@ -246,7 +238,7 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl

// Our listener pod is out of date, so we need to delete it so that it gets recreated.
listenerValuesHashChanged := listener.Annotations[annotationKeyValuesHash] != autoscalingRunnerSet.Annotations[annotationKeyValuesHash]
listenerSpecHashChanged := listener.Annotations[annotationKeyRunnerSpecHash] != autoscalingRunnerSet.ListenerSpecHash(secret)
listenerSpecHashChanged := listener.Annotations[annotationKeyRunnerSpecHash] != autoscalingRunnerSet.ListenerSpecHash()
if listenerFound && (listenerValuesHashChanged || listenerSpecHashChanged) {
log.Info("RunnerScaleSetListener is out of date. Deleting it so that it is recreated", "name", listener.Name)
if err := r.Delete(ctx, listener); err != nil {
@@ -297,7 +289,7 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl
return ctrl.Result{}, nil
}
log.Info("Creating a new AutoscalingListener for the runner set", "ephemeralRunnerSetName", latestRunnerSet.Name)
return r.createAutoScalingListenerForRunnerSet(ctx, autoscalingRunnerSet, latestRunnerSet, secret, log)
return r.createAutoScalingListenerForRunnerSet(ctx, autoscalingRunnerSet, latestRunnerSet, log)
}

// Update the status of autoscaling runner set.
@@ -332,7 +324,7 @@ func (r *AutoscalingRunnerSetReconciler) cleanupListener(ctx context.Context, au
err = r.Get(ctx, client.ObjectKey{Namespace: r.ControllerNamespace, Name: scaleSetListenerName(autoscalingRunnerSet)}, &listener)
switch {
case err == nil:
if listener.ObjectMeta.DeletionTimestamp.IsZero() {
if listener.DeletionTimestamp.IsZero() {
logger.Info("Deleting the listener")
if err := r.Delete(ctx, &listener); err != nil {
return false, fmt.Errorf("failed to delete listener: %w", err)
@@ -369,7 +361,7 @@ func (r *AutoscalingRunnerSetReconciler) deleteEphemeralRunnerSets(ctx context.C
for i := range oldRunnerSets {
rs := &oldRunnerSets[i]
// already deleted but contains finalizer so it still exists
if !rs.ObjectMeta.DeletionTimestamp.IsZero() {
if !rs.DeletionTimestamp.IsZero() {
logger.Info("Skip ephemeral runner set since it is already marked for deletion", "name", rs.Name)
continue
}
@@ -402,12 +394,12 @@ func (r *AutoscalingRunnerSetReconciler) removeFinalizersFromDependentResources(

func (r *AutoscalingRunnerSetReconciler) createRunnerScaleSet(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) (ctrl.Result, error) {
logger.Info("Creating a new runner scale set")
actionsClient, err := r.actionsClientFor(ctx, autoscalingRunnerSet)
actionsClient, err := r.GetActionsService(ctx, autoscalingRunnerSet)
if len(autoscalingRunnerSet.Spec.RunnerScaleSetName) == 0 {
autoscalingRunnerSet.Spec.RunnerScaleSetName = autoscalingRunnerSet.Name
}
if err != nil {
logger.Error(err, "Failed to initialize Actions service client for creating a new runner scale set")
logger.Error(err, "Failed to initialize Actions service client for creating a new runner scale set", "error", err.Error())
return ctrl.Result{}, err
}

@@ -498,7 +490,7 @@ func (r *AutoscalingRunnerSetReconciler) updateRunnerScaleSetRunnerGroup(ctx con
return ctrl.Result{}, err
}

actionsClient, err := r.actionsClientFor(ctx, autoscalingRunnerSet)
actionsClient, err := r.GetActionsService(ctx, autoscalingRunnerSet)
if err != nil {
logger.Error(err, "Failed to initialize Actions service client for updating an existing runner scale set")
return ctrl.Result{}, err
@@ -546,7 +538,7 @@ func (r *AutoscalingRunnerSetReconciler) updateRunnerScaleSetName(ctx context.Co
return ctrl.Result{}, nil
}

actionsClient, err := r.actionsClientFor(ctx, autoscalingRunnerSet)
actionsClient, err := r.GetActionsService(ctx, autoscalingRunnerSet)
if err != nil {
logger.Error(err, "Failed to initialize Actions service client for updating an existing runner scale set")
return ctrl.Result{}, err
@@ -597,7 +589,7 @@ func (r *AutoscalingRunnerSetReconciler) deleteRunnerScaleSet(ctx context.Contex
return nil
}

actionsClient, err := r.actionsClientFor(ctx, autoscalingRunnerSet)
actionsClient, err := r.GetActionsService(ctx, autoscalingRunnerSet)
if err != nil {
logger.Error(err, "Failed to initialize Actions service client for updating an existing runner scale set")
return err
@@ -622,7 +614,7 @@ func (r *AutoscalingRunnerSetReconciler) deleteRunnerScaleSet(ctx context.Contex
}

func (r *AutoscalingRunnerSetReconciler) createEphemeralRunnerSet(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, log logr.Logger) (ctrl.Result, error) {
desiredRunnerSet, err := r.ResourceBuilder.newEphemeralRunnerSet(autoscalingRunnerSet)
desiredRunnerSet, err := r.newEphemeralRunnerSet(autoscalingRunnerSet)
if err != nil {
log.Error(err, "Could not create EphemeralRunnerSet")
return ctrl.Result{}, err
@@ -643,13 +635,7 @@ ...
return ctrl.Result{}, nil
}

func (r *AutoscalingRunnerSetReconciler) createAutoScalingListenerForRunnerSet(
ctx context.Context,
autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet,
ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet,
githubSecret *corev1.Secret,
log logr.Logger,
) (ctrl.Result, error) {
func (r *AutoscalingRunnerSetReconciler) createAutoScalingListenerForRunnerSet(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet, log logr.Logger) (ctrl.Result, error) {
var imagePullSecrets []corev1.LocalObjectReference
for _, imagePullSecret := range r.DefaultRunnerScaleSetListenerImagePullSecrets {
imagePullSecrets = append(imagePullSecrets, corev1.LocalObjectReference{
@@ -657,14 +643,7 @@ ...
})
}

autoscalingListener, err := r.ResourceBuilder.newAutoScalingListener(
autoscalingRunnerSet,
ephemeralRunnerSet,
githubSecret,
r.ControllerNamespace,
r.DefaultRunnerScaleSetListenerImage,
imagePullSecrets,
)
autoscalingListener, err := r.newAutoScalingListener(autoscalingRunnerSet, ephemeralRunnerSet, r.ControllerNamespace, r.DefaultRunnerScaleSetListenerImage, imagePullSecrets)
if err != nil {
log.Error(err, "Could not create AutoscalingListener spec")
return ctrl.Result{}, err
@@ -689,74 +668,6 @@ func (r *AutoscalingRunnerSetReconciler) listEphemeralRunnerSets(ctx context.Con
return &EphemeralRunnerSets{list: list}, nil
}

func (r *AutoscalingRunnerSetReconciler) actionsClientFor(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet) (actions.ActionsService, error) {
var configSecret corev1.Secret
if err := r.Get(ctx, types.NamespacedName{Namespace: autoscalingRunnerSet.Namespace, Name: autoscalingRunnerSet.Spec.GitHubConfigSecret}, &configSecret); err != nil {
return nil, fmt.Errorf("failed to find GitHub config secret: %w", err)
}

opts, err := r.actionsClientOptionsFor(ctx, autoscalingRunnerSet)
if err != nil {
return nil, fmt.Errorf("failed to get actions client options: %w", err)
}

return r.ActionsClient.GetClientFromSecret(
ctx,
autoscalingRunnerSet.Spec.GitHubConfigUrl,
autoscalingRunnerSet.Namespace,
configSecret.Data,
opts...,
)
}

func (r *AutoscalingRunnerSetReconciler) actionsClientOptionsFor(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet) ([]actions.ClientOption, error) {
var options []actions.ClientOption

if autoscalingRunnerSet.Spec.Proxy != nil {
proxyFunc, err := autoscalingRunnerSet.Spec.Proxy.ProxyFunc(func(s string) (*corev1.Secret, error) {
var secret corev1.Secret
err := r.Get(ctx, types.NamespacedName{Namespace: autoscalingRunnerSet.Namespace, Name: s}, &secret)
if err != nil {
return nil, fmt.Errorf("failed to get proxy secret %s: %w", s, err)
}

return &secret, nil
})
if err != nil {
return nil, fmt.Errorf("failed to get proxy func: %w", err)
}

options = append(options, actions.WithProxy(proxyFunc))
}

tlsConfig := autoscalingRunnerSet.Spec.GitHubServerTLS
if tlsConfig != nil {
pool, err := tlsConfig.ToCertPool(func(name, key string) ([]byte, error) {
var configmap corev1.ConfigMap
err := r.Get(
ctx,
types.NamespacedName{
Namespace: autoscalingRunnerSet.Namespace,
Name: name,
},
&configmap,
)
if err != nil {
return nil, fmt.Errorf("failed to get configmap %s: %w", name, err)
}

return []byte(configmap.Data[key]), nil
})
if err != nil {
return nil, fmt.Errorf("failed to get tls config: %w", err)
}

options = append(options, actions.WithRootCAs(pool))
}

return options, nil
}

// SetupWithManager sets up the controller with the Manager.
func (r *AutoscalingRunnerSetReconciler) SetupWithManager(mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr).

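The removed actionsClientOptionsFor helper is what folded proxy and TLS settings into the Actions client; that responsibility now lives behind the SecretResolver. For reference, a minimal standalone sketch of the root-CA half of that logic, using only the standard library (this is a sketch of the shape of the work tlsConfig.ToCertPool delegates to, with the fetch-from-ConfigMap step elided):

    package main

    import (
    	"crypto/x509"
    	"fmt"
    )

    // certPoolFromPEM builds a CA pool from PEM bytes; the resulting pool
    // would be handed to the client via something like actions.WithRootCAs.
    func certPoolFromPEM(pem []byte) (*x509.CertPool, error) {
    	pool := x509.NewCertPool()
    	if !pool.AppendCertsFromPEM(pem) {
    		return nil, fmt.Errorf("no certificates could be parsed")
    	}
    	return pool, nil
    }

    func main() {
    	if _, err := certPoolFromPEM([]byte("not a certificate")); err != nil {
    		fmt.Println(err) // expected: no certificates could be parsed
    	}
    }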
@@ -70,7 +70,12 @@ var _ = Describe("Test AutoScalingRunnerSet controller", Ordered, func() {
Log: logf.Log,
ControllerNamespace: autoscalingNS.Name,
DefaultRunnerScaleSetListenerImage: "ghcr.io/actions/arc",
ActionsClient: fake.NewMultiClient(),
ResourceBuilder: ResourceBuilder{
SecretResolver: &SecretResolver{
k8sClient: k8sClient,
multiClient: fake.NewMultiClient(),
},
},
}
err := controller.SetupWithManager(mgr)
Expect(err).NotTo(HaveOccurred(), "failed to setup controller")
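Every test setup in these hunks swaps the flat ActionsClient field for a SecretResolver injected through ResourceBuilder. A compressed sketch of that dependency shape, with the types reduced to what the wiring needs (field names match the diff; the bodies are assumptions, not the repository's code):

    package main

    import "fmt"

    // MultiClient stands in for actions.MultiClient.
    type MultiClient interface{ Name() string }

    type fakeMultiClient struct{}

    func (fakeMultiClient) Name() string { return "fake" }

    // SecretResolver pairs a Kubernetes client with an Actions multi-client,
    // mirroring the fields used in the diff (k8sClient, multiClient).
    type SecretResolver struct {
    	k8sClient   any // the controller-runtime client in the real code
    	multiClient MultiClient
    }

    // ResourceBuilder carries the resolver into each reconciler by embedding.
    type ResourceBuilder struct {
    	SecretResolver *SecretResolver
    }

    func main() {
    	rb := ResourceBuilder{SecretResolver: &SecretResolver{multiClient: fakeMultiClient{}}}
    	fmt.Println(rb.SecretResolver.multiClient.Name())
    }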
@@ -280,10 +285,10 @@ ...
// This should trigger re-creation of EphemeralRunnerSet and Listener
patched := autoscalingRunnerSet.DeepCopy()
patched.Spec.Template.Spec.PriorityClassName = "test-priority-class"
if patched.ObjectMeta.Annotations == nil {
patched.ObjectMeta.Annotations = make(map[string]string)
if patched.Annotations == nil {
patched.Annotations = make(map[string]string)
}
patched.ObjectMeta.Annotations[annotationKeyValuesHash] = "test-hash"
patched.Annotations[annotationKeyValuesHash] = "test-hash"
err = k8sClient.Patch(ctx, patched, client.MergeFrom(autoscalingRunnerSet))
Expect(err).NotTo(HaveOccurred(), "failed to patch AutoScalingRunnerSet")
autoscalingRunnerSet = patched.DeepCopy()
@@ -383,7 +388,7 @@ ...
Expect(err).NotTo(HaveOccurred(), "failed to get Listener")

patched = autoscalingRunnerSet.DeepCopy()
patched.ObjectMeta.Annotations[annotationKeyValuesHash] = "hash-changes"
patched.Annotations[annotationKeyValuesHash] = "hash-changes"
err = k8sClient.Patch(ctx, patched, client.MergeFrom(autoscalingRunnerSet))
Expect(err).NotTo(HaveOccurred(), "failed to patch AutoScalingRunnerSet")

@@ -476,101 +481,6 @@ ...
autoscalingRunnerSetTestInterval,
).Should(BeEquivalentTo("testgroup2"), "AutoScalingRunnerSet should have the runner group in its annotation")
})

It("should re-create the listener when the github secret changes", func() {
// Wait till the listener is created
listener := new(v1alpha1.AutoscalingListener)
Eventually(
func() error {
return k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerName(autoscalingRunnerSet), Namespace: autoscalingRunnerSet.Namespace}, listener)
},
autoscalingRunnerSetTestTimeout,
autoscalingRunnerSetTestInterval,
).Should(Succeed(), "Listener should be created")

actionsClient, err := controller.actionsClientFor(ctx, autoscalingRunnerSet)
Expect(err).NotTo(HaveOccurred(), "failed to get actions client")

listenerCreationTimestamp := listener.ObjectMeta.CreationTimestamp
listenerHash := listener.ObjectMeta.Annotations[annotationKeyRunnerSpecHash]

githubSecret := new(corev1.Secret)
Eventually(
func() error {
return k8sClient.Get(
ctx,
client.ObjectKey{
Name: configSecret.ObjectMeta.Name,
Namespace: configSecret.ObjectMeta.Namespace,
},
githubSecret,
)
},
autoscalingRunnerSetTestTimeout,
autoscalingRunnerSetTestInterval,
).Should(Succeed(), "Failed to fetch the github secret")

githubSecret.Data["update"] = []byte("update")
err = k8sClient.Update(ctx, githubSecret)
Expect(err).NotTo(HaveOccurred(), "failed to update the github secret")

updatedGitHubSecret := new(corev1.Secret)
Eventually(
func() error {
err := k8sClient.Get(
ctx,
client.ObjectKey{
Name: configSecret.ObjectMeta.Name,
Namespace: configSecret.ObjectMeta.Namespace,
},
updatedGitHubSecret,
)
if err != nil {
return err
}

if _, ok := updatedGitHubSecret.Data["update"]; !ok {
return fmt.Errorf("secret update not yet present")
}
return nil
},
autoscalingRunnerSetTestTimeout,
autoscalingRunnerSetTestInterval,
).Should(Succeed(), "Failed to eventually figure out github secret data update")

Eventually(
func() error {
updatedListener := new(v1alpha1.AutoscalingListener)
err := k8sClient.Get(
ctx,
client.ObjectKey{
Name: scaleSetListenerName(autoscalingRunnerSet),
Namespace: autoscalingRunnerSet.Namespace,
},
listener,
)
if err != nil {
return err
}

if updatedListener.CreationTimestamp == listenerCreationTimestamp {
return fmt.Errorf("creation timestamp not updated yet")
}
if updatedListener.Annotations[annotationKeyRunnerSpecHash] == listenerHash {
return fmt.Errorf("hash not updated yet")
}

return nil
},
autoscalingRunnerSetTestTimeout,
autoscalingRunnerSetTestInterval,
).Should(Succeed(), "Listener should be re-created")

actionsClientAfterUpdate, err := controller.actionsClientFor(ctx, autoscalingRunnerSet)
Expect(err).NotTo(HaveOccurred(), "failed to get actions client")

Expect(actionsClientAfterUpdate.(*fake.FakeClient).ID).NotTo(BeEquivalentTo(actionsClient.(*fake.FakeClient).ID), "expected new client to be used")
})
})

Context("When updating an AutoscalingRunnerSet with running or pending jobs", func() {
@@ -641,10 +551,10 @@ ...
// Patch the AutoScalingRunnerSet image which should trigger
// the recreation of the Listener and EphemeralRunnerSet
patched := autoscalingRunnerSet.DeepCopy()
if patched.ObjectMeta.Annotations == nil {
patched.ObjectMeta.Annotations = make(map[string]string)
if patched.Annotations == nil {
patched.Annotations = make(map[string]string)
}
patched.ObjectMeta.Annotations[annotationKeyValuesHash] = "testgroup2"
patched.Annotations[annotationKeyValuesHash] = "testgroup2"
patched.Spec.Template.Spec = corev1.PodSpec{
Containers: []corev1.Container{
{
@@ -772,33 +682,40 @@ var _ = Describe("Test AutoScalingController updates", Ordered, func() {
autoscalingNS, mgr = createNamespace(GinkgoT(), k8sClient)
configSecret = createDefaultSecret(GinkgoT(), k8sClient, autoscalingNS.Name)

multiClient := fake.NewMultiClient(
fake.WithDefaultClient(
fake.NewFakeClient(
fake.WithUpdateRunnerScaleSet(
&actions.RunnerScaleSet{
Id: 1,
Name: "testset_update",
RunnerGroupId: 1,
RunnerGroupName: "testgroup",
Labels: []actions.Label{{Type: "test", Name: "test"}},
RunnerSetting: actions.RunnerSetting{},
CreatedOn: time.Now(),
RunnerJitConfigUrl: "test.test.test",
Statistics: nil,
},
nil,
),
),
nil,
),
)

controller := &AutoscalingRunnerSetReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
Log: logf.Log,
ControllerNamespace: autoscalingNS.Name,
DefaultRunnerScaleSetListenerImage: "ghcr.io/actions/arc",
ActionsClient: fake.NewMultiClient(
fake.WithDefaultClient(
fake.NewFakeClient(
fake.WithUpdateRunnerScaleSet(
&actions.RunnerScaleSet{
Id: 1,
Name: "testset_update",
RunnerGroupId: 1,
RunnerGroupName: "testgroup",
Labels: []actions.Label{{Type: "test", Name: "test"}},
RunnerSetting: actions.RunnerSetting{},
CreatedOn: time.Now(),
RunnerJitConfigUrl: "test.test.test",
Statistics: nil,
},
nil,
),
),
nil,
),
),
ResourceBuilder: ResourceBuilder{
SecretResolver: &SecretResolver{
k8sClient: k8sClient,
multiClient: multiClient,
},
},
}
err := controller.SetupWithManager(mgr)
Expect(err).NotTo(HaveOccurred(), "failed to setup controller")
@@ -913,7 +830,12 @@ var _ = Describe("Test AutoscalingController creation failures", Ordered, func()
Log: logf.Log,
ControllerNamespace: autoscalingNS.Name,
DefaultRunnerScaleSetListenerImage: "ghcr.io/actions/arc",
ActionsClient: fake.NewMultiClient(),
ResourceBuilder: ResourceBuilder{
SecretResolver: &SecretResolver{
k8sClient: k8sClient,
multiClient: fake.NewMultiClient(),
},
},
}
err := controller.SetupWithManager(mgr)
Expect(err).NotTo(HaveOccurred(), "failed to setup controller")
@@ -970,7 +892,7 @@ ...
autoscalingRunnerSetTestInterval,
).Should(BeEquivalentTo(autoscalingRunnerSetFinalizerName), "AutoScalingRunnerSet should have a finalizer")

ars.ObjectMeta.Annotations = make(map[string]string)
ars.Annotations = make(map[string]string)
err = k8sClient.Update(ctx, ars)
Expect(err).NotTo(HaveOccurred(), "Update autoscaling runner set without annotation should be successful")

@@ -1032,14 +954,19 @@ var _ = Describe("Test client optional configuration", Ordered, func() {
ctx = context.Background()
autoscalingNS, mgr = createNamespace(GinkgoT(), k8sClient)
configSecret = createDefaultSecret(GinkgoT(), k8sClient, autoscalingNS.Name)

multiClient := actions.NewMultiClient(logr.Discard())
controller = &AutoscalingRunnerSetReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
Log: logf.Log,
ControllerNamespace: autoscalingNS.Name,
DefaultRunnerScaleSetListenerImage: "ghcr.io/actions/arc",
ActionsClient: actions.NewMultiClient(logr.Discard()),
ResourceBuilder: ResourceBuilder{
SecretResolver: &SecretResolver{
k8sClient: k8sClient,
multiClient: multiClient,
},
},
}

err := controller.SetupWithManager(mgr)
@@ -1222,7 +1149,12 @@ ...
Log: logf.Log,
ControllerNamespace: autoscalingNS.Name,
DefaultRunnerScaleSetListenerImage: "ghcr.io/actions/arc",
ActionsClient: fake.NewMultiClient(),
ResourceBuilder: ResourceBuilder{
SecretResolver: &SecretResolver{
k8sClient: k8sClient,
multiClient: fake.NewMultiClient(),
},
},
}
err = controller.SetupWithManager(mgr)
Expect(err).NotTo(HaveOccurred(), "failed to setup controller")
@@ -1231,7 +1163,10 @@ ...
})

It("should be able to make requests to a server using root CAs", func() {
controller.ActionsClient = actions.NewMultiClient(logr.Discard())
controller.SecretResolver = &SecretResolver{
k8sClient: k8sClient,
multiClient: actions.NewMultiClient(logr.Discard()),
}

certsFolder := filepath.Join(
"../../",
@@ -1266,7 +1201,7 @@ ...
Spec: v1alpha1.AutoscalingRunnerSetSpec{
GitHubConfigUrl: server.ConfigURLForOrg("my-org"),
GitHubConfigSecret: configSecret.Name,
GitHubServerTLS: &v1alpha1.GitHubServerTLSConfig{
GitHubServerTLS: &v1alpha1.TLSConfig{
CertificateFrom: &v1alpha1.TLSCertificateSource{
ConfigMapKeyRef: &corev1.ConfigMapKeySelector{
LocalObjectReference: corev1.LocalObjectReference{
@@ -1319,7 +1254,7 @@ ...
Spec: v1alpha1.AutoscalingRunnerSetSpec{
GitHubConfigUrl: "https://github.com/owner/repo",
GitHubConfigSecret: configSecret.Name,
GitHubServerTLS: &v1alpha1.GitHubServerTLSConfig{
GitHubServerTLS: &v1alpha1.TLSConfig{
CertificateFrom: &v1alpha1.TLSCertificateSource{
ConfigMapKeyRef: &corev1.ConfigMapKeySelector{
LocalObjectReference: corev1.LocalObjectReference{
@@ -1383,7 +1318,7 @@ ...
Spec: v1alpha1.AutoscalingRunnerSetSpec{
GitHubConfigUrl: "https://github.com/owner/repo",
GitHubConfigSecret: configSecret.Name,
GitHubServerTLS: &v1alpha1.GitHubServerTLSConfig{
GitHubServerTLS: &v1alpha1.TLSConfig{
CertificateFrom: &v1alpha1.TLSCertificateSource{
ConfigMapKeyRef: &corev1.ConfigMapKeySelector{
LocalObjectReference: corev1.LocalObjectReference{
@@ -1456,7 +1391,12 @@ var _ = Describe("Test external permissions cleanup", Ordered, func() {
Log: logf.Log,
ControllerNamespace: autoscalingNS.Name,
DefaultRunnerScaleSetListenerImage: "ghcr.io/actions/arc",
ActionsClient: fake.NewMultiClient(),
ResourceBuilder: ResourceBuilder{
SecretResolver: &SecretResolver{
k8sClient: k8sClient,
multiClient: fake.NewMultiClient(),
},
},
}
err := controller.SetupWithManager(mgr)
Expect(err).NotTo(HaveOccurred(), "failed to setup controller")
@@ -1614,7 +1554,12 @@ ...
Log: logf.Log,
ControllerNamespace: autoscalingNS.Name,
DefaultRunnerScaleSetListenerImage: "ghcr.io/actions/arc",
ActionsClient: fake.NewMultiClient(),
ResourceBuilder: ResourceBuilder{
SecretResolver: &SecretResolver{
k8sClient: k8sClient,
multiClient: fake.NewMultiClient(),
},
},
}
err := controller.SetupWithManager(mgr)
Expect(err).NotTo(HaveOccurred(), "failed to setup controller")
@@ -1822,7 +1767,12 @@ var _ = Describe("Test resource version and build version mismatch", func() {
Log: logf.Log,
ControllerNamespace: autoscalingNS.Name,
DefaultRunnerScaleSetListenerImage: "ghcr.io/actions/arc",
ActionsClient: fake.NewMultiClient(),
ResourceBuilder: ResourceBuilder{
SecretResolver: &SecretResolver{
k8sClient: k8sClient,
multiClient: fake.NewMultiClient(),
},
},
}
err := controller.SetupWithManager(mgr)
Expect(err).NotTo(HaveOccurred(), "failed to setup controller")

@@ -28,6 +28,7 @@ import (
"github.com/go-logr/logr"
corev1 "k8s.io/api/core/v1"
kerrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
ctrl "sigs.k8s.io/controller-runtime"
@@ -44,12 +45,24 @@ const (
// EphemeralRunnerReconciler reconciles a EphemeralRunner object
type EphemeralRunnerReconciler struct {
client.Client
Log logr.Logger
Scheme *runtime.Scheme
ActionsClient actions.MultiClient
Log logr.Logger
Scheme *runtime.Scheme
ResourceBuilder
}

// precompute backoff durations for failed ephemeral runners
// the len(failedRunnerBackoff) must be equal to maxFailures + 1
var failedRunnerBackoff = []time.Duration{
0,
5 * time.Second,
10 * time.Second,
20 * time.Second,
40 * time.Second,
80 * time.Second,
}

const maxFailures = 5

// +kubebuilder:rbac:groups=actions.github.com,resources=ephemeralrunners,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=actions.github.com,resources=ephemeralrunners/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=actions.github.com,resources=ephemeralrunners/finalizers,verbs=get;list;watch;create;update;patch;delete
@@ -70,7 +83,7 @@ func (r *EphemeralRunnerReconciler) Reconcile(ctx context.Context, req ctrl.Requ
return ctrl.Result{}, client.IgnoreNotFound(err)
}

if !ephemeralRunner.ObjectMeta.DeletionTimestamp.IsZero() {
if !ephemeralRunner.DeletionTimestamp.IsZero() {
if !controllerutil.ContainsFinalizer(ephemeralRunner, ephemeralRunnerFinalizerName) {
return ctrl.Result{}, nil
}
@@ -173,6 +186,29 @@ ...
}
}

if len(ephemeralRunner.Status.Failures) > maxFailures {
log.Info(fmt.Sprintf("EphemeralRunner has failed more than %d times. Deleting ephemeral runner so it can be re-created", maxFailures))
if err := r.Delete(ctx, ephemeralRunner); err != nil {
log.Error(fmt.Errorf("failed to delete ephemeral runner after %d failures: %w", maxFailures, err), "Failed to delete ephemeral runner")
return ctrl.Result{}, err
}

return ctrl.Result{}, nil
}

now := metav1.Now()
lastFailure := ephemeralRunner.Status.LastFailure()
backoffDuration := failedRunnerBackoff[len(ephemeralRunner.Status.Failures)]
nextReconciliation := lastFailure.Add(backoffDuration)
if !lastFailure.IsZero() && now.Before(&metav1.Time{Time: nextReconciliation}) {
log.Info("Backing off the next reconciliation due to failure",
"lastFailure", lastFailure,
"nextReconciliation", nextReconciliation,
"requeueAfter", nextReconciliation.Sub(now.Time),
)
return ctrl.Result{RequeueAfter: nextReconciliation.Sub(now.Time)}, nil
}

secret := new(corev1.Secret)
if err := r.Get(ctx, req.NamespacedName, secret); err != nil {
if !kerrors.IsNotFound(err) {
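The hunk above is the heart of the new failure handling: the backoff table is indexed by the current failure count, and reconciliation is requeued until lastFailure plus that backoff has elapsed. A standalone sketch of the arithmetic, with plain time.Time standing in for metav1.Time (not the controller's actual code):

    package main

    import (
    	"fmt"
    	"time"
    )

    var failedRunnerBackoff = []time.Duration{
    	0,
    	5 * time.Second,
    	10 * time.Second,
    	20 * time.Second,
    	40 * time.Second,
    	80 * time.Second,
    }

    // requeueAfter returns how long to wait before retrying a runner that has
    // failed `failures` times, given the timestamp of the last failure.
    func requeueAfter(lastFailure time.Time, failures int, now time.Time) time.Duration {
    	next := lastFailure.Add(failedRunnerBackoff[failures])
    	if wait := next.Sub(now); wait > 0 {
    		return wait // still inside the backoff window
    	}
    	return 0 // safe to reconcile immediately
    }

    func main() {
    	now := time.Now()
    	fmt.Println(requeueAfter(now.Add(-3*time.Second), 1, now))  // ~2s left
    	fmt.Println(requeueAfter(now.Add(-10*time.Second), 1, now)) // 0, window passed
    }

Note the direction of the subtraction: the requeue delay is the time remaining until the next reconciliation, which is why the RequeueAfter value above must be nextReconciliation minus now, not the reverse.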
@@ -196,39 +232,28 @@ ...

pod := new(corev1.Pod)
if err := r.Get(ctx, req.NamespacedName, pod); err != nil {
switch {
case !kerrors.IsNotFound(err):
if !kerrors.IsNotFound(err) {
log.Error(err, "Failed to fetch the pod")
return ctrl.Result{}, err
}

case len(ephemeralRunner.Status.Failures) > 5:
log.Info("EphemeralRunner has failed more than 5 times. Marking it as failed")
errMessage := fmt.Sprintf("Pod has failed to start more than 5 times: %s", pod.Status.Message)
if err := r.markAsFailed(ctx, ephemeralRunner, errMessage, ReasonTooManyPodFailures, log); err != nil {
// Pod was not found. Create if the pod has never been created
log.Info("Creating new EphemeralRunner pod.")
result, err := r.createPod(ctx, ephemeralRunner, secret, log)
switch {
case err == nil:
return result, nil
case kerrors.IsInvalid(err) || kerrors.IsForbidden(err):
log.Error(err, "Failed to create a pod due to unrecoverable failure")
errMessage := fmt.Sprintf("Failed to create the pod: %v", err)
if err := r.markAsFailed(ctx, ephemeralRunner, errMessage, ReasonInvalidPodFailure, log); err != nil {
log.Error(err, "Failed to set ephemeral runner to phase Failed")
return ctrl.Result{}, err
}
return ctrl.Result{}, nil

default:
// Pod was not found. Create if the pod has never been created
log.Info("Creating new EphemeralRunner pod.")
result, err := r.createPod(ctx, ephemeralRunner, secret, log)
switch {
case err == nil:
return result, nil
case kerrors.IsInvalid(err) || kerrors.IsForbidden(err):
log.Error(err, "Failed to create a pod due to unrecoverable failure")
errMessage := fmt.Sprintf("Failed to create the pod: %v", err)
if err := r.markAsFailed(ctx, ephemeralRunner, errMessage, ReasonInvalidPodFailure, log); err != nil {
log.Error(err, "Failed to set ephemeral runner to phase Failed")
return ctrl.Result{}, err
}
return ctrl.Result{}, nil
default:
log.Error(err, "Failed to create the pod")
return ctrl.Result{}, err
}
log.Error(err, "Failed to create the pod")
return ctrl.Result{}, err
}
}

@@ -319,7 +344,7 @@ func (r *EphemeralRunnerReconciler) cleanupResources(ctx context.Context, epheme
err := r.Get(ctx, types.NamespacedName{Namespace: ephemeralRunner.Namespace, Name: ephemeralRunner.Name}, pod)
switch {
case err == nil:
if pod.ObjectMeta.DeletionTimestamp.IsZero() {
if pod.DeletionTimestamp.IsZero() {
log.Info("Deleting the runner pod")
if err := r.Delete(ctx, pod); err != nil && !kerrors.IsNotFound(err) {
return fmt.Errorf("failed to delete pod: %w", err)
@@ -339,7 +364,7 @@ ...
err = r.Get(ctx, types.NamespacedName{Namespace: ephemeralRunner.Namespace, Name: ephemeralRunner.Name}, secret)
switch {
case err == nil:
if secret.ObjectMeta.DeletionTimestamp.IsZero() {
if secret.DeletionTimestamp.IsZero() {
log.Info("Deleting the jitconfig secret")
if err := r.Delete(ctx, secret); err != nil && !kerrors.IsNotFound(err) {
return fmt.Errorf("failed to delete secret: %w", err)
@@ -393,7 +418,7 @@ func (r *EphemeralRunnerReconciler) cleanupRunnerLinkedPods(ctx context.Context,
var errs []error
for i := range runnerLinkedPodList.Items {
linkedPod := &runnerLinkedPodList.Items[i]
if !linkedPod.ObjectMeta.DeletionTimestamp.IsZero() {
if !linkedPod.DeletionTimestamp.IsZero() {
continue
}

@@ -409,7 +434,7 @@ ...
func (r *EphemeralRunnerReconciler) cleanupRunnerLinkedSecrets(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) error {
runnerLinkedLabels := client.MatchingLabels(
map[string]string{
"runner-pod": ephemeralRunner.ObjectMeta.Name,
"runner-pod": ephemeralRunner.Name,
},
)
var runnerLinkedSecretList corev1.SecretList
@@ -427,7 +452,7 @@ ...
var errs []error
for i := range runnerLinkedSecretList.Items {
s := &runnerLinkedSecretList.Items[i]
if !s.ObjectMeta.DeletionTimestamp.IsZero() {
if !s.DeletionTimestamp.IsZero() {
continue
}

@@ -474,7 +499,7 @@ func (r *EphemeralRunnerReconciler) markAsFinished(ctx context.Context, ephemera
// deletePodAsFailed is responsible for deleting the pod and updating the .Status.Failures for tracking failure count.
// It should not be responsible for setting the status to Failed.
func (r *EphemeralRunnerReconciler) deletePodAsFailed(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, pod *corev1.Pod, log logr.Logger) error {
if pod.ObjectMeta.DeletionTimestamp.IsZero() {
if pod.DeletionTimestamp.IsZero() {
log.Info("Deleting the ephemeral runner pod", "podId", pod.UID)
if err := r.Delete(ctx, pod); err != nil && !kerrors.IsNotFound(err) {
return fmt.Errorf("failed to delete pod with status failed: %w", err)
@@ -484,9 +509,9 @@ ...
log.Info("Updating ephemeral runner status to track the failure count")
if err := patchSubResource(ctx, r.Status(), ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) {
if obj.Status.Failures == nil {
obj.Status.Failures = make(map[string]bool)
obj.Status.Failures = make(map[string]metav1.Time)
}
obj.Status.Failures[string(pod.UID)] = true
obj.Status.Failures[string(pod.UID)] = metav1.Now()
obj.Status.Ready = false
obj.Status.Reason = pod.Status.Reason
obj.Status.Message = pod.Status.Message
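Status.Failures changes from a map[string]bool to a map[string]metav1.Time so the controller can compute the most recent failure for the backoff above. A sketch of that bookkeeping, with LastFailure written out as an assumption about its behavior (the real method lives on the status type):

    package main

    import (
    	"fmt"
    	"time"
    )

    // failures maps a pod UID to the time that pod was deleted as failed,
    // mirroring the new map[string]metav1.Time status field.
    type failures map[string]time.Time

    // lastFailure is an assumed equivalent of Status.LastFailure():
    // the most recent timestamp across all recorded failures.
    func (f failures) lastFailure() time.Time {
    	var last time.Time
    	for _, t := range f {
    		if t.After(last) {
    			last = t
    		}
    	}
    	return last
    }

    func main() {
    	f := failures{}
    	f["pod-uid-1"] = time.Now().Add(-time.Minute)
    	f["pod-uid-2"] = time.Now()
    	fmt.Println(len(f), f.lastFailure().Unix())
    }

Keying by pod UID keeps the count stable across status patches: re-recording the same failed pod overwrites its entry instead of inflating the failure count.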
@@ -503,7 +528,7 @@ ...
func (r *EphemeralRunnerReconciler) updateStatusWithRunnerConfig(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) (*ctrl.Result, error) {
// Runner is not registered with the service. We need to register it first
log.Info("Creating ephemeral runner JIT config")
actionsClient, err := r.actionsClientFor(ctx, ephemeralRunner)
actionsClient, err := r.GetActionsService(ctx, ephemeralRunner)
if err != nil {
return &ctrl.Result{}, fmt.Errorf("failed to get actions client for generating JIT config: %w", err)
}
@@ -640,7 +665,7 @@ func (r *EphemeralRunnerReconciler) createPod(ctx context.Context, runner *v1alp
}

log.Info("Creating new pod for ephemeral runner")
newPod := r.ResourceBuilder.newEphemeralRunnerPod(ctx, runner, secret, envs...)
newPod := r.newEphemeralRunnerPod(ctx, runner, secret, envs...)

if err := ctrl.SetControllerReference(runner, newPod, r.Scheme); err != nil {
log.Error(err, "Failed to set controller reference to a new pod")
@@ -665,7 +690,7 @@ ...

func (r *EphemeralRunnerReconciler) createSecret(ctx context.Context, runner *v1alpha1.EphemeralRunner, log logr.Logger) (*ctrl.Result, error) {
log.Info("Creating new secret for ephemeral runner")
jitSecret := r.ResourceBuilder.newEphemeralRunnerJitSecret(runner)
jitSecret := r.newEphemeralRunnerJitSecret(runner)

if err := ctrl.SetControllerReference(runner, jitSecret, r.Scheme); err != nil {
return &ctrl.Result{}, fmt.Errorf("failed to set controller reference: %w", err)
@@ -727,77 +752,10 @@ func (r *EphemeralRunnerReconciler) updateRunStatusFromPod(ctx context.Context,
return nil
}

func (r *EphemeralRunnerReconciler) actionsClientFor(ctx context.Context, runner *v1alpha1.EphemeralRunner) (actions.ActionsService, error) {
secret := new(corev1.Secret)
if err := r.Get(ctx, types.NamespacedName{Namespace: runner.Namespace, Name: runner.Spec.GitHubConfigSecret}, secret); err != nil {
return nil, fmt.Errorf("failed to get secret: %w", err)
}

opts, err := r.actionsClientOptionsFor(ctx, runner)
if err != nil {
return nil, fmt.Errorf("failed to get actions client options: %w", err)
}

return r.ActionsClient.GetClientFromSecret(
ctx,
runner.Spec.GitHubConfigUrl,
runner.Namespace,
secret.Data,
opts...,
)
}

func (r *EphemeralRunnerReconciler) actionsClientOptionsFor(ctx context.Context, runner *v1alpha1.EphemeralRunner) ([]actions.ClientOption, error) {
var opts []actions.ClientOption
if runner.Spec.Proxy != nil {
proxyFunc, err := runner.Spec.Proxy.ProxyFunc(func(s string) (*corev1.Secret, error) {
var secret corev1.Secret
err := r.Get(ctx, types.NamespacedName{Namespace: runner.Namespace, Name: s}, &secret)
if err != nil {
return nil, fmt.Errorf("failed to get proxy secret %s: %w", s, err)
}

return &secret, nil
})
if err != nil {
return nil, fmt.Errorf("failed to get proxy func: %w", err)
}

opts = append(opts, actions.WithProxy(proxyFunc))
}

tlsConfig := runner.Spec.GitHubServerTLS
if tlsConfig != nil {
pool, err := tlsConfig.ToCertPool(func(name, key string) ([]byte, error) {
var configmap corev1.ConfigMap
err := r.Get(
ctx,
types.NamespacedName{
Namespace: runner.Namespace,
Name: name,
},
&configmap,
)
if err != nil {
return nil, fmt.Errorf("failed to get configmap %s: %w", name, err)
}

return []byte(configmap.Data[key]), nil
})
if err != nil {
return nil, fmt.Errorf("failed to get tls config: %w", err)
}

opts = append(opts, actions.WithRootCAs(pool))
}

return opts, nil
}

// runnerRegisteredWithService checks if the runner is still registered with the service
// Returns found=false and err=nil if ephemeral runner does not exist in GitHub service and should be deleted
func (r EphemeralRunnerReconciler) runnerRegisteredWithService(ctx context.Context, runner *v1alpha1.EphemeralRunner, log logr.Logger) (found bool, err error) {
actionsClient, err := r.actionsClientFor(ctx, runner)
actionsClient, err := r.GetActionsService(ctx, runner)
if err != nil {
return false, fmt.Errorf("failed to get Actions client for ScaleSet: %w", err)
}
@@ -824,7 +782,7 @@ ...
}

func (r *EphemeralRunnerReconciler) deleteRunnerFromService(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) error {
client, err := r.actionsClientFor(ctx, ephemeralRunner)
client, err := r.GetActionsService(ctx, ephemeralRunner)
if err != nil {
return fmt.Errorf("failed to get actions client for runner: %w", err)
}

@@ -30,7 +30,7 @@ import (

const (
ephemeralRunnerTimeout = time.Second * 20
ephemeralRunnerInterval = time.Millisecond * 250
ephemeralRunnerInterval = time.Millisecond * 10
runnerImage = "ghcr.io/actions/actions-runner:latest"
)

@@ -107,10 +107,15 @@ var _ = Describe("EphemeralRunner", func() {
configSecret = createDefaultSecret(GinkgoT(), k8sClient, autoscalingNS.Name)

controller = &EphemeralRunnerReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
Log: logf.Log,
ActionsClient: fake.NewMultiClient(),
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
Log: logf.Log,
ResourceBuilder: ResourceBuilder{
SecretResolver: &SecretResolver{
k8sClient: mgr.GetClient(),
multiClient: fake.NewMultiClient(),
},
},
}

err := controller.SetupWithManager(mgr)
@@ -528,44 +533,26 @@ var _ = Describe("EphemeralRunner", func() {
).Should(BeEquivalentTo(""))
})

It("It should not re-create pod indefinitely", func() {
It("It should eventually delete ephemeral runner after consecutive failures", func() {
updated := new(v1alpha1.EphemeralRunner)
pod := new(corev1.Pod)
Eventually(
func() (bool, error) {
err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, updated)
if err != nil {
return false, err
}

err = k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, pod)
if err != nil {
if kerrors.IsNotFound(err) && len(updated.Status.Failures) > 5 {
return true, nil
}

return false, err
}

pod.Status.ContainerStatuses = append(pod.Status.ContainerStatuses, corev1.ContainerStatus{
Name: v1alpha1.EphemeralRunnerContainerName,
State: corev1.ContainerState{
Terminated: &corev1.ContainerStateTerminated{
ExitCode: 1,
},
},
})
err = k8sClient.Status().Update(ctx, pod)
Expect(err).To(BeNil(), "Failed to update pod status")
return false, fmt.Errorf("pod haven't failed for 5 times.")
func() error {
return k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, updated)
},
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
).Should(BeEquivalentTo(true), "we should stop creating pod after 5 failures")
).Should(Succeed(), "failed to get ephemeral runner")

failEphemeralRunnerPod := func() *corev1.Pod {
pod := new(corev1.Pod)
Eventually(
func() error {
return k8sClient.Get(ctx, client.ObjectKey{Name: updated.Name, Namespace: updated.Namespace}, pod)
},
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
).Should(Succeed(), "failed to get ephemeral runner pod")

// In case we still have a pod created due to controller-runtime cache delay, mark the container as exited
err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, pod)
|
||||
if err == nil {
|
||||
pod.Status.ContainerStatuses = append(pod.Status.ContainerStatuses, corev1.ContainerStatus{
|
||||
Name: v1alpha1.EphemeralRunnerContainerName,
|
||||
State: corev1.ContainerState{
|
||||
@@ -576,25 +563,70 @@ var _ = Describe("EphemeralRunner", func() {
|
||||
})
|
||||
err := k8sClient.Status().Update(ctx, pod)
|
||||
Expect(err).To(BeNil(), "Failed to update pod status")
|
||||
|
||||
return pod
|
||||
}
|
||||
|
||||
// EphemeralRunner should failed with reason TooManyPodFailures
|
||||
Eventually(func() (string, error) {
|
||||
err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, updated)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return updated.Status.Reason, nil
|
||||
}, ephemeralRunnerTimeout, ephemeralRunnerInterval).Should(BeEquivalentTo("TooManyPodFailures"), "Reason should be TooManyPodFailures")
|
||||
for i := range 5 {
|
||||
pod := failEphemeralRunnerPod()
|
||||
|
||||
// EphemeralRunner should not have any pod
|
||||
Eventually(func() (bool, error) {
|
||||
err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, pod)
|
||||
if err == nil {
|
||||
return false, nil
|
||||
}
|
||||
return kerrors.IsNotFound(err), nil
|
||||
}, ephemeralRunnerTimeout, ephemeralRunnerInterval).Should(BeEquivalentTo(true))
|
||||
Eventually(
|
||||
func() (int, error) {
|
||||
err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, updated)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return len(updated.Status.Failures), nil
|
||||
},
|
||||
ephemeralRunnerTimeout,
|
||||
ephemeralRunnerInterval,
|
||||
).Should(BeEquivalentTo(i + 1))
|
||||
|
||||
Eventually(
|
||||
func() error {
|
||||
nextPod := new(corev1.Pod)
|
||||
err := k8sClient.Get(ctx, client.ObjectKey{Name: pod.Name, Namespace: pod.Namespace}, nextPod)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if nextPod.UID != pod.UID {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("pod not recreated")
|
||||
},
|
||||
).WithTimeout(20*time.Second).WithPolling(10*time.Millisecond).Should(Succeed(), "pod should be recreated")
|
||||
|
||||
Eventually(
|
||||
func() (bool, error) {
|
||||
pod := new(corev1.Pod)
|
||||
err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, pod)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
for _, cs := range pod.Status.ContainerStatuses {
|
||||
if cs.Name == v1alpha1.EphemeralRunnerContainerName {
|
||||
return cs.State.Terminated == nil, nil
|
||||
}
|
||||
}
|
||||
|
||||
return true, nil
|
||||
},
|
||||
).WithTimeout(20*time.Second).WithPolling(10*time.Millisecond).Should(BeEquivalentTo(true), "pod should be terminated")
|
||||
}
|
||||
|
||||
failEphemeralRunnerPod()
|
||||
|
||||
Eventually(
|
||||
func() (bool, error) {
|
||||
err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, updated)
|
||||
if kerrors.IsNotFound(err) {
|
||||
return true, nil
|
||||
}
|
||||
return false, err
|
||||
},
|
||||
ephemeralRunnerTimeout,
|
||||
ephemeralRunnerInterval,
|
||||
).Should(BeTrue(), "Ephemeral runner should eventually be deleted")
|
||||
})
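The recreation check in the loop above keys off the pod UID: a Get by the same name succeeds for both the old and the new pod, so only a changed UID proves the controller actually deleted and recreated it. A minimal sketch of the same pattern in isolation (assuming a Gomega suite with `k8sClient` and `ctx` in scope, as in these tests):

waitForRecreation := func(old *corev1.Pod) {
	Eventually(func() error {
		// Get by the same name; a stale cache read of the old pod still succeeds,
		// so compare UIDs to detect the replacement.
		next := new(corev1.Pod)
		if err := k8sClient.Get(ctx, client.ObjectKeyFromObject(old), next); err != nil {
			return err
		}
		if next.UID == old.UID {
			return fmt.Errorf("pod not recreated yet")
		}
		return nil
	}).WithTimeout(20 * time.Second).WithPolling(10 * time.Millisecond).Should(Succeed())
}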

It("It should re-create pod on eviction", func() {
@@ -762,22 +794,27 @@ var _ = Describe("EphemeralRunner", func() {
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
Log: logf.Log,
ActionsClient: fake.NewMultiClient(
fake.WithDefaultClient(
fake.NewFakeClient(
fake.WithGetRunner(
ResourceBuilder: ResourceBuilder{
SecretResolver: &SecretResolver{
k8sClient: mgr.GetClient(),
multiClient: fake.NewMultiClient(
fake.WithDefaultClient(
fake.NewFakeClient(
fake.WithGetRunner(
nil,
&actions.ActionsError{
StatusCode: http.StatusNotFound,
Err: &actions.ActionsExceptionError{
ExceptionName: "AgentNotFoundException",
},
},
),
),
nil,
&actions.ActionsError{
StatusCode: http.StatusNotFound,
Err: &actions.ActionsExceptionError{
ExceptionName: "AgentNotFoundException",
},
},
),
),
nil,
),
),
},
},
}
err := controller.SetupWithManager(mgr)
Expect(err).To(BeNil(), "failed to setup controller")
@@ -834,10 +871,15 @@ var _ = Describe("EphemeralRunner", func() {
configSecret = createDefaultSecret(GinkgoT(), k8sClient, autoScalingNS.Name)

controller = &EphemeralRunnerReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
Log: logf.Log,
ActionsClient: fake.NewMultiClient(),
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
Log: logf.Log,
ResourceBuilder: ResourceBuilder{
SecretResolver: &SecretResolver{
k8sClient: mgr.GetClient(),
multiClient: fake.NewMultiClient(),
},
},
}
err := controller.SetupWithManager(mgr)
Expect(err).To(BeNil(), "failed to setup controller")
@@ -847,7 +889,12 @@ var _ = Describe("EphemeralRunner", func() {

It("uses an actions client with proxy transport", func() {
// Use an actual client
controller.ActionsClient = actions.NewMultiClient(logr.Discard())
controller.ResourceBuilder = ResourceBuilder{
SecretResolver: &SecretResolver{
k8sClient: mgr.GetClient(),
multiClient: actions.NewMultiClient(logr.Discard()),
},
}

proxySuccessfulllyCalled := false
proxy := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
@@ -998,10 +1045,15 @@ var _ = Describe("EphemeralRunner", func() {
Expect(err).NotTo(HaveOccurred(), "failed to create configmap with root CAs")

controller = &EphemeralRunnerReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
Log: logf.Log,
ActionsClient: fake.NewMultiClient(),
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
Log: logf.Log,
ResourceBuilder: ResourceBuilder{
SecretResolver: &SecretResolver{
k8sClient: mgr.GetClient(),
multiClient: fake.NewMultiClient(),
},
},
}

err = controller.SetupWithManager(mgr)
@@ -1032,11 +1084,16 @@ var _ = Describe("EphemeralRunner", func() {
server.StartTLS()

// Use an actual client
controller.ActionsClient = actions.NewMultiClient(logr.Discard())
controller.ResourceBuilder = ResourceBuilder{
SecretResolver: &SecretResolver{
k8sClient: mgr.GetClient(),
multiClient: actions.NewMultiClient(logr.Discard()),
},
}

ephemeralRunner := newExampleRunner("test-runner", autoScalingNS.Name, configSecret.Name)
ephemeralRunner.Spec.GitHubConfigUrl = server.ConfigURLForOrg("my-org")
ephemeralRunner.Spec.GitHubServerTLS = &v1alpha1.GitHubServerTLSConfig{
ephemeralRunner.Spec.GitHubServerTLS = &v1alpha1.TLSConfig{
CertificateFrom: &v1alpha1.TLSCertificateSource{
ConfigMapKeyRef: &corev1.ConfigMapKeySelector{
LocalObjectReference: corev1.LocalObjectReference{

@@ -83,7 +83,7 @@ func (r *EphemeralRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl.R
}

// Requested deletion does not need reconciled.
if !ephemeralRunnerSet.ObjectMeta.DeletionTimestamp.IsZero() {
if !ephemeralRunnerSet.DeletionTimestamp.IsZero() {
if !controllerutil.ContainsFinalizer(ephemeralRunnerSet, ephemeralRunnerSetFinalizerName) {
return ctrl.Result{}, nil
}
@@ -331,7 +331,7 @@ func (r *EphemeralRunnerSetReconciler) cleanUpEphemeralRunners(ctx context.Conte
return false, nil
}

actionsClient, err := r.actionsClientFor(ctx, ephemeralRunnerSet)
actionsClient, err := r.GetActionsService(ctx, ephemeralRunnerSet)
if err != nil {
return false, err
}
@@ -360,7 +360,7 @@ func (r *EphemeralRunnerSetReconciler) createEphemeralRunners(ctx context.Contex
// Track multiple errors at once and return the bundle.
errs := make([]error, 0)
for i := 0; i < count; i++ {
ephemeralRunner := r.ResourceBuilder.newEphemeralRunner(runnerSet)
ephemeralRunner := r.newEphemeralRunner(runnerSet)
if runnerSet.Spec.EphemeralRunnerSpec.Proxy != nil {
ephemeralRunner.Spec.ProxySecretRef = proxyEphemeralRunnerSetSecretName(runnerSet)
}
@@ -439,7 +439,7 @@ func (r *EphemeralRunnerSetReconciler) deleteIdleEphemeralRunners(ctx context.Co
log.Info("No pending or running ephemeral runners running at this time for scale down")
return nil
}
actionsClient, err := r.actionsClientFor(ctx, ephemeralRunnerSet)
actionsClient, err := r.GetActionsService(ctx, ephemeralRunnerSet)
if err != nil {
return fmt.Errorf("failed to create actions client for ephemeral runner replica set: %w", err)
}
@@ -502,73 +502,6 @@ func (r *EphemeralRunnerSetReconciler) deleteEphemeralRunnerWithActionsClient(ct
return true, nil
}

func (r *EphemeralRunnerSetReconciler) actionsClientFor(ctx context.Context, rs *v1alpha1.EphemeralRunnerSet) (actions.ActionsService, error) {
secret := new(corev1.Secret)
if err := r.Get(ctx, types.NamespacedName{Namespace: rs.Namespace, Name: rs.Spec.EphemeralRunnerSpec.GitHubConfigSecret}, secret); err != nil {
return nil, fmt.Errorf("failed to get secret: %w", err)
}

opts, err := r.actionsClientOptionsFor(ctx, rs)
if err != nil {
return nil, fmt.Errorf("failed to get actions client options: %w", err)
}

return r.ActionsClient.GetClientFromSecret(
ctx,
rs.Spec.EphemeralRunnerSpec.GitHubConfigUrl,
rs.Namespace,
secret.Data,
opts...,
)
}

func (r *EphemeralRunnerSetReconciler) actionsClientOptionsFor(ctx context.Context, rs *v1alpha1.EphemeralRunnerSet) ([]actions.ClientOption, error) {
var opts []actions.ClientOption
if rs.Spec.EphemeralRunnerSpec.Proxy != nil {
proxyFunc, err := rs.Spec.EphemeralRunnerSpec.Proxy.ProxyFunc(func(s string) (*corev1.Secret, error) {
var secret corev1.Secret
err := r.Get(ctx, types.NamespacedName{Namespace: rs.Namespace, Name: s}, &secret)
if err != nil {
return nil, fmt.Errorf("failed to get secret %s: %w", s, err)
}

return &secret, nil
})
if err != nil {
return nil, fmt.Errorf("failed to get proxy func: %w", err)
}

opts = append(opts, actions.WithProxy(proxyFunc))
}

tlsConfig := rs.Spec.EphemeralRunnerSpec.GitHubServerTLS
if tlsConfig != nil {
pool, err := tlsConfig.ToCertPool(func(name, key string) ([]byte, error) {
var configmap corev1.ConfigMap
err := r.Get(
ctx,
types.NamespacedName{
Namespace: rs.Namespace,
Name: name,
},
&configmap,
)
if err != nil {
return nil, fmt.Errorf("failed to get configmap %s: %w", name, err)
}

return []byte(configmap.Data[key]), nil
})
if err != nil {
return nil, fmt.Errorf("failed to get tls config: %w", err)
}

opts = append(opts, actions.WithRootCAs(pool))
}

return opts, nil
}

// SetupWithManager sets up the controller with the Manager.
func (r *EphemeralRunnerSetReconciler) SetupWithManager(mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr).
@@ -641,7 +574,7 @@ func newEphemeralRunnerState(ephemeralRunnerList *v1alpha1.EphemeralRunnerList)
if err == nil && patchID > ephemeralRunnerState.latestPatchID {
ephemeralRunnerState.latestPatchID = patchID
}
if !r.ObjectMeta.DeletionTimestamp.IsZero() {
if !r.DeletionTimestamp.IsZero() {
ephemeralRunnerState.deleting = append(ephemeralRunnerState.deleting, r)
continue
}
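The two helpers deleted above (`actionsClientFor` and `actionsClientOptionsFor`) are superseded by the shared `SecretResolver` added later in this diff (controllers/actions.github.com/secret_resolver.go), reached through the reconciler's embedded `ResourceBuilder`. As the hunks show, each call site shrinks to a single line:

// inside a reconcile path, replacing the bespoke per-reconciler client plumbing
actionsClient, err := r.GetActionsService(ctx, ephemeralRunnerSet)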

@@ -10,6 +10,7 @@ import (
"os"
"path/filepath"
"strings"
"testing"
"time"

corev1 "k8s.io/api/core/v1"
@@ -21,6 +22,7 @@ import (
"github.com/go-logr/logr"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/stretchr/testify/require"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
@@ -35,6 +37,10 @@ const (
ephemeralRunnerSetTestGitHubToken = "gh_token"
)

func TestPrecomputedConstants(t *testing.T) {
require.Equal(t, len(failedRunnerBackoff), maxFailures+1)
}
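This guard pins the invariant the backoff logic appears to depend on: `failedRunnerBackoff` is indexed by the number of failures recorded so far, which ranges from 0 through `maxFailures` inclusive, hence exactly `maxFailures+1` entries. A minimal sketch of the lookup it protects (the helper name is illustrative, not from this diff):

func backoffFor(failures int) time.Duration {
	// failures can equal maxFailures, so the slice needs maxFailures+1 entries
	if failures > maxFailures {
		failures = maxFailures
	}
	return failedRunnerBackoff[failures]
}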

var _ = Describe("Test EphemeralRunnerSet controller", func() {
var ctx context.Context
var mgr ctrl.Manager
@@ -48,10 +54,15 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
configSecret = createDefaultSecret(GinkgoT(), k8sClient, autoscalingNS.Name)

controller := &EphemeralRunnerSetReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
Log: logf.Log,
ActionsClient: fake.NewMultiClient(),
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
Log: logf.Log,
ResourceBuilder: ResourceBuilder{
SecretResolver: &SecretResolver{
k8sClient: mgr.GetClient(),
multiClient: fake.NewMultiClient(),
},
},
}
err := controller.SetupWithManager(mgr)
Expect(err).NotTo(HaveOccurred(), "failed to setup controller")
@@ -1098,10 +1109,15 @@ var _ = Describe("Test EphemeralRunnerSet controller with proxy settings", func(
configSecret = createDefaultSecret(GinkgoT(), k8sClient, autoscalingNS.Name)

controller := &EphemeralRunnerSetReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
Log: logf.Log,
ActionsClient: actions.NewMultiClient(logr.Discard()),
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
Log: logf.Log,
ResourceBuilder: ResourceBuilder{
SecretResolver: &SecretResolver{
k8sClient: mgr.GetClient(),
multiClient: actions.NewMultiClient(logr.Discard()),
},
},
}
err := controller.SetupWithManager(mgr)
Expect(err).NotTo(HaveOccurred(), "failed to setup controller")
@@ -1397,10 +1413,15 @@ var _ = Describe("Test EphemeralRunnerSet controller with custom root CA", func(
Expect(err).NotTo(HaveOccurred(), "failed to create configmap with root CAs")

controller := &EphemeralRunnerSetReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
Log: logf.Log,
ActionsClient: actions.NewMultiClient(logr.Discard()),
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
Log: logf.Log,
ResourceBuilder: ResourceBuilder{
SecretResolver: &SecretResolver{
k8sClient: mgr.GetClient(),
multiClient: actions.NewMultiClient(logr.Discard()),
},
},
}
err = controller.SetupWithManager(mgr)
Expect(err).NotTo(HaveOccurred(), "failed to setup controller")
@@ -1439,7 +1460,7 @@ var _ = Describe("Test EphemeralRunnerSet controller with custom root CA", func(
EphemeralRunnerSpec: v1alpha1.EphemeralRunnerSpec{
GitHubConfigUrl: server.ConfigURLForOrg("my-org"),
GitHubConfigSecret: configSecret.Name,
GitHubServerTLS: &v1alpha1.GitHubServerTLSConfig{
GitHubServerTLS: &v1alpha1.TLSConfig{
CertificateFrom: &v1alpha1.TLSCertificateSource{
ConfigMapKeyRef: &corev1.ConfigMapKeySelector{
LocalObjectReference: corev1.LocalObjectReference{

@@ -5,17 +5,20 @@ import (
"context"
"encoding/json"
"fmt"
"maps"
"math"
"net"
"strconv"
"strings"

"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1/appconfig"
"github.com/actions/actions-runner-controller/build"
listenerconfig "github.com/actions/actions-runner-controller/cmd/ghalistener/config"
ghalistenerconfig "github.com/actions/actions-runner-controller/cmd/ghalistener/config"
"github.com/actions/actions-runner-controller/github/actions"
"github.com/actions/actions-runner-controller/hash"
"github.com/actions/actions-runner-controller/logging"
"github.com/actions/actions-runner-controller/vault/azurekeyvault"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -71,6 +74,7 @@ func SetListenerEntrypoint(entrypoint string) {

type ResourceBuilder struct {
ExcludeLabelPropagationPrefixes []string
*SecretResolver
}

// boolPtr returns a pointer to a bool value
@@ -78,13 +82,7 @@ func boolPtr(v bool) *bool {
return &v
}

func (b *ResourceBuilder) newAutoScalingListener(
autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet,
ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet,
githubSecret *corev1.Secret,
namespace, image string,
imagePullSecrets []corev1.LocalObjectReference,
) (*v1alpha1.AutoscalingListener, error) {
func (b *ResourceBuilder) newAutoScalingListener(autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet, namespace, image string, imagePullSecrets []corev1.LocalObjectReference) (*v1alpha1.AutoscalingListener, error) {
runnerScaleSetId, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdAnnotationKey])
if err != nil {
return nil, err
@@ -108,7 +106,7 @@ func (b *ResourceBuilder) newAutoScalingListener(
})

annotations := map[string]string{
annotationKeyRunnerSpecHash: autoscalingRunnerSet.ListenerSpecHash(githubSecret),
annotationKeyRunnerSpecHash: autoscalingRunnerSet.ListenerSpecHash(),
annotationKeyValuesHash: autoscalingRunnerSet.Annotations[annotationKeyValuesHash],
}

@@ -126,6 +124,7 @@ func (b *ResourceBuilder) newAutoScalingListener(
Spec: v1alpha1.AutoscalingListenerSpec{
GitHubConfigUrl: autoscalingRunnerSet.Spec.GitHubConfigUrl,
GitHubConfigSecret: autoscalingRunnerSet.Spec.GitHubConfigSecret,
VaultConfig: autoscalingRunnerSet.VaultConfig(),
RunnerScaleSetId: runnerScaleSetId,
AutoscalingRunnerSetNamespace: autoscalingRunnerSet.Namespace,
AutoscalingRunnerSetName: autoscalingRunnerSet.Name,
@@ -165,7 +164,7 @@ func (lm *listenerMetricsServerConfig) containerPort() (corev1.ContainerPort, er
}, nil
}

func (b *ResourceBuilder) newScaleSetListenerConfig(autoscalingListener *v1alpha1.AutoscalingListener, secret *corev1.Secret, metricsConfig *listenerMetricsServerConfig, cert string) (*corev1.Secret, error) {
func (b *ResourceBuilder) newScaleSetListenerConfig(autoscalingListener *v1alpha1.AutoscalingListener, appConfig *appconfig.AppConfig, metricsConfig *listenerMetricsServerConfig, cert string) (*corev1.Secret, error) {
var (
metricsAddr = ""
metricsEndpoint = ""
@@ -175,30 +174,8 @@ func (b *ResourceBuilder) newScaleSetListenerConfig(autoscalingListener *v1alpha
metricsEndpoint = metricsConfig.endpoint
}

var appID int64
if id, ok := secret.Data["github_app_id"]; ok {
var err error
appID, err = strconv.ParseInt(string(id), 10, 64)
if err != nil {
return nil, fmt.Errorf("failed to convert github_app_id to int: %v", err)
}
}

var appInstallationID int64
if id, ok := secret.Data["github_app_installation_id"]; ok {
var err error
appInstallationID, err = strconv.ParseInt(string(id), 10, 64)
if err != nil {
return nil, fmt.Errorf("failed to convert github_app_installation_id to int: %v", err)
}
}

config := listenerconfig.Config{
config := ghalistenerconfig.Config{
ConfigureUrl: autoscalingListener.Spec.GitHubConfigUrl,
AppID: appID,
AppInstallationID: appInstallationID,
AppPrivateKey: string(secret.Data["github_app_private_key"]),
Token: string(secret.Data["github_token"]),
EphemeralRunnerSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
EphemeralRunnerSetName: autoscalingListener.Spec.EphemeralRunnerSetName,
MaxRunners: autoscalingListener.Spec.MaxRunners,
@@ -213,6 +190,24 @@ func (b *ResourceBuilder) newScaleSetListenerConfig(autoscalingListener *v1alpha
Metrics: autoscalingListener.Spec.Metrics,
}

vault := autoscalingListener.Spec.VaultConfig
if vault == nil {
config.AppConfig = appConfig
} else {
config.VaultType = vault.Type
config.VaultLookupKey = autoscalingListener.Spec.GitHubConfigSecret
config.AzureKeyVaultConfig = &azurekeyvault.Config{
TenantID: vault.AzureKeyVault.TenantID,
ClientID: vault.AzureKeyVault.ClientID,
URL: vault.AzureKeyVault.URL,
CertificatePath: vault.AzureKeyVault.CertificatePath,
}
}

if err := config.Validate(); err != nil {
return nil, fmt.Errorf("invalid listener config: %w", err)
}

var buf bytes.Buffer
if err := json.NewEncoder(&buf).Encode(config); err != nil {
return nil, fmt.Errorf("failed to encode config: %w", err)
@@ -229,7 +224,7 @@ func (b *ResourceBuilder) newScaleSetListenerConfig(autoscalingListener *v1alpha
}, nil
}
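The vault branch above picks one of two delivery modes for the listener's credentials: with no vault configured, the resolved `appconfig.AppConfig` is embedded directly in the serialized config; with a vault, only `VaultType`, `VaultLookupKey`, and the Azure Key Vault settings are embedded, leaving the listener to fetch the secret itself at startup. A sketch of the consumer-side branch this implies (illustrative only, not the listener's actual code):

if cfg.VaultType != "" {
	// resolve the app config from the configured vault using cfg.VaultLookupKey
} else {
	// use the embedded cfg.AppConfig as-is
}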

func (b *ResourceBuilder) newScaleSetListenerPod(autoscalingListener *v1alpha1.AutoscalingListener, podConfig *corev1.Secret, serviceAccount *corev1.ServiceAccount, secret *corev1.Secret, metricsConfig *listenerMetricsServerConfig, envs ...corev1.EnvVar) (*corev1.Pod, error) {
func (b *ResourceBuilder) newScaleSetListenerPod(autoscalingListener *v1alpha1.AutoscalingListener, podConfig *corev1.Secret, serviceAccount *corev1.ServiceAccount, metricsConfig *listenerMetricsServerConfig, envs ...corev1.EnvVar) (*corev1.Pod, error) {
listenerEnv := []corev1.EnvVar{
{
Name: "LISTENER_CONFIG_PATH",
@@ -284,9 +279,7 @@ func (b *ResourceBuilder) newScaleSetListenerPod(autoscalingListener *v1alpha1.A
}

labels := make(map[string]string, len(autoscalingListener.Labels))
for key, val := range autoscalingListener.Labels {
labels[key] = val
}
maps.Copy(labels, autoscalingListener.Labels)

newRunnerScaleSetListenerPod := &corev1.Pod{
TypeMeta: metav1.TypeMeta{
@@ -435,7 +428,7 @@ func mergeListenerContainer(base, from *corev1.Container) {
func (b *ResourceBuilder) newScaleSetListenerServiceAccount(autoscalingListener *v1alpha1.AutoscalingListener) *corev1.ServiceAccount {
return &corev1.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{
Name: scaleSetListenerServiceAccountName(autoscalingListener),
Name: autoscalingListener.Name,
Namespace: autoscalingListener.Namespace,
Labels: b.mergeLabels(autoscalingListener.Labels, map[string]string{
LabelKeyGitHubScaleSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
@@ -450,7 +443,7 @@ func (b *ResourceBuilder) newScaleSetListenerRole(autoscalingListener *v1alpha1.
rulesHash := hash.ComputeTemplateHash(&rules)
newRole := &rbacv1.Role{
ObjectMeta: metav1.ObjectMeta{
Name: scaleSetListenerRoleName(autoscalingListener),
Name: autoscalingListener.Name,
Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
Labels: b.mergeLabels(autoscalingListener.Labels, map[string]string{
LabelKeyGitHubScaleSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
@@ -484,7 +477,7 @@ func (b *ResourceBuilder) newScaleSetListenerRoleBinding(autoscalingListener *v1

newRoleBinding := &rbacv1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: scaleSetListenerRoleName(autoscalingListener),
Name: autoscalingListener.Name,
Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
Labels: b.mergeLabels(autoscalingListener.Labels, map[string]string{
LabelKeyGitHubScaleSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
@@ -502,25 +495,6 @@ func (b *ResourceBuilder) newScaleSetListenerRoleBinding(autoscalingListener *v1
return newRoleBinding
}

func (b *ResourceBuilder) newScaleSetListenerSecretMirror(autoscalingListener *v1alpha1.AutoscalingListener, secret *corev1.Secret) *corev1.Secret {
dataHash := hash.ComputeTemplateHash(&secret.Data)

newListenerSecret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: scaleSetListenerSecretMirrorName(autoscalingListener),
Namespace: autoscalingListener.Namespace,
Labels: b.mergeLabels(autoscalingListener.Labels, map[string]string{
LabelKeyGitHubScaleSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
LabelKeyGitHubScaleSetName: autoscalingListener.Spec.AutoscalingRunnerSetName,
"secret-data-hash": dataHash,
}),
},
Data: secret.DeepCopy().Data,
}

return newListenerSecret
}

func (b *ResourceBuilder) newEphemeralRunnerSet(autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet) (*v1alpha1.EphemeralRunnerSet, error) {
runnerScaleSetId, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdAnnotationKey])
if err != nil {
@@ -549,8 +523,8 @@ func (b *ResourceBuilder) newEphemeralRunnerSet(autoscalingRunnerSet *v1alpha1.A
newEphemeralRunnerSet := &v1alpha1.EphemeralRunnerSet{
TypeMeta: metav1.TypeMeta{},
ObjectMeta: metav1.ObjectMeta{
GenerateName: autoscalingRunnerSet.ObjectMeta.Name + "-",
Namespace: autoscalingRunnerSet.ObjectMeta.Namespace,
GenerateName: autoscalingRunnerSet.Name + "-",
Namespace: autoscalingRunnerSet.Namespace,
Labels: labels,
Annotations: newAnnotations,
OwnerReferences: []metav1.OwnerReference{
@@ -573,6 +547,7 @@ func (b *ResourceBuilder) newEphemeralRunnerSet(autoscalingRunnerSet *v1alpha1.A
Proxy: autoscalingRunnerSet.Spec.Proxy,
GitHubServerTLS: autoscalingRunnerSet.Spec.GitHubServerTLS,
PodTemplateSpec: autoscalingRunnerSet.Spec.Template,
VaultConfig: autoscalingRunnerSet.VaultConfig(),
},
},
}
@@ -594,6 +569,7 @@ func (b *ResourceBuilder) newEphemeralRunner(ephemeralRunnerSet *v1alpha1.Epheme
for key, val := range ephemeralRunnerSet.Annotations {
annotations[key] = val
}

annotations[AnnotationKeyPatchID] = strconv.Itoa(ephemeralRunnerSet.Spec.PatchID)
return &v1alpha1.EphemeralRunner{
TypeMeta: metav1.TypeMeta{},
@@ -623,18 +599,18 @@ func (b *ResourceBuilder) newEphemeralRunnerPod(ctx context.Context, runner *v1a
labels := map[string]string{}
annotations := map[string]string{}

for k, v := range runner.ObjectMeta.Labels {
for k, v := range runner.Labels {
labels[k] = v
}
for k, v := range runner.Spec.PodTemplateSpec.Labels {
for k, v := range runner.Spec.Labels {
labels[k] = v
}
labels["actions-ephemeral-runner"] = string(corev1.ConditionTrue)

for k, v := range runner.ObjectMeta.Annotations {
for k, v := range runner.Annotations {
annotations[k] = v
}
for k, v := range runner.Spec.PodTemplateSpec.Annotations {
for k, v := range runner.Spec.Annotations {
annotations[k] = v
}

@@ -646,8 +622,8 @@ func (b *ResourceBuilder) newEphemeralRunnerPod(ctx context.Context, runner *v1a
)

objectMeta := metav1.ObjectMeta{
Name: runner.ObjectMeta.Name,
Namespace: runner.ObjectMeta.Namespace,
Name: runner.Name,
Namespace: runner.Namespace,
Labels: labels,
Annotations: annotations,
OwnerReferences: []metav1.OwnerReference{
@@ -663,10 +639,10 @@ func (b *ResourceBuilder) newEphemeralRunnerPod(ctx context.Context, runner *v1a
}

newPod.ObjectMeta = objectMeta
newPod.Spec = runner.Spec.PodTemplateSpec.Spec
newPod.Spec.Containers = make([]corev1.Container, 0, len(runner.Spec.PodTemplateSpec.Spec.Containers))
newPod.Spec = runner.Spec.Spec
newPod.Spec.Containers = make([]corev1.Container, 0, len(runner.Spec.Spec.Containers))

for _, c := range runner.Spec.PodTemplateSpec.Spec.Containers {
for _, c := range runner.Spec.Spec.Containers {
if c.Name == v1alpha1.EphemeralRunnerContainerName {
c.Env = append(
c.Env,
@@ -719,30 +695,6 @@ func scaleSetListenerName(autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet) s
return fmt.Sprintf("%v-%v-listener", autoscalingRunnerSet.Name, namespaceHash)
}

func scaleSetListenerServiceAccountName(autoscalingListener *v1alpha1.AutoscalingListener) string {
namespaceHash := hash.FNVHashString(autoscalingListener.Spec.AutoscalingRunnerSetNamespace)
if len(namespaceHash) > 8 {
namespaceHash = namespaceHash[:8]
}
return fmt.Sprintf("%v-%v-listener", autoscalingListener.Spec.AutoscalingRunnerSetName, namespaceHash)
}

func scaleSetListenerRoleName(autoscalingListener *v1alpha1.AutoscalingListener) string {
namespaceHash := hash.FNVHashString(autoscalingListener.Spec.AutoscalingRunnerSetNamespace)
if len(namespaceHash) > 8 {
namespaceHash = namespaceHash[:8]
}
return fmt.Sprintf("%v-%v-listener", autoscalingListener.Spec.AutoscalingRunnerSetName, namespaceHash)
}

func scaleSetListenerSecretMirrorName(autoscalingListener *v1alpha1.AutoscalingListener) string {
namespaceHash := hash.FNVHashString(autoscalingListener.Spec.AutoscalingRunnerSetNamespace)
if len(namespaceHash) > 8 {
namespaceHash = namespaceHash[:8]
}
return fmt.Sprintf("%v-%v-listener", autoscalingListener.Spec.AutoscalingRunnerSetName, namespaceHash)
}

func proxyListenerSecretName(autoscalingListener *v1alpha1.AutoscalingListener) string {
namespaceHash := hash.FNVHashString(autoscalingListener.Spec.AutoscalingRunnerSetNamespace)
if len(namespaceHash) > 8 {

@@ -59,24 +59,7 @@ func TestLabelPropagation(t *testing.T) {
assert.Equal(t, autoscalingRunnerSet.Annotations[AnnotationKeyGitHubRunnerScaleSetName], ephemeralRunnerSet.Annotations[AnnotationKeyGitHubRunnerScaleSetName])
assert.Equal(t, autoscalingRunnerSet.Labels["arbitrary-label"], ephemeralRunnerSet.Labels["arbitrary-label"])

githubSecret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "test-scale-set",
Namespace: "test-ns",
},
Data: map[string][]byte{
"github_token": []byte("github_token"),
},
}

listener, err := b.newAutoScalingListener(
&autoscalingRunnerSet,
ephemeralRunnerSet,
githubSecret,
autoscalingRunnerSet.Namespace,
"test:latest",
nil,
)
listener, err := b.newAutoScalingListener(&autoscalingRunnerSet, ephemeralRunnerSet, autoscalingRunnerSet.Namespace, "test:latest", nil)
require.NoError(t, err)
assert.Equal(t, labelValueKubernetesPartOf, listener.Labels[LabelKeyKubernetesPartOf])
assert.Equal(t, "runner-scale-set-listener", listener.Labels[LabelKeyKubernetesComponent])
@@ -99,12 +82,7 @@ func TestLabelPropagation(t *testing.T) {
Name: "test",
},
}
listenerSecret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "test",
},
}
listenerPod, err := b.newScaleSetListenerPod(listener, &corev1.Secret{}, listenerServiceAccount, listenerSecret, nil)
listenerPod, err := b.newScaleSetListenerPod(listener, &corev1.Secret{}, listenerServiceAccount, nil)
require.NoError(t, err)
assert.Equal(t, listenerPod.Labels, listener.Labels)

@@ -137,16 +115,6 @@ func TestGitHubURLTrimLabelValues(t *testing.T) {
organization := strings.Repeat("b", 64)
repository := strings.Repeat("c", 64)

githubSecret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "test-scale-set",
Namespace: "test-ns",
},
Data: map[string][]byte{
"github_token": []byte("github_token"),
},
}

autoscalingRunnerSet := v1alpha1.AutoscalingRunnerSet{
ObjectMeta: metav1.ObjectMeta{
Name: "test-scale-set",
@@ -178,14 +146,7 @@ func TestGitHubURLTrimLabelValues(t *testing.T) {
assert.True(t, strings.HasSuffix(ephemeralRunnerSet.Labels[LabelKeyGitHubOrganization], trimLabelVauleSuffix))
assert.True(t, strings.HasSuffix(ephemeralRunnerSet.Labels[LabelKeyGitHubRepository], trimLabelVauleSuffix))

listener, err := b.newAutoScalingListener(
autoscalingRunnerSet,
ephemeralRunnerSet,
githubSecret,
autoscalingRunnerSet.Namespace,
"test:latest",
nil,
)
listener, err := b.newAutoScalingListener(autoscalingRunnerSet, ephemeralRunnerSet, autoscalingRunnerSet.Namespace, "test:latest", nil)
require.NoError(t, err)
assert.Len(t, listener.Labels[LabelKeyGitHubEnterprise], 0)
assert.Len(t, listener.Labels[LabelKeyGitHubOrganization], 63)
@@ -208,14 +169,7 @@ func TestGitHubURLTrimLabelValues(t *testing.T) {
assert.Len(t, ephemeralRunnerSet.Labels[LabelKeyGitHubOrganization], 0)
assert.Len(t, ephemeralRunnerSet.Labels[LabelKeyGitHubRepository], 0)

listener, err := b.newAutoScalingListener(
autoscalingRunnerSet,
ephemeralRunnerSet,
githubSecret,
autoscalingRunnerSet.Namespace,
"test:latest",
nil,
)
listener, err := b.newAutoScalingListener(autoscalingRunnerSet, ephemeralRunnerSet, autoscalingRunnerSet.Namespace, "test:latest", nil)
require.NoError(t, err)
assert.Len(t, listener.Labels[LabelKeyGitHubEnterprise], 63)
assert.True(t, strings.HasSuffix(ephemeralRunnerSet.Labels[LabelKeyGitHubEnterprise], trimLabelVauleSuffix))
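The length assertions reflect the Kubernetes limit of 63 characters for label values: values derived from long GitHub URL segments are truncated and tagged with a trim suffix rather than passed through verbatim. A sketch of that kind of truncation (the helper is illustrative; the real suffix constant in this code is `trimLabelVauleSuffix`):

func trimLabelValue(v, suffix string) string {
	const limit = 63 // Kubernetes maximum for a label value
	if len(v) <= limit {
		return v
	}
	return v[:limit-len(suffix)] + suffix
}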

280 controllers/actions.github.com/secret_resolver.go Normal file
@@ -0,0 +1,280 @@
package actionsgithubcom

import (
"context"
"encoding/json"
"fmt"
"net/http"
"net/url"
"strings"

"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1/appconfig"
"github.com/actions/actions-runner-controller/github/actions"
"github.com/actions/actions-runner-controller/vault"
"github.com/actions/actions-runner-controller/vault/azurekeyvault"
"golang.org/x/net/http/httpproxy"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
)

type SecretResolver struct {
k8sClient client.Client
multiClient actions.MultiClient
}

type SecretResolverOption func(*SecretResolver)

func NewSecretResolver(k8sClient client.Client, multiClient actions.MultiClient, opts ...SecretResolverOption) *SecretResolver {
if k8sClient == nil {
panic("k8sClient must not be nil")
}

secretResolver := &SecretResolver{
k8sClient: k8sClient,
multiClient: multiClient,
}

for _, opt := range opts {
opt(secretResolver)
}

return secretResolver
}

type ActionsGitHubObject interface {
client.Object
GitHubConfigUrl() string
GitHubConfigSecret() string
GitHubProxy() *v1alpha1.ProxyConfig
GitHubServerTLS() *v1alpha1.TLSConfig
VaultConfig() *v1alpha1.VaultConfig
VaultProxy() *v1alpha1.ProxyConfig
}

func (sr *SecretResolver) GetAppConfig(ctx context.Context, obj ActionsGitHubObject) (*appconfig.AppConfig, error) {
resolver, err := sr.resolverForObject(ctx, obj)
if err != nil {
return nil, fmt.Errorf("failed to get resolver for object: %v", err)
}

appConfig, err := resolver.appConfig(ctx, obj.GitHubConfigSecret())
if err != nil {
return nil, fmt.Errorf("failed to resolve app config: %v", err)
}

return appConfig, nil
}

func (sr *SecretResolver) GetActionsService(ctx context.Context, obj ActionsGitHubObject) (actions.ActionsService, error) {
resolver, err := sr.resolverForObject(ctx, obj)
if err != nil {
return nil, fmt.Errorf("failed to get resolver for object: %v", err)
}

appConfig, err := resolver.appConfig(ctx, obj.GitHubConfigSecret())
if err != nil {
return nil, fmt.Errorf("failed to resolve app config: %v", err)
}

var clientOptions []actions.ClientOption
if proxy := obj.GitHubProxy(); proxy != nil {
config := &httpproxy.Config{
NoProxy: strings.Join(proxy.NoProxy, ","),
}

if proxy.HTTP != nil {
u, err := url.Parse(proxy.HTTP.Url)
if err != nil {
return nil, fmt.Errorf("failed to parse proxy http url %q: %w", proxy.HTTP.Url, err)
}

if ref := proxy.HTTP.CredentialSecretRef; ref != "" {
u.User, err = resolver.proxyCredentials(ctx, ref)
if err != nil {
return nil, fmt.Errorf("failed to resolve proxy credentials: %v", err)
}
}

config.HTTPProxy = u.String()
}

if proxy.HTTPS != nil {
u, err := url.Parse(proxy.HTTPS.Url)
if err != nil {
return nil, fmt.Errorf("failed to parse proxy https url %q: %w", proxy.HTTPS.Url, err)
}

if ref := proxy.HTTPS.CredentialSecretRef; ref != "" {
u.User, err = resolver.proxyCredentials(ctx, ref)
if err != nil {
return nil, fmt.Errorf("failed to resolve proxy credentials: %v", err)
}
}

config.HTTPSProxy = u.String()
}

proxyFunc := func(req *http.Request) (*url.URL, error) {
return config.ProxyFunc()(req.URL)
}

clientOptions = append(clientOptions, actions.WithProxy(proxyFunc))
}

tlsConfig := obj.GitHubServerTLS()
if tlsConfig != nil {
pool, err := tlsConfig.ToCertPool(func(name, key string) ([]byte, error) {
var configmap corev1.ConfigMap
err := sr.k8sClient.Get(
ctx,
types.NamespacedName{
Namespace: obj.GetNamespace(),
Name: name,
},
&configmap,
)
if err != nil {
return nil, fmt.Errorf("failed to get configmap %s: %w", name, err)
}

return []byte(configmap.Data[key]), nil
})
if err != nil {
return nil, fmt.Errorf("failed to get tls config: %w", err)
}

clientOptions = append(clientOptions, actions.WithRootCAs(pool))
}

return sr.multiClient.GetClientFor(
ctx,
obj.GitHubConfigUrl(),
appConfig,
obj.GetNamespace(),
clientOptions...,
)
}

func (sr *SecretResolver) resolverForObject(ctx context.Context, obj ActionsGitHubObject) (resolver, error) {
vaultConfig := obj.VaultConfig()
if vaultConfig == nil || vaultConfig.Type == "" {
return &k8sResolver{
namespace: obj.GetNamespace(),
client: sr.k8sClient,
}, nil
}

var proxy *httpproxy.Config
if vaultProxy := obj.VaultProxy(); vaultProxy != nil {
p, err := vaultProxy.ToHTTPProxyConfig(func(s string) (*corev1.Secret, error) {
var secret corev1.Secret
err := sr.k8sClient.Get(ctx, types.NamespacedName{Name: s, Namespace: obj.GetNamespace()}, &secret)
if err != nil {
return nil, fmt.Errorf("failed to get secret %s: %w", s, err)
}
return &secret, nil
})
if err != nil {
return nil, fmt.Errorf("failed to create proxy config: %v", err)
}
proxy = p
}

switch vaultConfig.Type {
case vault.VaultTypeAzureKeyVault:
akv, err := azurekeyvault.New(azurekeyvault.Config{
TenantID: vaultConfig.AzureKeyVault.TenantID,
ClientID: vaultConfig.AzureKeyVault.ClientID,
URL: vaultConfig.AzureKeyVault.URL,
CertificatePath: vaultConfig.AzureKeyVault.CertificatePath,
Proxy: proxy,
})
if err != nil {
return nil, fmt.Errorf("failed to create Azure Key Vault client: %v", err)
}
return &vaultResolver{
vault: akv,
}, nil

default:
return nil, fmt.Errorf("unknown vault type %q", vaultConfig.Type)
}
}

type resolver interface {
appConfig(ctx context.Context, key string) (*appconfig.AppConfig, error)
proxyCredentials(ctx context.Context, key string) (*url.Userinfo, error)
}

type k8sResolver struct {
namespace string
client client.Client
}

func (r *k8sResolver) appConfig(ctx context.Context, key string) (*appconfig.AppConfig, error) {
nsName := types.NamespacedName{
Namespace: r.namespace,
Name: key,
}
secret := new(corev1.Secret)
if err := r.client.Get(
ctx,
nsName,
secret,
); err != nil {
return nil, fmt.Errorf("failed to get kubernetes secret: %q", nsName.String())
}

return appconfig.FromSecret(secret)
}

func (r *k8sResolver) proxyCredentials(ctx context.Context, key string) (*url.Userinfo, error) {
nsName := types.NamespacedName{Namespace: r.namespace, Name: key}
secret := new(corev1.Secret)
if err := r.client.Get(
ctx,
nsName,
secret,
); err != nil {
return nil, fmt.Errorf("failed to get kubernetes secret: %q", nsName.String())
}

return url.UserPassword(
string(secret.Data["username"]),
string(secret.Data["password"]),
), nil
}

type vaultResolver struct {
vault vault.Vault
}

func (r *vaultResolver) appConfig(ctx context.Context, key string) (*appconfig.AppConfig, error) {
val, err := r.vault.GetSecret(ctx, key)
if err != nil {
return nil, fmt.Errorf("failed to resolve secret: %v", err)
}

return appconfig.FromJSONString(val)
}

func (r *vaultResolver) proxyCredentials(ctx context.Context, key string) (*url.Userinfo, error) {
val, err := r.vault.GetSecret(ctx, key)
if err != nil {
return nil, fmt.Errorf("failed to resolve secret: %v", err)
}

type info struct {
Username string `json:"username"`
Password string `json:"password"`
}

var i info
if err := json.Unmarshal([]byte(val), &i); err != nil {
return nil, fmt.Errorf("failed to unmarshal info: %v", err)
}

return url.UserPassword(i.Username, i.Password), nil
}
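With this file in place, reconcilers no longer carry an ActionsClient field; they reach GitHub through the resolver embedded via `ResourceBuilder`, and the resolver decides per object whether credentials come from a Kubernetes Secret or a vault. A minimal wiring sketch matching the hunks above:

controller := &EphemeralRunnerSetReconciler{
	Client: mgr.GetClient(),
	Scheme: mgr.GetScheme(),
	Log:    logf.Log,
	ResourceBuilder: ResourceBuilder{
		SecretResolver: NewSecretResolver(mgr.GetClient(), actions.NewMultiClient(logr.Discard())),
	},
}
// inside Reconcile, the embedded resolver supplies the client:
// actionsClient, err := r.GetActionsService(ctx, ephemeralRunnerSet)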
@@ -20,6 +20,7 @@ import (
"os"
"path/filepath"
"testing"
"time"

"github.com/onsi/ginkgo/config"

@@ -79,6 +80,15 @@ var _ = BeforeSuite(func() {
k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})
Expect(err).ToNot(HaveOccurred())
Expect(k8sClient).ToNot(BeNil())

failedRunnerBackoff = []time.Duration{
20 * time.Millisecond,
20 * time.Millisecond,
20 * time.Millisecond,
20 * time.Millisecond,
20 * time.Millisecond,
20 * time.Millisecond,
}
})

var _ = AfterSuite(func() {

@@ -130,7 +130,7 @@ func (r *HorizontalRunnerAutoscalerReconciler) suggestReplicasByQueuedAndInProgr
jobs, resp, err := ghc.Actions.ListWorkflowJobs(context.TODO(), user, repoName, runID, &opt)
if err != nil {
r.Log.Error(err, "Error listing workflow jobs")
return //err
return // err
}
allJobs = append(allJobs, jobs.Jobs...)
if resp.NextPage == 0 {
@@ -345,7 +345,7 @@ func (r *HorizontalRunnerAutoscalerReconciler) suggestReplicasByPercentageRunner
}

var runnerPodList corev1.PodList
if err := r.Client.List(ctx, &runnerPodList, client.InNamespace(hra.Namespace), client.MatchingLabels(map[string]string{
if err := r.List(ctx, &runnerPodList, client.InNamespace(hra.Namespace), client.MatchingLabels(map[string]string{
kindLabel: hra.Spec.ScaleTargetRef.Name,
})); err != nil {
return nil, err

@@ -29,7 +29,7 @@ func newGithubClient(server *httptest.Server) *github.Client {
if err != nil {
panic(err)
}
client.Client.BaseURL = baseURL
client.BaseURL = baseURL

return client
}

@@ -82,8 +82,8 @@ func (s *batchScaler) Add(st *ScaleTarget) {
break batch
case st := <-s.queue:
nsName := types.NamespacedName{
Namespace: st.HorizontalRunnerAutoscaler.Namespace,
Name: st.HorizontalRunnerAutoscaler.Name,
Namespace: st.Namespace,
Name: st.Name,
}
b, ok := batches[nsName]
if !ok {
@@ -208,7 +208,7 @@ func (s *batchScaler) planBatchScale(ctx context.Context, batch batchScaleOperat
//
// In other words, updating HRA.spec.scaleTriggers[].duration does not result in delaying capacity reservations expiration any longer
// than the "intended" duration, which is the duration of the trigger when the reservation was created.
duration := copy.Spec.CapacityReservations[i].ExpirationTime.Time.Sub(copy.Spec.CapacityReservations[i].EffectiveTime.Time)
duration := copy.Spec.CapacityReservations[i].ExpirationTime.Sub(copy.Spec.CapacityReservations[i].EffectiveTime.Time)
copy.Spec.CapacityReservations[i].EffectiveTime = metav1.Time{Time: now}
copy.Spec.CapacityReservations[i].ExpirationTime = metav1.Time{Time: now.Add(duration)}
}
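The arithmetic above slides a reservation's window forward without stretching it: the original duration is computed first, then both timestamps are rebased on `now`. Worked through in isolation (using the same `metav1` types):

// a reservation valid 10:00 to 10:05 (duration 5m), rebased at now=10:03,
// becomes valid 10:03 to 10:08: still 5m, never longer than intended
duration := res.ExpirationTime.Sub(res.EffectiveTime.Time)
res.EffectiveTime = metav1.Time{Time: now}
res.ExpirationTime = metav1.Time{Time: now.Add(duration)}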
|
||||
|
||||
@@ -503,13 +503,13 @@ func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) getManagedRunnerGroup
|
||||
switch kind {
|
||||
case "RunnerSet":
|
||||
var rs v1alpha1.RunnerSet
|
||||
if err := autoscaler.Client.Get(context.Background(), types.NamespacedName{Namespace: hra.Namespace, Name: hra.Spec.ScaleTargetRef.Name}, &rs); err != nil {
|
||||
if err := autoscaler.Get(context.Background(), types.NamespacedName{Namespace: hra.Namespace, Name: hra.Spec.ScaleTargetRef.Name}, &rs); err != nil {
|
||||
return groups, err
|
||||
}
|
||||
o, e, g = rs.Spec.Organization, rs.Spec.Enterprise, rs.Spec.Group
|
||||
case "RunnerDeployment", "":
|
||||
var rd v1alpha1.RunnerDeployment
|
||||
if err := autoscaler.Client.Get(context.Background(), types.NamespacedName{Namespace: hra.Namespace, Name: hra.Spec.ScaleTargetRef.Name}, &rd); err != nil {
|
||||
if err := autoscaler.Get(context.Background(), types.NamespacedName{Namespace: hra.Namespace, Name: hra.Spec.ScaleTargetRef.Name}, &rd); err != nil {
|
||||
return groups, err
|
||||
}
|
||||
o, e, g = rd.Spec.Template.Spec.Organization, rd.Spec.Template.Spec.Enterprise, rd.Spec.Template.Spec.Group
|
||||
@@ -562,7 +562,7 @@ func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) getJobScaleTarget(ctx
|
||||
|
||||
HRA:
|
||||
for _, hra := range hras {
|
||||
if !hra.ObjectMeta.DeletionTimestamp.IsZero() {
|
||||
if !hra.DeletionTimestamp.IsZero() {
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -603,7 +603,7 @@ HRA:
|
||||
case "RunnerSet":
|
||||
var rs v1alpha1.RunnerSet
|
||||
|
||||
if err := autoscaler.Client.Get(context.Background(), types.NamespacedName{Namespace: hra.Namespace, Name: hra.Spec.ScaleTargetRef.Name}, &rs); err != nil {
|
||||
if err := autoscaler.Get(context.Background(), types.NamespacedName{Namespace: hra.Namespace, Name: hra.Spec.ScaleTargetRef.Name}, &rs); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -634,7 +634,7 @@ HRA:
|
||||
case "RunnerDeployment", "":
|
||||
var rd v1alpha1.RunnerDeployment
|
||||
|
||||
if err := autoscaler.Client.Get(context.Background(), types.NamespacedName{Namespace: hra.Namespace, Name: hra.Spec.ScaleTargetRef.Name}, &rd); err != nil {
|
||||
if err := autoscaler.Get(context.Background(), types.NamespacedName{Namespace: hra.Namespace, Name: hra.Spec.ScaleTargetRef.Name}, &rd); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -676,7 +676,7 @@ func getValidCapacityReservations(autoscaler *v1alpha1.HorizontalRunnerAutoscale
|
||||
now := time.Now()
|
||||
|
||||
for _, reservation := range autoscaler.Spec.CapacityReservations {
|
||||
if reservation.ExpirationTime.Time.After(now) {
|
||||
if reservation.ExpirationTime.After(now) {
|
||||
capacityReservations = append(capacityReservations, reservation)
|
||||
}
|
||||
}
|
||||
@@ -713,7 +713,7 @@ func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) indexer(rawObj client
|
||||
switch hra.Spec.ScaleTargetRef.Kind {
|
||||
case "", "RunnerDeployment":
|
||||
var rd v1alpha1.RunnerDeployment
|
||||
if err := autoscaler.Client.Get(context.Background(), types.NamespacedName{Namespace: hra.Namespace, Name: hra.Spec.ScaleTargetRef.Name}, &rd); err != nil {
|
||||
if err := autoscaler.Get(context.Background(), types.NamespacedName{Namespace: hra.Namespace, Name: hra.Spec.ScaleTargetRef.Name}, &rd); err != nil {
|
||||
autoscaler.Log.V(1).Info(fmt.Sprintf("RunnerDeployment not found with scale target ref name %s for hra %s", hra.Spec.ScaleTargetRef.Name, hra.Name))
|
||||
return nil
|
||||
}
|
||||
@@ -740,7 +740,7 @@ func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) indexer(rawObj client
|
||||
return keys
|
||||
case "RunnerSet":
|
||||
var rs v1alpha1.RunnerSet
|
||||
if err := autoscaler.Client.Get(context.Background(), types.NamespacedName{Namespace: hra.Namespace, Name: hra.Spec.ScaleTargetRef.Name}, &rs); err != nil {
|
||||
if err := autoscaler.Get(context.Background(), types.NamespacedName{Namespace: hra.Namespace, Name: hra.Spec.ScaleTargetRef.Name}, &rs); err != nil {
|
||||
autoscaler.Log.V(1).Info(fmt.Sprintf("RunnerSet not found with scale target ref name %s for hra %s", hra.Spec.ScaleTargetRef.Name, hra.Name))
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -71,7 +71,7 @@ func (r *HorizontalRunnerAutoscalerReconciler) Reconcile(ctx context.Context, re
|
||||
return ctrl.Result{}, client.IgnoreNotFound(err)
|
||||
}
|
||||
|
||||
if !hra.ObjectMeta.DeletionTimestamp.IsZero() {
|
||||
if !hra.DeletionTimestamp.IsZero() {
|
||||
r.GitHubClient.DeinitForHRA(&hra)
|
||||
|
||||
return ctrl.Result{}, nil
|
||||
@@ -91,7 +91,7 @@ func (r *HorizontalRunnerAutoscalerReconciler) Reconcile(ctx context.Context, re
|
||||
return ctrl.Result{}, client.IgnoreNotFound(err)
|
||||
}
|
||||
|
||||
if !rd.ObjectMeta.DeletionTimestamp.IsZero() {
|
||||
if !rd.DeletionTimestamp.IsZero() {
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
@@ -120,14 +120,14 @@ func (r *HorizontalRunnerAutoscalerReconciler) Reconcile(ctx context.Context, re
|
||||
copy.Spec.EffectiveTime = &metav1.Time{Time: *effectiveTime}
|
||||
}
|
||||
|
||||
if err := r.Client.Patch(ctx, copy, client.MergeFrom(&rd)); err != nil {
|
||||
if err := r.Patch(ctx, copy, client.MergeFrom(&rd)); err != nil {
|
||||
return fmt.Errorf("patching runnerdeployment to have %d replicas: %w", newDesiredReplicas, err)
|
||||
}
|
||||
} else if ephemeral && effectiveTime != nil {
|
||||
copy := rd.DeepCopy()
|
||||
copy.Spec.EffectiveTime = &metav1.Time{Time: *effectiveTime}
|
||||
|
||||
if err := r.Client.Patch(ctx, copy, client.MergeFrom(&rd)); err != nil {
|
||||
if err := r.Patch(ctx, copy, client.MergeFrom(&rd)); err != nil {
|
||||
return fmt.Errorf("patching runnerdeployment to have %d replicas: %w", newDesiredReplicas, err)
|
||||
}
|
||||
}
|
||||
@@ -142,7 +142,7 @@ func (r *HorizontalRunnerAutoscalerReconciler) Reconcile(ctx context.Context, re
|
||||
return ctrl.Result{}, client.IgnoreNotFound(err)
|
||||
}
|
||||
|
||||
if !rs.ObjectMeta.DeletionTimestamp.IsZero() {
|
||||
if !rs.DeletionTimestamp.IsZero() {
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
@@ -160,7 +160,7 @@ func (r *HorizontalRunnerAutoscalerReconciler) Reconcile(ctx context.Context, re
|
||||
org: rs.Spec.Organization,
|
||||
repo: rs.Spec.Repository,
|
||||
replicas: replicas,
|
||||
labels: rs.Spec.RunnerConfig.Labels,
|
||||
labels: rs.Spec.Labels,
|
||||
getRunnerMap: func() (map[string]struct{}, error) {
|
||||
// return the list of runners in namespace. Horizontal Runner Autoscaler should only be responsible for scaling resources in its own ns.
|
||||
var runnerPodList corev1.PodList
|
||||
@@ -224,14 +224,14 @@ func (r *HorizontalRunnerAutoscalerReconciler) Reconcile(ctx context.Context, re
|
||||
copy.Spec.EffectiveTime = &metav1.Time{Time: *effectiveTime}
|
||||
}
|
||||
|
||||
if err := r.Client.Patch(ctx, copy, client.MergeFrom(&rs)); err != nil {
|
||||
if err := r.Patch(ctx, copy, client.MergeFrom(&rs)); err != nil {
|
||||
return fmt.Errorf("patching runnerset to have %d replicas: %w", newDesiredReplicas, err)
|
||||
}
|
||||
} else if ephemeral && effectiveTime != nil {
|
||||
copy := rs.DeepCopy()
|
||||
copy.Spec.EffectiveTime = &metav1.Time{Time: *effectiveTime}
|
||||
|
||||
if err := r.Client.Patch(ctx, copy, client.MergeFrom(&rs)); err != nil {
|
||||
if err := r.Patch(ctx, copy, client.MergeFrom(&rs)); err != nil {
|
||||
return fmt.Errorf("patching runnerset to have %d replicas: %w", newDesiredReplicas, err)
|
||||
}
|
||||
}
|
||||
@@ -253,7 +253,7 @@ func (r *HorizontalRunnerAutoscalerReconciler) scaleTargetFromRD(ctx context.Con
|
||||
org: rd.Spec.Template.Spec.Organization,
|
||||
repo: rd.Spec.Template.Spec.Repository,
|
||||
replicas: rd.Spec.Replicas,
|
||||
labels: rd.Spec.Template.Spec.RunnerConfig.Labels,
|
||||
labels: rd.Spec.Template.Spec.Labels,
|
||||
getRunnerMap: func() (map[string]struct{}, error) {
|
||||
// return the list of runners in namespace. Horizontal Runner Autoscaler should only be responsible for scaling resources in its own ns.
|
||||
var runnerList v1alpha1.RunnerList
|
||||
@@ -484,7 +484,7 @@ func (r *HorizontalRunnerAutoscalerReconciler) computeReplicasWithCache(ghc *arc
|
||||
var reserved int
|
||||
|
||||
for _, reservation := range hra.Spec.CapacityReservations {
|
||||
if reservation.ExpirationTime.Time.After(now) {
|
||||
if reservation.ExpirationTime.After(now) {
|
||||
reserved += reservation.Replicas
|
||||
}
|
||||
}
|
||||
|
||||
@@ -20,12 +20,13 @@ import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
|
||||
"github.com/actions/actions-runner-controller/build"
|
||||
"github.com/actions/actions-runner-controller/hash"
|
||||
"github.com/go-logr/logr"
|
||||
@@ -107,12 +108,12 @@ func (r *RunnerReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr
|
||||
return ctrl.Result{}, client.IgnoreNotFound(err)
|
||||
}
|
||||
|
||||
if runner.ObjectMeta.DeletionTimestamp.IsZero() {
|
||||
finalizers, added := addFinalizer(runner.ObjectMeta.Finalizers, finalizerName)
|
||||
if runner.DeletionTimestamp.IsZero() {
|
||||
finalizers, added := addFinalizer(runner.Finalizers, finalizerName)
|
||||
|
||||
if added {
|
||||
newRunner := runner.DeepCopy()
|
||||
newRunner.ObjectMeta.Finalizers = finalizers
|
||||
newRunner.Finalizers = finalizers
|
||||
|
||||
if err := r.Update(ctx, newRunner); err != nil {
|
||||
log.Error(err, "Failed to update runner")
|
||||
@@ -271,11 +272,11 @@ func ephemeralRunnerContainerStatus(pod *corev1.Pod) *corev1.ContainerStatus {
|
||||
}
|
||||
|
||||
func (r *RunnerReconciler) processRunnerDeletion(runner v1alpha1.Runner, ctx context.Context, log logr.Logger, pod *corev1.Pod) (reconcile.Result, error) {
|
||||
finalizers, removed := removeFinalizer(runner.ObjectMeta.Finalizers, finalizerName)
|
||||
finalizers, removed := removeFinalizer(runner.Finalizers, finalizerName)
|
||||
|
||||
if removed {
|
||||
newRunner := runner.DeepCopy()
|
||||
newRunner.ObjectMeta.Finalizers = finalizers
|
||||
newRunner.Finalizers = finalizers
|
||||
|
||||
if err := r.Patch(ctx, newRunner, client.MergeFrom(&runner)); err != nil {
|
||||
log.Error(err, "Unable to remove finalizer")
|
||||
@@ -305,8 +306,8 @@ func (r *RunnerReconciler) processRunnerCreation(ctx context.Context, runner v1a
|
||||
if needsServiceAccount {
|
||||
serviceAccount := &corev1.ServiceAccount{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: runner.ObjectMeta.Name,
|
||||
Namespace: runner.ObjectMeta.Namespace,
|
||||
Name: runner.Name,
|
||||
Namespace: runner.Namespace,
|
||||
},
|
||||
}
|
||||
if res := r.createObject(ctx, serviceAccount, serviceAccount.ObjectMeta, &runner, log); res != nil {
|
||||
@@ -321,7 +322,7 @@ func (r *RunnerReconciler) processRunnerCreation(ctx context.Context, runner v1a
|
||||
APIGroups: []string{"actions.summerwind.dev"},
|
||||
Resources: []string{"runners/status"},
|
||||
Verbs: []string{"get", "update", "patch"},
|
||||
ResourceNames: []string{runner.ObjectMeta.Name},
|
||||
ResourceNames: []string{runner.Name},
|
||||
},
|
||||
}...)
|
||||
}
|
||||
@@ -359,8 +360,8 @@ func (r *RunnerReconciler) processRunnerCreation(ctx context.Context, runner v1a

 		role := &rbacv1.Role{
 			ObjectMeta: metav1.ObjectMeta{
-				Name:      runner.ObjectMeta.Name,
-				Namespace: runner.ObjectMeta.Namespace,
+				Name:      runner.Name,
+				Namespace: runner.Namespace,
 			},
 			Rules: rules,
 		}
@@ -370,19 +371,19 @@ func (r *RunnerReconciler) processRunnerCreation(ctx context.Context, runner v1a

 		roleBinding := &rbacv1.RoleBinding{
 			ObjectMeta: metav1.ObjectMeta{
-				Name:      runner.ObjectMeta.Name,
-				Namespace: runner.ObjectMeta.Namespace,
+				Name:      runner.Name,
+				Namespace: runner.Namespace,
 			},
 			RoleRef: rbacv1.RoleRef{
 				APIGroup: "rbac.authorization.k8s.io",
 				Kind:     "Role",
-				Name:     runner.ObjectMeta.Name,
+				Name:     runner.Name,
 			},
 			Subjects: []rbacv1.Subject{
 				{
 					Kind:      "ServiceAccount",
-					Name:      runner.ObjectMeta.Name,
-					Namespace: runner.ObjectMeta.Namespace,
+					Name:      runner.Name,
+					Namespace: runner.Namespace,
 				},
 			},
 		}
@@ -482,7 +483,7 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) {

 	labels := map[string]string{}

-	for k, v := range runner.ObjectMeta.Labels {
+	for k, v := range runner.Labels {
 		labels[k] = v
 	}

@@ -511,8 +512,8 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) {
 	//
 	// See https://github.com/actions/actions-runner-controller/issues/143 for more context.
 	labels[LabelKeyPodTemplateHash] = hash.FNVHashStringObjects(
-		filterLabels(runner.ObjectMeta.Labels, LabelKeyRunnerTemplateHash),
-		runner.ObjectMeta.Annotations,
+		filterLabels(runner.Labels, LabelKeyRunnerTemplateHash),
+		runner.Annotations,
 		runner.Spec,
 		ghc.GithubBaseURL,
 		// Token change should trigger replacement.
@@ -523,10 +524,10 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) {
 	)

 	objectMeta := metav1.ObjectMeta{
-		Name:        runner.ObjectMeta.Name,
-		Namespace:   runner.ObjectMeta.Namespace,
+		Name:        runner.Name,
+		Namespace:   runner.Namespace,
 		Labels:      labels,
-		Annotations: runner.ObjectMeta.Annotations,
+		Annotations: runner.Annotations,
 	}

 	template.ObjectMeta = objectMeta
@@ -649,7 +650,7 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) {
 	if runnerSpec.ServiceAccountName != "" {
 		pod.Spec.ServiceAccountName = runnerSpec.ServiceAccountName
 	} else if r.RunnerPodDefaults.UseRunnerStatusUpdateHook || runner.Spec.ContainerMode == "kubernetes" {
-		pod.Spec.ServiceAccountName = runner.ObjectMeta.Name
+		pod.Spec.ServiceAccountName = runner.Name
 	}

 	if runnerSpec.AutomountServiceAccountToken != nil {
@@ -704,7 +705,7 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) {
 		pod.Spec.RuntimeClassName = runnerSpec.RuntimeClassName
 	}

-	pod.ObjectMeta.Name = runner.ObjectMeta.Name
+	pod.Name = runner.Name

 	// Inject the registration token and the runner name
 	updated := mutatePod(&pod, runner.Status.Registration.Token)
@@ -720,7 +721,7 @@ func mutatePod(pod *corev1.Pod, token string) *corev1.Pod {
 	updated := pod.DeepCopy()

 	if getRunnerEnv(pod, EnvVarRunnerName) == "" {
-		setRunnerEnv(updated, EnvVarRunnerName, pod.ObjectMeta.Name)
+		setRunnerEnv(updated, EnvVarRunnerName, pod.Name)
 	}

 	if getRunnerEnv(pod, EnvVarRunnerToken) == "" {
@@ -770,11 +771,11 @@ func runnerHookEnvs(pod *corev1.Pod) ([]corev1.EnvVar, error) {

 func newRunnerPodWithContainerMode(containerMode string, template corev1.Pod, runnerSpec v1alpha1.RunnerConfig, githubBaseURL string, d RunnerPodDefaults) (corev1.Pod, error) {
 	var (
-		privileged                bool = true
-		dockerdInRunner           bool = runnerSpec.DockerdWithinRunnerContainer != nil && *runnerSpec.DockerdWithinRunnerContainer
-		dockerEnabled             bool = runnerSpec.DockerEnabled == nil || *runnerSpec.DockerEnabled
-		ephemeral                 bool = runnerSpec.Ephemeral == nil || *runnerSpec.Ephemeral
-		dockerdInRunnerPrivileged bool = dockerdInRunner
+		privileged                = true
+		dockerdInRunner           = runnerSpec.DockerdWithinRunnerContainer != nil && *runnerSpec.DockerdWithinRunnerContainer
+		dockerEnabled             = runnerSpec.DockerEnabled == nil || *runnerSpec.DockerEnabled
+		ephemeral                 = runnerSpec.Ephemeral == nil || *runnerSpec.Ephemeral
+		dockerdInRunnerPrivileged = dockerdInRunner

 		defaultRunnerImage            = d.RunnerImage
 		defaultRunnerImagePullSecrets = d.RunnerImagePullSecrets
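This hunk removes the explicit `bool` annotations because Go already infers each variable's type from its initializer. A standalone sketch of the idiom:

```go
package main

import "fmt"

func main() {
	// Redundant: the initializer alone fixes the type as bool.
	var privileged bool = true

	// Idiomatic: same types, inferred by the compiler. Style linters
	// commonly flag the annotated form as redundant.
	var (
		dockerdInRunner = false
		ephemeral       = true
	)

	fmt.Println(privileged, dockerdInRunner, ephemeral)
}
```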
@@ -797,10 +798,10 @@ func newRunnerPodWithContainerMode(containerMode string, template corev1.Pod, ru
 	template = *template.DeepCopy()

 	// This label selector is used by default when rd.Spec.Selector is empty.
-	template.ObjectMeta.Labels = CloneAndAddLabel(template.ObjectMeta.Labels, LabelKeyRunner, "")
-	template.ObjectMeta.Labels = CloneAndAddLabel(template.ObjectMeta.Labels, LabelKeyPodMutation, LabelValuePodMutation)
+	template.Labels = CloneAndAddLabel(template.Labels, LabelKeyRunner, "")
+	template.Labels = CloneAndAddLabel(template.Labels, LabelKeyPodMutation, LabelValuePodMutation)
 	if runnerSpec.GitHubAPICredentialsFrom != nil {
-		template.ObjectMeta.Annotations = CloneAndAddLabel(template.ObjectMeta.Annotations, annotationKeyGitHubAPICredsSecret, runnerSpec.GitHubAPICredentialsFrom.SecretRef.Name)
+		template.Annotations = CloneAndAddLabel(template.Annotations, annotationKeyGitHubAPICredsSecret, runnerSpec.GitHubAPICredentialsFrom.SecretRef.Name)
 	}

 	workDir := runnerSpec.WorkDir
@@ -887,10 +888,11 @@ func newRunnerPodWithContainerMode(containerMode string, template corev1.Pod, ru

 	for i := range template.Spec.Containers {
 		c := template.Spec.Containers[i]
-		if c.Name == containerName {
+		switch c.Name {
+		case containerName:
 			runnerContainerIndex = i
 			runnerContainer = &c
-		} else if c.Name == "docker" {
+		case "docker":
 			dockerdContainerIndex = i
 			dockerdContainer = &c
 		}
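Replacing the `if`/`else if` chain with a `switch` over `c.Name` makes the matched container names explicit and leaves room for further cases. A standalone sketch of the same idiom:

```go
package main

import "fmt"

func classify(name string) string {
	// A switch over one value reads better than chained == comparisons.
	switch name {
	case "runner":
		return "runner container"
	case "docker":
		return "dind sidecar"
	default:
		return "unrelated container"
	}
}

func main() {
	fmt.Println(classify("docker"))
}
```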
@@ -1364,7 +1366,7 @@ func applyWorkVolumeClaimTemplateToPod(pod *corev1.Pod, workVolumeClaimTemplate
 	}
 	for i := range pod.Spec.Volumes {
 		if pod.Spec.Volumes[i].Name == "work" {
-			return fmt.Errorf("Work volume should not be specified in container mode kubernetes. workVolumeClaimTemplate field should be used instead.")
+			return fmt.Errorf("work volume should not be specified in container mode kubernetes. workVolumeClaimTemplate field should be used instead")
 		}
 	}
 	pod.Spec.Volumes = append(pod.Spec.Volumes, workVolumeClaimTemplate.V1Volume())

@@ -79,7 +79,7 @@ func (r *RunnerPodReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
 	}

 	if len(envvars) == 0 {
-		return ctrl.Result{}, errors.New("Could not determine env vars for runner Pod")
+		return ctrl.Result{}, errors.New("could not determine env vars for runner Pod")
 	}

 	var enterprise, org, repo string
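Both hunks lower-case the error strings, following the Go convention that error messages are neither capitalized nor punctuated, since they are usually wrapped into longer messages. A standalone sketch:

```go
package main

import (
	"errors"
	"fmt"
)

var errNoEnvVars = errors.New("could not determine env vars for runner pod")

func main() {
	// Lower-cased, unpunctuated errors compose cleanly when wrapped:
	err := fmt.Errorf("reconciling runner pod: %w", errNoEnvVars)
	fmt.Println(err) // reconciling runner pod: could not determine env vars for runner pod
}
```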
@@ -103,8 +103,8 @@ func (r *RunnerPodReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
 		return ctrl.Result{}, err
 	}

-	if runnerPod.ObjectMeta.DeletionTimestamp.IsZero() {
-		finalizers, added := addFinalizer(runnerPod.ObjectMeta.Finalizers, runnerPodFinalizerName)
+	if runnerPod.DeletionTimestamp.IsZero() {
+		finalizers, added := addFinalizer(runnerPod.Finalizers, runnerPodFinalizerName)

 		var cleanupFinalizersAdded bool
 		if isContainerMode {
@@ -113,7 +113,7 @@ func (r *RunnerPodReconciler) Reconcile(ctx context.Context, req ctrl.Request) (

 		if added || cleanupFinalizersAdded {
 			newRunner := runnerPod.DeepCopy()
-			newRunner.ObjectMeta.Finalizers = finalizers
+			newRunner.Finalizers = finalizers

 			if err := r.Patch(ctx, newRunner, client.MergeFrom(&runnerPod)); err != nil {
 				log.Error(err, "Failed to update runner")
@@ -142,7 +142,7 @@ func (r *RunnerPodReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
 		}
 	}

-	if finalizers, removed := removeFinalizer(runnerPod.ObjectMeta.Finalizers, runnerLinkedResourcesFinalizerName); removed {
+	if finalizers, removed := removeFinalizer(runnerPod.Finalizers, runnerLinkedResourcesFinalizerName); removed {
 		if err := r.cleanupRunnerLinkedPods(ctx, &runnerPod, log); err != nil {
 			log.Info("Runner-linked pods clean up that has failed due to an error. If this persists, please manually remove the runner-linked pods to unblock ARC", "err", err.Error())
 			return ctrl.Result{Requeue: true, RequeueAfter: 30 * time.Second}, nil
@@ -152,7 +152,7 @@ func (r *RunnerPodReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
 			return ctrl.Result{Requeue: true, RequeueAfter: 30 * time.Second}, nil
 		}
 		patchedPod := runnerPod.DeepCopy()
-		patchedPod.ObjectMeta.Finalizers = finalizers
+		patchedPod.Finalizers = finalizers

 		if err := r.Patch(ctx, patchedPod, client.MergeFrom(&runnerPod)); err != nil {
 			log.Error(err, "Failed to update runner for finalizer linked resources removal")
@@ -163,7 +163,7 @@ func (r *RunnerPodReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
 		runnerPod = *patchedPod
 	}

-	finalizers, removed := removeFinalizer(runnerPod.ObjectMeta.Finalizers, runnerPodFinalizerName)
+	finalizers, removed := removeFinalizer(runnerPod.Finalizers, runnerPodFinalizerName)

 	if removed {
 		// In a standard scenario, the upstream controller, like runnerset-controller, ensures this runner to be gracefully stopped before the deletion timestamp is set.
@@ -175,7 +175,7 @@ func (r *RunnerPodReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
 		}

 		patchedPod := updatedPod.DeepCopy()
-		patchedPod.ObjectMeta.Finalizers = finalizers
+		patchedPod.Finalizers = finalizers

 		// We commit the removal of the finalizer so that Kuberenetes notices it and delete the pod resource from the cluster.
 		if err := r.Patch(ctx, patchedPod, client.MergeFrom(&runnerPod)); err != nil {
@@ -284,7 +284,7 @@ func (r *RunnerPodReconciler) cleanupRunnerLinkedPods(ctx context.Context, pod *
 	var runnerLinkedPodList corev1.PodList
 	if err := r.List(ctx, &runnerLinkedPodList, client.InNamespace(pod.Namespace), client.MatchingLabels(
 		map[string]string{
-			"runner-pod": pod.ObjectMeta.Name,
+			"runner-pod": pod.Name,
 		},
 	)); err != nil {
 		return fmt.Errorf("failed to list runner-linked pods: %w", err)
@@ -295,7 +295,7 @@ func (r *RunnerPodReconciler) cleanupRunnerLinkedPods(ctx context.Context, pod *
 		errs []error
 	)
 	for _, p := range runnerLinkedPodList.Items {
-		if !p.ObjectMeta.DeletionTimestamp.IsZero() {
+		if !p.DeletionTimestamp.IsZero() {
 			continue
 		}

@@ -307,7 +307,7 @@ func (r *RunnerPodReconciler) cleanupRunnerLinkedPods(ctx context.Context, pod *
 			if kerrors.IsNotFound(err) || kerrors.IsGone(err) {
 				return
 			}
-			errs = append(errs, fmt.Errorf("delete pod %q error: %v", p.ObjectMeta.Name, err))
+			errs = append(errs, fmt.Errorf("delete pod %q error: %v", p.Name, err))
 		}
 	}()
 }
@@ -330,7 +330,7 @@ func (r *RunnerPodReconciler) cleanupRunnerLinkedSecrets(ctx context.Context, po
 	var runnerLinkedSecretList corev1.SecretList
 	if err := r.List(ctx, &runnerLinkedSecretList, client.InNamespace(pod.Namespace), client.MatchingLabels(
 		map[string]string{
-			"runner-pod": pod.ObjectMeta.Name,
+			"runner-pod": pod.Name,
 		},
 	)); err != nil {
 		return fmt.Errorf("failed to list runner-linked secrets: %w", err)
@@ -341,7 +341,7 @@ func (r *RunnerPodReconciler) cleanupRunnerLinkedSecrets(ctx context.Context, po
 		errs []error
 	)
 	for _, s := range runnerLinkedSecretList.Items {
-		if !s.ObjectMeta.DeletionTimestamp.IsZero() {
+		if !s.DeletionTimestamp.IsZero() {
 			continue
 		}

@@ -353,7 +353,7 @@ func (r *RunnerPodReconciler) cleanupRunnerLinkedSecrets(ctx context.Context, po
 			if kerrors.IsNotFound(err) || kerrors.IsGone(err) {
 				return
 			}
-			errs = append(errs, fmt.Errorf("delete secret %q error: %v", s.ObjectMeta.Name, err))
+			errs = append(errs, fmt.Errorf("delete secret %q error: %v", s.Name, err))
 		}
 	}()
 }

@@ -90,7 +90,7 @@ var _ owner = (*ownerStatefulSet)(nil)
 func (s *ownerStatefulSet) pods(ctx context.Context, c client.Client) ([]corev1.Pod, error) {
 	var podList corev1.PodList

-	if err := c.List(ctx, &podList, client.MatchingLabels(s.StatefulSet.Spec.Template.ObjectMeta.Labels)); err != nil {
+	if err := c.List(ctx, &podList, client.MatchingLabels(s.StatefulSet.Spec.Template.Labels)); err != nil {
 		s.Log.Error(err, "Failed to list pods managed by statefulset")
 		return nil, err
 	}

@@ -73,7 +73,7 @@ func (r *RunnerDeploymentReconciler) Reconcile(ctx context.Context, req ctrl.Req
 		return ctrl.Result{}, client.IgnoreNotFound(err)
 	}

-	if !rd.ObjectMeta.DeletionTimestamp.IsZero() {
+	if !rd.DeletionTimestamp.IsZero() {
 		return ctrl.Result{}, nil
 	}

@@ -112,7 +112,7 @@ func (r *RunnerDeploymentReconciler) Reconcile(ctx context.Context, req ctrl.Req
 	}

 	if newestSet == nil {
-		if err := r.Client.Create(ctx, desiredRS); err != nil {
+		if err := r.Create(ctx, desiredRS); err != nil {
 			log.Error(err, "Failed to create runnerreplicaset resource")

 			return ctrl.Result{}, err
@@ -138,7 +138,7 @@ func (r *RunnerDeploymentReconciler) Reconcile(ctx context.Context, req ctrl.Req
 	}

 	if newestTemplateHash != desiredTemplateHash {
-		if err := r.Client.Create(ctx, desiredRS); err != nil {
+		if err := r.Create(ctx, desiredRS); err != nil {
 			log.Error(err, "Failed to create runnerreplicaset resource")

 			return ctrl.Result{}, err
@@ -159,7 +159,7 @@ func (r *RunnerDeploymentReconciler) Reconcile(ctx context.Context, req ctrl.Req
 		// but we still need to update the existing replicaset with it.
 		// Otherwise selector-based runner query will never work on replicasets created before the controller v0.17.0
 		// See https://github.com/actions/actions-runner-controller/pull/355#discussion_r585379259
-		if err := r.Client.Update(ctx, updateSet); err != nil {
+		if err := r.Update(ctx, updateSet); err != nil {
 			log.Error(err, "Failed to update runnerreplicaset resource")

 			return ctrl.Result{}, err
@@ -195,7 +195,7 @@ func (r *RunnerDeploymentReconciler) Reconcile(ctx context.Context, req ctrl.Req
 	newestSet.Spec.Replicas = &newDesiredReplicas
 	newestSet.Spec.EffectiveTime = rd.Spec.EffectiveTime

-	if err := r.Client.Update(ctx, newestSet); err != nil {
+	if err := r.Update(ctx, newestSet); err != nil {
 		log.Error(err, "Failed to update runnerreplicaset resource")

 		return ctrl.Result{}, err
@@ -257,7 +257,7 @@ func (r *RunnerDeploymentReconciler) Reconcile(ctx context.Context, req ctrl.Req
 		updated := rs.DeepCopy()
 		zero := 0
 		updated.Spec.Replicas = &zero
-		if err := r.Client.Update(ctx, updated); err != nil {
+		if err := r.Update(ctx, updated); err != nil {
 			rslog.Error(err, "Failed to scale runnerreplicaset to zero")

 			return ctrl.Result{}, err
@@ -268,7 +268,7 @@ func (r *RunnerDeploymentReconciler) Reconcile(ctx context.Context, req ctrl.Req
 			continue
 		}

-		if err := r.Client.Delete(ctx, &rs); err != nil {
+		if err := r.Delete(ctx, &rs); err != nil {
 			rslog.Error(err, "Failed to delete runnerreplicaset resource")

 			return ctrl.Result{}, err
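The `r.Client.Create`/`Update`/`Delete` calls shorten to `r.Create` and friends for the same reason the `.ObjectMeta` selectors disappear: the reconciler embeds controller-runtime's `client.Client`, so the interface's methods are promoted onto the reconciler. A minimal standalone sketch (fake client, not controller-runtime):

```go
package main

import "fmt"

// Client stands in for controller-runtime's client.Client interface.
type Client interface {
	Update(obj string) error
}

type fakeClient struct{}

func (fakeClient) Update(obj string) error {
	fmt.Println("updated", obj)
	return nil
}

// Reconciler embeds the interface, so its methods are promoted.
type Reconciler struct {
	Client
}

func main() {
	r := Reconciler{Client: fakeClient{}}
	// r.Client.Update(...) and r.Update(...) invoke the same method.
	_ = r.Update("runnerreplicaset")
}
```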
@@ -445,10 +445,10 @@ func newRunnerReplicaSet(rd *v1alpha1.RunnerDeployment, commonRunnerLabels []str
 	templateHash := ComputeHash(&newRSTemplate)

 	// Add template hash label to selector.
-	newRSTemplate.ObjectMeta.Labels = CloneAndAddLabel(newRSTemplate.ObjectMeta.Labels, LabelKeyRunnerTemplateHash, templateHash)
+	newRSTemplate.Labels = CloneAndAddLabel(newRSTemplate.Labels, LabelKeyRunnerTemplateHash, templateHash)

 	// This label selector is used by default when rd.Spec.Selector is empty.
-	newRSTemplate.ObjectMeta.Labels = CloneAndAddLabel(newRSTemplate.ObjectMeta.Labels, LabelKeyRunnerDeploymentName, rd.Name)
+	newRSTemplate.Labels = CloneAndAddLabel(newRSTemplate.Labels, LabelKeyRunnerDeploymentName, rd.Name)

 	selector := getSelector(rd)

@@ -457,9 +457,9 @@ func newRunnerReplicaSet(rd *v1alpha1.RunnerDeployment, commonRunnerLabels []str
 	rs := v1alpha1.RunnerReplicaSet{
 		TypeMeta: metav1.TypeMeta{},
 		ObjectMeta: metav1.ObjectMeta{
-			GenerateName: rd.ObjectMeta.Name + "-",
-			Namespace:    rd.ObjectMeta.Namespace,
-			Labels:       newRSTemplate.ObjectMeta.Labels,
+			GenerateName: rd.Name + "-",
+			Namespace:    rd.Namespace,
+			Labels:       newRSTemplate.Labels,
 		},
 		Spec: v1alpha1.RunnerReplicaSetSpec{
 			Replicas: rd.Spec.Replicas,

@@ -62,7 +62,7 @@ func (r *RunnerReplicaSetReconciler) Reconcile(ctx context.Context, req ctrl.Req
 		return ctrl.Result{}, client.IgnoreNotFound(err)
 	}

-	if !rs.ObjectMeta.DeletionTimestamp.IsZero() {
+	if !rs.DeletionTimestamp.IsZero() {
 		// RunnerReplicaSet cannot be gracefuly removed.
 		// That means any runner that is running a job can be prematurely terminated.
 		// To gracefully remove a RunnerReplicaSet, scale it down to zero first, observe RunnerReplicaSet's status replicas,
@@ -70,14 +70,14 @@ func (r *RunnerReplicaSetReconciler) Reconcile(ctx context.Context, req ctrl.Req
 		return ctrl.Result{}, nil
 	}

-	if rs.ObjectMeta.Labels == nil {
-		rs.ObjectMeta.Labels = map[string]string{}
+	if rs.Labels == nil {
+		rs.Labels = map[string]string{}
 	}

 	// Template hash is usually set by the upstream controller(RunnerDeplloyment controller) on authoring
 	// RunerReplicaset resource, but it may be missing when the user directly created RunnerReplicaSet.
 	// As a template hash is required by by the runner replica management, we dynamically add it here without ever persisting it.
-	if rs.ObjectMeta.Labels[LabelKeyRunnerTemplateHash] == "" {
+	if rs.Labels[LabelKeyRunnerTemplateHash] == "" {
 		template := rs.Spec.DeepCopy()
 		template.Replicas = nil
 		template.EffectiveTime = nil
@@ -85,8 +85,8 @@ func (r *RunnerReplicaSetReconciler) Reconcile(ctx context.Context, req ctrl.Req

 		log.Info("Using auto-generated template hash", "value", templateHash)

-		rs.ObjectMeta.Labels = CloneAndAddLabel(rs.ObjectMeta.Labels, LabelKeyRunnerTemplateHash, templateHash)
-		rs.Spec.Template.ObjectMeta.Labels = CloneAndAddLabel(rs.Spec.Template.ObjectMeta.Labels, LabelKeyRunnerTemplateHash, templateHash)
+		rs.Labels = CloneAndAddLabel(rs.Labels, LabelKeyRunnerTemplateHash, templateHash)
+		rs.Spec.Template.Labels = CloneAndAddLabel(rs.Spec.Template.Labels, LabelKeyRunnerTemplateHash, templateHash)
 	}

 	selector, err := metav1.LabelSelectorAsSelector(rs.Spec.Selector)
@@ -169,8 +169,8 @@ func (r *RunnerReplicaSetReconciler) newRunner(rs v1alpha1.RunnerReplicaSet) (v1
 	// the "runner template hash" label to the template.meta which is necessary to make this controller work correctly
 	objectMeta := rs.Spec.Template.ObjectMeta.DeepCopy()

-	objectMeta.GenerateName = rs.ObjectMeta.Name + "-"
-	objectMeta.Namespace = rs.ObjectMeta.Namespace
+	objectMeta.GenerateName = rs.Name + "-"
+	objectMeta.Namespace = rs.Namespace
 	if objectMeta.Annotations == nil {
 		objectMeta.Annotations = map[string]string{}
 	}

@@ -77,7 +77,7 @@ func (r *RunnerSetReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
 		return ctrl.Result{}, err
 	}

-	if !runnerSet.ObjectMeta.DeletionTimestamp.IsZero() {
+	if !runnerSet.DeletionTimestamp.IsZero() {
 		r.GitHubClient.DeinitForRunnerSet(runnerSet)

 		return ctrl.Result{}, nil
@@ -191,11 +191,11 @@ func (r *RunnerSetReconciler) newStatefulSet(ctx context.Context, runnerSet *v1a
 	runnerSetWithOverrides.Labels = append(runnerSetWithOverrides.Labels, r.CommonRunnerLabels...)

 	template := corev1.Pod{
-		ObjectMeta: runnerSetWithOverrides.StatefulSetSpec.Template.ObjectMeta,
-		Spec:       runnerSetWithOverrides.StatefulSetSpec.Template.Spec,
+		ObjectMeta: runnerSetWithOverrides.Template.ObjectMeta,
+		Spec:       runnerSetWithOverrides.Template.Spec,
 	}

-	if runnerSet.Spec.RunnerConfig.ContainerMode == "kubernetes" {
+	if runnerSet.Spec.ContainerMode == "kubernetes" {
 		found := false
 		for i := range template.Spec.Containers {
 			if template.Spec.Containers[i].Name == containerName {
@@ -208,7 +208,7 @@ func (r *RunnerSetReconciler) newStatefulSet(ctx context.Context, runnerSet *v1a
 			})
 		}

-		workDir := runnerSet.Spec.RunnerConfig.WorkDir
+		workDir := runnerSet.Spec.WorkDir
 		if workDir == "" {
 			workDir = "/runner/_work"
 		}
@@ -219,7 +219,7 @@ func (r *RunnerSetReconciler) newStatefulSet(ctx context.Context, runnerSet *v1a
 		template.Spec.ServiceAccountName = runnerSet.Spec.ServiceAccountName
 	}

-	template.ObjectMeta.Labels = CloneAndAddLabel(template.ObjectMeta.Labels, LabelKeyRunnerSetName, runnerSet.Name)
+	template.Labels = CloneAndAddLabel(template.Labels, LabelKeyRunnerSetName, runnerSet.Name)

 	ghc, err := r.GitHubClient.InitForRunnerSet(ctx, runnerSet)
 	if err != nil {
@@ -228,38 +228,38 @@ func (r *RunnerSetReconciler) newStatefulSet(ctx context.Context, runnerSet *v1a

 	githubBaseURL := ghc.GithubBaseURL

-	pod, err := newRunnerPodWithContainerMode(runnerSet.Spec.RunnerConfig.ContainerMode, template, runnerSet.Spec.RunnerConfig, githubBaseURL, r.RunnerPodDefaults)
+	pod, err := newRunnerPodWithContainerMode(runnerSet.Spec.ContainerMode, template, runnerSet.Spec.RunnerConfig, githubBaseURL, r.RunnerPodDefaults)
 	if err != nil {
 		return nil, err
 	}

-	runnerSetWithOverrides.StatefulSetSpec.Template.ObjectMeta = pod.ObjectMeta
-	runnerSetWithOverrides.StatefulSetSpec.Template.Spec = pod.Spec
+	runnerSetWithOverrides.Template.ObjectMeta = pod.ObjectMeta
+	runnerSetWithOverrides.Template.Spec = pod.Spec
 	// NOTE: Seems like the only supported restart policy for statefulset is "Always"?
 	// I got errosr like the below when tried to use "OnFailure":
 	// StatefulSet.apps \"example-runnersetpg9rx\" is invalid: [spec.template.metadata.labels: Invalid value: map[string]string{\"runner-template-hash\"
 	// :\"85d7578bd6\", \"runnerset-name\":\"example-runnerset\"}: `selector` does not match template `labels`, spec.
 	// template.spec.restartPolicy: Unsupported value: \"OnFailure\": supported values: \"Always\"]
-	runnerSetWithOverrides.StatefulSetSpec.Template.Spec.RestartPolicy = corev1.RestartPolicyAlways
+	runnerSetWithOverrides.Template.Spec.RestartPolicy = corev1.RestartPolicyAlways

 	templateHash := ComputeHash(pod.Spec)

 	// Add template hash label to selector.
-	runnerSetWithOverrides.Template.ObjectMeta.Labels = CloneAndAddLabel(runnerSetWithOverrides.Template.ObjectMeta.Labels, LabelKeyRunnerTemplateHash, templateHash)
+	runnerSetWithOverrides.Template.Labels = CloneAndAddLabel(runnerSetWithOverrides.Template.Labels, LabelKeyRunnerTemplateHash, templateHash)

 	selector := getRunnerSetSelector(runnerSet)
 	selector = CloneSelectorAndAddLabel(selector, LabelKeyRunnerTemplateHash, templateHash)
 	selector = CloneSelectorAndAddLabel(selector, LabelKeyRunnerSetName, runnerSet.Name)
 	selector = CloneSelectorAndAddLabel(selector, LabelKeyPodMutation, LabelValuePodMutation)

-	runnerSetWithOverrides.StatefulSetSpec.Selector = selector
+	runnerSetWithOverrides.Selector = selector

 	rs := appsv1.StatefulSet{
 		TypeMeta: metav1.TypeMeta{},
 		ObjectMeta: metav1.ObjectMeta{
-			GenerateName: runnerSet.ObjectMeta.Name + "-",
-			Namespace:    runnerSet.ObjectMeta.Namespace,
-			Labels:       CloneAndAddLabel(runnerSet.ObjectMeta.Labels, LabelKeyRunnerTemplateHash, templateHash),
+			GenerateName: runnerSet.Name + "-",
+			Namespace:    runnerSet.Namespace,
+			Labels:       CloneAndAddLabel(runnerSet.Labels, LabelKeyRunnerTemplateHash, templateHash),
 			Annotations: map[string]string{
 				SyncTimeAnnotationKey: time.Now().Format(time.RFC3339),
 			},

@@ -23,7 +23,7 @@ const (
 func syncVolumes(ctx context.Context, c client.Client, log logr.Logger, ns string, runnerSet *v1alpha1.RunnerSet, statefulsets []appsv1.StatefulSet) (*ctrl.Result, error) {
 	log = log.WithValues("ns", ns)

-	for _, t := range runnerSet.Spec.StatefulSetSpec.VolumeClaimTemplates {
+	for _, t := range runnerSet.Spec.VolumeClaimTemplates {
 		for _, sts := range statefulsets {
 			pvcName := fmt.Sprintf("%s-%s-0", t.Name, sts.Name)

@@ -16,7 +16,7 @@ type testResourceReader struct {
 }

 func (r *testResourceReader) Get(_ context.Context, key client.ObjectKey, obj client.Object, _ ...client.GetOption) error {
-	nsName := types.NamespacedName{Namespace: key.Namespace, Name: key.Name}
+	nsName := types.NamespacedName(key)
 	ret, ok := r.objects[nsName]
 	if !ok {
 		return &kerrors.StatusError{ErrStatus: metav1.Status{Reason: metav1.StatusReasonNotFound}}

@@ -64,22 +64,22 @@ func Test_workVolumeClaimTemplateVolumeV1VolumeTransformation(t *testing.T) {
 		t.Errorf("want name %q, got %q\n", want.Name, got.Name)
 	}

-	if got.VolumeSource.Ephemeral == nil {
+	if got.Ephemeral == nil {
 		t.Fatal("work volume claim template should transform itself into Ephemeral volume source\n")
 	}

-	if got.VolumeSource.Ephemeral.VolumeClaimTemplate == nil {
+	if got.Ephemeral.VolumeClaimTemplate == nil {
 		t.Fatal("work volume claim template should have ephemeral volume claim template set\n")
 	}

-	gotClassName := *got.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.StorageClassName
-	wantClassName := *want.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.StorageClassName
+	gotClassName := *got.Ephemeral.VolumeClaimTemplate.Spec.StorageClassName
+	wantClassName := *want.Ephemeral.VolumeClaimTemplate.Spec.StorageClassName
 	if gotClassName != wantClassName {
 		t.Errorf("expected storage class name %q, got %q\n", wantClassName, gotClassName)
 	}

-	gotAccessModes := got.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.AccessModes
-	wantAccessModes := want.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.AccessModes
+	gotAccessModes := got.Ephemeral.VolumeClaimTemplate.Spec.AccessModes
+	wantAccessModes := want.Ephemeral.VolumeClaimTemplate.Spec.AccessModes
 	if len(gotAccessModes) != len(wantAccessModes) {
 		t.Fatalf("access modes lengths missmatch: got %v, expected %v\n", gotAccessModes, wantAccessModes)
 	}

@@ -43,6 +43,29 @@ You can follow [this troubleshooting guide](https://docs.github.com/en/actions/h

 ## Changelog

+### 0.12.0
+
+1. Allow use of client id as an app id [#4057](https://github.com/actions/actions-runner-controller/pull/4057)
+1. Relax version requirements to allow patch version mismatch [#4080](https://github.com/actions/actions-runner-controller/pull/4080)
+1. Refactor resource naming removing unnecessary calculations [#4076](https://github.com/actions/actions-runner-controller/pull/4076)
+1. Fix busy runners metric [#4016](https://github.com/actions/actions-runner-controller/pull/4016)
+1. Include more context to errors raised by github/actions client [#4032](https://github.com/actions/actions-runner-controller/pull/4032)
+1. Revised dashboard [#4022](https://github.com/actions/actions-runner-controller/pull/4022)
+1. feat(helm): move dind to sidecar [#3842](https://github.com/actions/actions-runner-controller/pull/3842)
+1. Pin third party actions [#3981](https://github.com/actions/actions-runner-controller/pull/3981)
+1. Fix docker lint warnings [#4074](https://github.com/actions/actions-runner-controller/pull/4074)
+1. Bump the gomod group across 1 directory with 7 updates [#4008](https://github.com/actions/actions-runner-controller/pull/4008)
+1. Bump go version [#4075](https://github.com/actions/actions-runner-controller/pull/4075)
+1. Add job_workflow_ref label to listener metrics [#4054](https://github.com/actions/actions-runner-controller/pull/4054)
+1. Bump github.com/cloudflare/circl from 1.6.0 to 1.6.1 [#4118](https://github.com/actions/actions-runner-controller/pull/4118)
+1. Avoid nil point when config.Metrics is nil and expose all metrics if none are configured [#4101](https://github.com/actions/actions-runner-controller/pull/4101)
+1. Bump github.com/golang-jwt/jwt/v5 from 5.2.1 to 5.2.2 [#4120](https://github.com/actions/actions-runner-controller/pull/4120)
+1. Add startup probe to dind side-car [#4117](https://github.com/actions/actions-runner-controller/pull/4117)
+1. Delete config secret when listener pod gets deleted [#4033](https://github.com/actions/actions-runner-controller/pull/4033)
+1. Add response body to error when fetching access token [#4005](https://github.com/actions/actions-runner-controller/pull/4005)
+1. Azure Key Vault integration to resolve secrets [#4090](https://github.com/actions/actions-runner-controller/pull/4090)
+1. Create backoff mechanism for failed runners and allow re-creation of failed ephemeral runners [#4059](https://github.com/actions/actions-runner-controller/pull/4059)
+
 ### 0.11.0

 1. Add events role permission to leader_election_role [#3988](https://github.com/actions/actions-runner-controller/pull/3988)

File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -1,6 +1,11 @@
 # Visualizing Autoscaling Runner Scale Set metrics with Grafana

-With metrics introduced in [gha-runner-scale-set-0.5.0](https://github.com/actions/actions-runner-controller/releases/tag/gha-runner-scale-set-0.5.0), you can now visualize the autoscaling behavior of your runner scale set with your tool of choice. This sample shows how to visualize the metrics with [Grafana](https://grafana.com/).
+With the metrics support introduced in [gha-runner-scale-set-0.5.0](https://github.com/actions/actions-runner-controller/releases/tag/gha-runner-scale-set-0.5.0), you can visualize the autoscaling behavior of your runner scale set with your tool of choice.
+
+This sample dashboard shows how to visualize the metrics with [Grafana](https://grafana.com/).
+
+> [!NOTE]
+> We do not intend to provide a supported ARC dashboard. This is simply a reference and a demonstration for how you could leverage the metrics emitted by the controller-manager and listeners to visualize the autoscaling behavior of your runner scale set. We offer no promises of future upgrades to this sample.

 ## Demo

@@ -8,12 +13,43 @@ With metrics introduced in [gha-runner-scale-set-0.5.0](https://github.com/actio

 ## Setup

-We do not intend to provide a supported ARC dashboard. This is simply a reference and a demonstration for how you could leverage the metrics emitted by the controller-manager and listeners to visualize the autoscaling behavior of your runner scale set. We offer no promises of future upgrades to this sample.
-
 1. Make sure to have [Grafana](https://grafana.com/docs/grafana/latest/installation/) and [Prometheus](https://prometheus.io/docs/prometheus/latest/installation/) running in your cluster.
 2. Make sure that Prometheus is properly scraping the metrics endpoints of the controller-manager and listeners.
 3. Import the [dashboard](ARC-Autoscaling-Runner-Set-Monitoring_1692627561838.json) into Grafana.

+## Required metrics
+
+This sample relies on the suggested listener metrics configuration in the scale set [values.yaml](https://github.com/actions/actions-runner-controller/blob/ea27448da51385470b1ce67150aa695cfa45fd3f/charts/gha-runner-scale-set/values.yaml#L129-L270).
+
+The following metrics are required to be scraped by Prometheus in order to populate the dashboard:
+
+| Metric | Required labels | Source |
+| ------ | --------------- | ------ |
+| container_fs_writes_bytes_total | namespace | cAdvisor |
+| container_fs_reads_bytes_total | namespace | cAdvisor |
+| container_memory_working_set_bytes | namespace | cAdvisor |
+| controller_runtime_active_workers | controller | ARC Controller |
+| controller_runtime_reconcile_time_seconds_sum | namespace | ARC Controller |
+| controller_runtime_reconcile_errors_total | namespace | ARC Controller |
+| gha_assigned_jobs | actions_github_com_scale_set_name, namespace | ARC Controller |
+| gha_controller_failed_ephemeral_runners | name, namespace | ARC Controller |
+| gha_controller_pending_ephemeral_runners | name, namespace | ARC Controller |
+| gha_controller_running_ephemeral_runners | name, namespace | ARC Controller |
+| gha_controller_running_listeners | namespace | ARC Controller |
+| gha_desired_runners | actions_github_com_scale_set_name, namespace | ARC Listener |
+| gha_idle_runners | actions_github_com_scale_set_name, namespace | ARC Listener |
+| gha_job_execution_duration_seconds_bucket | actions_github_com_scale_set_name, actions_github_com_scale_set_namespace | ARC Listener |
+| gha_job_startup_duration_seconds_bucket | actions_github_com_scale_set_name, actions_github_com_scale_set_namespace | ARC Listener |
+| gha_registered_runners | actions_github_com_scale_set_name, namespace | ARC Listener |
+| gha_running_jobs | actions_github_com_scale_set_name, actions_github_com_scale_set_namespace | ARC Listener |
+| kube_pod_container_status_ready | namespace | kube-state-metrics |
+| kube_pod_container_status_terminated_reason | namespace, reason | kube-state-metrics |
+| kube_pod_container_status_waiting | namespace | kube-state-metrics |
+| rest_client_requests_total | code, method, namespace | ARC Controller |
+| scrape_duration_seconds | | prometheus |
+| workqueue_depth | name, namespace | ARC Controller |
+| workqueue_queue_duration_seconds_sum | namespace | ARC Controller |
+
 ## Details

 This dashboard demonstrates some of the metrics provided by ARC and the underlying Kubernetes runtime. It provides a sample visualization of the behavior of the runner scale set, the ARC controllers, and the listeners. This should not be considered a comprehensive dashboard; it is a starting point that can be used with other metrics and logs to understand the health of the cluster. Review the [GitHub documentation detailing the Actions Runner Controller metrics and how to enable them](https://docs.github.com/en/enterprise-server@3.10/actions/hosting-your-own-runners/managing-self-hosted-runners-with-actions-runner-controller/deploying-runner-scale-sets-with-actions-runner-controller#enabling-metrics).
@@ -22,16 +58,25 @@ The dashboard includes the following metrics:

 | Label | Description |
 | -------------------------------- | ---------------------------------------------------- |
-| Active listeners | The number of listeners currently running and attempting to manage jobs for the scale set. This should match the number of scale sets deployed. |
-| Runner States | Displays the number of runners in a given state. The finished and deleted states are not included in this panel. |
-| Failed (total) | The total number of ephemeral runners that have failed to properly start. This may require reviewing the custom resource and logs to identify and resolve the root causes. Common causes include resource issues and failure to pull the required image. |
-| Pending (total) | The total number of ephemeral runners that ARC has requested and is waiting for Kubernetes to provide in a running state. If the Kubernetes API server is responsive, this will typically match the number of runner pods that are in a pending state. This number includes requests for runner pods that have not yet been scheduled. When this number is higher than the number of runner pods in a pending state, it can indicate performance issues with the API server and resource contention. |
-| Idle (total) | The total number of ephemeral runners that are available to accept jobs across all scale sets. Keeping a pool of idle runners can enable a faster start time under load, but excessive idle runners will consume resources and can prevent nodes from scaling down. |
-| Total assigned jobs per listener | The number of workflow jobs acquired and assigned to the listener. The listener must provide supporting runners to complete these jobs. Once jobs are assigned, they cannot be delegated to other listeners and must be processed by the scale set or cancelled. |
-| Assigned vs running jobs | Compares the number of jobs assigned against the number of runners that are currently processing jobs. When running jobs is less than assigned jobs, it can indicate that ARC is waiting on Kubernetes to provide and start additional runners. |
-| Average startup duration | The average time in seconds between when jobs are assigned and when a runner accepts the job and begins processing. An increasing duration can indicate that the cluster has resource contention or a lack of available nodes for scheduling jobs |
-| Average execution duration | The average time in seconds that runners are taking to complete a job. Changes in this value reflect the efficiency of workflow jobs and the pod configuration. If the value is decreasing without changes to the job, it can indicate resource contention or CPU throttling. |
-| Startup Duration | Heat map of the wait time before a job starts, with the colors indicating the increase in the number of jobs in that time bucket. An increasing time can indicate that the cluster is resource constrained and may need additional nodes or resources to handle the load. |
-| Execution Duration | Heat map of the execution time for a job, with the colors indicating the increase in the number of jobs in that time bucket. Time can be affected by the number of steps in the job, the allocated CPU, and whether there is resource contention on the node that is impacting performance |
+| Assigned Jobs | The number of jobs that have been assigned to the listener. This is the number of jobs that the listener is responsible for providing a runner to process. |
+| Desired Runners | The number of runners that the listener is requesting from the controller. This is the number of runners required to process the assigned jobs and provide idle runners. It is limited by the configured maximum runner count for the scale set. |
+| Idle Runners | The total number of ephemeral runners that are available to accept jobs across all selected scale sets. Keeping a pool of idle runners can enable a faster start time under load, but excessive idle runners will consume resources and can prevent nodes from scaling down. |
+| Running Jobs | The number of runners that are currently processing jobs. |
+| Failed Runners | The total number of ephemeral runners that have failed to properly start. This may require reviewing the custom resource and logs to identify and resolve the root causes. Common causes include resource issues and failure to pull the required image. |
+| Listeners | The number of listeners currently running and attempting to manage jobs for the scale set. This should match the number of scale sets deployed. |
+| Pending Runners | The total number of ephemeral runners that ARC has requested and is waiting for Kubernetes to provide in a running state. If the Kubernetes API server is responsive, this will typically match the number of runner pods that are in a pending state. This number includes requests for runner pods that have not yet been scheduled. When this number is higher than the number of runner pods in a pending state, it can indicate performance issues. |
+| Registered Runners | The total number of ephemeral runners that have been successfully registered. |
+| Active Runners | The total number of runners that are active and either available or processing jobs. |
+| Out of Memory | The number of containers that have been terminated by the OOMKiller. This can indicate that the requests/limits for one or more pods on the node were configured improperly, allowing pods to request more memory than the node had available. |
+| Peak Container Memory | The maximum amount of memory used by any container in a given namespace during the selected time. This can be used for tuning the memory limits for the pods and for alerts as containers get close to their limits. |
+| Container I/O | Shows the number of bytes read and written to the container filesystem. This can be used to identify if the container is reading or writing a large amount of data to the filesystem, which can impact performance. |
+| Container Pod Status | Shows the number of containers in each status (waiting, running, terminated, ready). This can be used to identify if there are a large number of containers that are failing to start or are in a waiting state. |
+| Reconcile time | The time to perform a single reconciliation task from a controller's work queue. This metric reflects the time it takes for ARC to complete each step in the processing of creating, managing, and cleaning up runners. As this increases, it can indicate resource contention, processing delays, or delays from the API server. |
+| Workqueue Queue Duration | The time items spent in the work queue for a controller before being processed. This is often related to the work queue depth; as the number of items increases, it can take an increasing amount of time for an item to be processed. |
+| Reconciliation errors | Reconciliation is the process of a controller ensuring the desired state and actual state of the resources match. Each time an event occurs on a resource watched by the controller, the controller is required to indicate if the new state matches the desired state. Kubernetes adds a task to the work queue for the controller to perform this reconciliation. Errors indicate that the controller has not achieved a desired state and is requesting Kubernetes to queue another request for reconciliation. Ideally, this number remains close to zero. An increasing number can indicate resource contention or delays processing API server requests. This reflects Kubernetes resources that ARC is waiting to be provided or in the necessary state. As a concrete example, ARC will request the creation of a secret prior to creating the pod. If the response indicates the secret is not immediately ready, ARC will requeue the reconciliation task with the error details, incrementing this count. |
+| Reconciliation time | A histogram reflecting the time in seconds to perform a single reconciliation task from the controller's work queue. A histogram counts the number of requests that are processed within a given bucket of time. This metric reflects the time it takes for ARC to complete each step in the processing of creating, managing, and cleaning up runners. As this increases, it can indicate resource contention or processing delays within Kubernetes or the API server. This displays an average, which may hide larger or smaller times that are occurring in the processing. |
-| Workqueue depth | The number of tasks that Kubernetes queued for the ARC controllers to process. This includes reconciliation requests and tasks from ARC. ARC sequentially processes a work queue of single, small task to avoid concurrency issues. Managing a runner requires multiple steps to prepare, create, update, and delete the runner, its resources, and the ARC custom resources. As each step is completed (or trigger reconciliation), new tasks are queued for processing. As the depth increases, it indicates more tasks awaiting time from the controller. Growth indicates increasing work and may indicate Kubernetes resource contention or processing latencies. Each request for a new runner will result in multiple tasks being added to the work queue to prepare and create the runner and the related ARC custom resources. |
+| Workqueue depth | The number of tasks that Kubernetes has queued for the ARC controllers to process. This includes reconciliation requests and tasks initiated by the controller. Managing a runner requires multiple steps to prepare, create, update, and delete the runner, its resources, and the ARC custom resources. As each step is completed (or trigger reconciliation), new tasks are queued for processing. The controller will then use one or more workers to process these tasks in the order they were queued. As the depth increases, it indicates more tasks awaiting time from the controller. Growth indicates increasing work and may reflect Kubernetes resource contention or processing latencies. Each request for a new runner will result in multiple tasks being added to the work queue to prepare and create the runner and the related ARC custom resources. |
+| Active Workers | The number of workers that are actively processing tasks in the work queue. If the queue is empty, then there may be no workers required to process the tasks. The number of workers for the ephemeral runner is configurable in the scale set values file. |
+| API Calls | Shows the number of calls to the API server by status code and HTTP method. The method indicates the type of activity being performed, while the status code indicates the result of the activity. Error codes of 500 and above often indicate a Kubernetes issue. |
+| Scrape Duration (seconds) | The amount of time required for Prometheus to read the configured metrics from components in the cluster. An increasing number may indicate a lack of resources for Prometheus and a risk of the process exceeding the configured timeout, leading to lost metrics data. |

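The `gha_job_startup_duration_seconds_bucket` and `gha_job_execution_duration_seconds_bucket` entries above are Prometheus histograms; the `_bucket` suffix denotes the cumulative bucket series the heat-map panels are built from. As background, here is a minimal, hypothetical client_golang sketch of how a histogram with that shape could be defined and exposed; the real metric definitions live in the ARC listener and may differ:

```go
package main

import (
	"net/http"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

// Hypothetical stand-in mirroring the shape of gha_job_startup_duration_seconds.
var jobStartup = prometheus.NewHistogramVec(
	prometheus.HistogramOpts{
		Name:    "gha_job_startup_duration_seconds",
		Help:    "Time from job assignment to a runner picking the job up.",
		Buckets: prometheus.DefBuckets,
	},
	[]string{"actions_github_com_scale_set_name", "actions_github_com_scale_set_namespace"},
)

func main() {
	prometheus.MustRegister(jobStartup)

	// Every observation increments the matching cumulative _bucket series.
	jobStartup.WithLabelValues("my-scale-set", "arc-runners").Observe((12 * time.Second).Seconds())

	http.Handle("/metrics", promhttp.Handler())
	_ = http.ListenAndServe(":8080", nil)
}
```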
BIN docs/gha-runner-scale-set-controller/samples/grafana-dashboard/grafana-sample.png (Stored with Git LFS)
Binary file not shown.
@@ -1060,10 +1060,15 @@ func (c *Client) fetchAccessToken(ctx context.Context, gitHubConfigURL string, c
 	defer resp.Body.Close()

 	if resp.StatusCode != http.StatusCreated {
+		errMsg := fmt.Sprintf("failed to get access token for GitHub App auth (%v)", resp.Status)
+		if body, err := io.ReadAll(resp.Body); err == nil {
+			errMsg = fmt.Sprintf("%s: %s", errMsg, string(body))
+		}
+
 		return nil, &GitHubAPIError{
 			StatusCode: resp.StatusCode,
 			RequestID:  resp.Header.Get(HeaderGitHubRequestID),
-			Err:        fmt.Errorf("failed to get access token for GitHub App auth: %v", resp.Status),
+			Err:        errors.New(errMsg),
 		}
 	}

@@ -1212,7 +1217,7 @@ func createJWTForGitHubApp(appAuth *GitHubAppAuth) (string, error) {
 	claims := &jwt.RegisteredClaims{
 		IssuedAt:  jwt.NewNumericDate(issuedAt),
 		ExpiresAt: jwt.NewNumericDate(expiresAt),
-		Issuer:    strconv.FormatInt(appAuth.AppID, 10),
+		Issuer:    appAuth.AppID,
 	}

 	token := jwt.NewWithClaims(jwt.SigningMethodRS256, claims)

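With `AppID` now a string, the issuer claim can carry either the numeric App ID or the app's Client ID without a `strconv` round trip. For context, a minimal standalone sketch of minting a GitHub App JWT with golang-jwt/v5 (throwaway key and made-up ID; GitHub caps App JWT lifetimes at ten minutes):

```go
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"
	"time"

	"github.com/golang-jwt/jwt/v5"
)

func main() {
	// Throwaway key for the sketch; a real GitHub App signs with the
	// PEM private key downloaded from the app settings page.
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}

	now := time.Now()
	claims := &jwt.RegisteredClaims{
		IssuedAt:  jwt.NewNumericDate(now.Add(-60 * time.Second)), // allow for clock drift
		ExpiresAt: jwt.NewNumericDate(now.Add(5 * time.Minute)),
		// Either the numeric App ID ("123456") or the Client ID works here.
		Issuer: "123456",
	}

	signed, err := jwt.NewWithClaims(jwt.SigningMethodRS256, claims).SignedString(key)
	if err != nil {
		panic(err)
	}
	fmt.Println(signed[:24], "...")
}
```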
@@ -54,7 +54,7 @@ func TestAcquireJobs(t *testing.T) {
 		RunnerScaleSet:          &actions.RunnerScaleSet{Id: 1},
 		MessageQueueAccessToken: "abc",
 	}
-	var requestIDs []int64 = []int64{1}
+	var requestIDs = []int64{1}

 	retryMax := 1
 	actualRetry := 0

@@ -67,7 +67,7 @@ func TestGetRunnerByName(t *testing.T) {

 	t.Run("Get Runner by Name", func(t *testing.T) {
 		var runnerID int64 = 1
-		var runnerName string = "self-hosted-ubuntu"
+		var runnerName = "self-hosted-ubuntu"
 		want := &actions.RunnerReference{
 			Id:   int(runnerID),
 			Name: runnerName,
@@ -87,7 +87,7 @@ func TestGetRunnerByName(t *testing.T) {
 	})

 	t.Run("Get Runner by name with not exist runner", func(t *testing.T) {
-		var runnerName string = "self-hosted-ubuntu"
+		var runnerName = "self-hosted-ubuntu"
 		response := []byte(`{"count": 0, "value": []}`)

 		server := newActionsServer(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
@@ -103,7 +103,7 @@ func TestGetRunnerByName(t *testing.T) {
 	})

 	t.Run("Default retries on server error", func(t *testing.T) {
-		var runnerName string = "self-hosted-ubuntu"
+		var runnerName = "self-hosted-ubuntu"

 		retryWaitMax := 1 * time.Millisecond
 		retryMax := 1
@@ -181,7 +181,7 @@ func TestGetRunnerGroupByName(t *testing.T) {

 	t.Run("Get RunnerGroup by Name", func(t *testing.T) {
 		var runnerGroupID int64 = 1
-		var runnerGroupName string = "test-runner-group"
+		var runnerGroupName = "test-runner-group"
 		want := &actions.RunnerGroup{
 			ID:   runnerGroupID,
 			Name: runnerGroupName,
@@ -201,7 +201,7 @@ func TestGetRunnerGroupByName(t *testing.T) {
 	})

 	t.Run("Get RunnerGroup by name with not exist runner group", func(t *testing.T) {
-		var runnerGroupName string = "test-runner-group"
+		var runnerGroupName = "test-runner-group"
 		response := []byte(`{"count": 0, "value": []}`)

 		server := newActionsServer(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {

@@ -123,7 +123,6 @@ var defaultRunnerScaleSetJitRunnerConfig = &actions.RunnerScaleSetJitRunnerConfi

 // FakeClient implements actions service
 type FakeClient struct {
-	id                      uuid.UUID
 	getRunnerScaleSetResult struct {
 		*actions.RunnerScaleSet
 		err error
@@ -192,9 +191,7 @@ type FakeClient struct {
 }

 func NewFakeClient(options ...Option) actions.ActionsService {
-	f := &FakeClient{
-		id: uuid.New(),
-	}
+	f := &FakeClient{}
 	f.applyDefaults()
 	for _, opt := range options {
 		opt(f)
@@ -202,10 +199,6 @@ func NewFakeClient(options ...Option) actions.ActionsService {
 	return f
 }

-func (f *FakeClient) ID() uuid.UUID {
-	return f.id
-}
-
 func (f *FakeClient) applyDefaults() {
 	f.getRunnerScaleSetResult.RunnerScaleSet = defaultRunnerScaleSet
 	f.getRunnerScaleSetByIdResult.RunnerScaleSet = defaultRunnerScaleSet

@@ -3,6 +3,7 @@ package fake
 import (
 	"context"

+	"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1/appconfig"
 	"github.com/actions/actions-runner-controller/github/actions"
 )

@@ -34,10 +35,6 @@ func NewMultiClient(opts ...MultiClientOption) actions.MultiClient {
 	return f
 }

-func (f *fakeMultiClient) GetClientFor(ctx context.Context, githubConfigURL string, creds actions.ActionsAuth, namespace string, options ...actions.ClientOption) (actions.ActionsService, error) {
-	return f.defaultClient, f.defaultErr
-}
-
-func (f *fakeMultiClient) GetClientFromSecret(ctx context.Context, githubConfigURL, namespace string, secretData actions.KubernetesSecretData, options ...actions.ClientOption) (actions.ActionsService, error) {
+func (f *fakeMultiClient) GetClientFor(ctx context.Context, githubConfigURL string, appConfig *appconfig.AppConfig, namespace string, options ...actions.ClientOption) (actions.ActionsService, error) {
 	return f.defaultClient, f.defaultErr
 }

@@ -57,7 +57,7 @@ func TestClient_Identifier(t *testing.T) {
 	}
 	defaultAppCreds := &actions.ActionsAuth{
 		AppCreds: &actions.GitHubAppAuth{
-			AppID:             123,
+			AppID:             "123",
 			AppInstallationID: 123,
 			AppPrivateKey:     "private key",
 		},
@@ -90,7 +90,7 @@ func TestClient_Identifier(t *testing.T) {
 			old: defaultAppCreds,
 			new: &actions.ActionsAuth{
 				AppCreds: &actions.GitHubAppAuth{
-					AppID:             456,
+					AppID:             "456",
 					AppInstallationID: 456,
 					AppPrivateKey:     "new private key",
 				},

@@ -3,15 +3,14 @@ package actions
 import (
 	"context"
 	"fmt"
-	"strconv"
 	"sync"

+	"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1/appconfig"
 	"github.com/go-logr/logr"
 )

 type MultiClient interface {
-	GetClientFor(ctx context.Context, githubConfigURL string, creds ActionsAuth, namespace string, options ...ClientOption) (ActionsService, error)
-	GetClientFromSecret(ctx context.Context, githubConfigURL, namespace string, secretData KubernetesSecretData, options ...ClientOption) (ActionsService, error)
+	GetClientFor(ctx context.Context, githubConfigURL string, appConfig *appconfig.AppConfig, namespace string, options ...ClientOption) (ActionsService, error)
 }

 type multiClient struct {
@@ -23,7 +22,8 @@ type multiClient struct {
 }

 type GitHubAppAuth struct {
-	AppID             int64
+	// AppID is the ID or the Client ID of the application
+	AppID             string
 	AppInstallationID int64
 	AppPrivateKey     string
 }
@@ -49,15 +49,22 @@ func NewMultiClient(logger logr.Logger) MultiClient {
 	}
 }

-func (m *multiClient) GetClientFor(ctx context.Context, githubConfigURL string, creds ActionsAuth, namespace string, options ...ClientOption) (ActionsService, error) {
+func (m *multiClient) GetClientFor(ctx context.Context, githubConfigURL string, appConfig *appconfig.AppConfig, namespace string, options ...ClientOption) (ActionsService, error) {
 	m.logger.Info("retrieve actions client", "githubConfigURL", githubConfigURL, "namespace", namespace)

-	if creds.Token == "" && creds.AppCreds == nil {
-		return nil, fmt.Errorf("no credentials provided. either a PAT or GitHub App credentials should be provided")
+	if err := appConfig.Validate(); err != nil {
+		return nil, fmt.Errorf("failed to validate app config: %w", err)
 	}

-	if creds.Token != "" && creds.AppCreds != nil {
-		return nil, fmt.Errorf("both PAT and GitHub App credentials provided. should only provide one")
+	var creds ActionsAuth
+	if len(appConfig.Token) > 0 {
+		creds.Token = appConfig.Token
+	} else {
+		creds.AppCreds = &GitHubAppAuth{
+			AppID:             appConfig.AppID,
+			AppInstallationID: appConfig.AppInstallationID,
+			AppPrivateKey:     appConfig.AppPrivateKey,
+		}
 	}

 	client, err := NewClient(
@@ -68,7 +75,7 @@ func (m *multiClient) GetClientFor(ctx context.Context, githubConfigURL string,
 		}, options...)...,
 	)
 	if err != nil {
-		return nil, err
+		return nil, fmt.Errorf("failed to instantiate new client: %w", err)
 	}

 	m.mu.Lock()
@@ -93,47 +100,3 @@ func (m *multiClient) GetClientFor(ctx context.Context, githubConfigURL string,
|
||||
|
||||
return client, nil
|
||||
}
|
||||
|
||||
type KubernetesSecretData map[string][]byte
|
||||
|
||||
func (m *multiClient) GetClientFromSecret(ctx context.Context, githubConfigURL, namespace string, secretData KubernetesSecretData, options ...ClientOption) (ActionsService, error) {
|
||||
if len(secretData) == 0 {
|
||||
return nil, fmt.Errorf("must provide secret data with either PAT or GitHub App Auth")
|
||||
}
|
||||
|
||||
token := string(secretData["github_token"])
|
||||
hasToken := len(token) > 0
|
||||
|
||||
appID := string(secretData["github_app_id"])
|
||||
appInstallationID := string(secretData["github_app_installation_id"])
|
||||
appPrivateKey := string(secretData["github_app_private_key"])
|
||||
hasGitHubAppAuth := len(appID) > 0 && len(appInstallationID) > 0 && len(appPrivateKey) > 0
|
||||
|
||||
if hasToken && hasGitHubAppAuth {
|
||||
return nil, fmt.Errorf("must provide secret with only PAT or GitHub App Auth to avoid ambiguity in client behavior")
|
||||
}
|
||||
|
||||
if !hasToken && !hasGitHubAppAuth {
|
||||
return nil, fmt.Errorf("neither PAT nor GitHub App Auth credentials provided in secret")
|
||||
}
|
||||
|
||||
auth := ActionsAuth{}
|
||||
|
||||
if hasToken {
|
||||
auth.Token = token
|
||||
return m.GetClientFor(ctx, githubConfigURL, auth, namespace, options...)
|
||||
}
|
||||
|
||||
parsedAppID, err := strconv.ParseInt(appID, 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
parsedAppInstallationID, err := strconv.ParseInt(appInstallationID, 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
auth.AppCreds = &GitHubAppAuth{AppID: parsedAppID, AppInstallationID: parsedAppInstallationID, AppPrivateKey: appPrivateKey}
|
||||
return m.GetClientFor(ctx, githubConfigURL, auth, namespace, options...)
|
||||
}
|
||||
|
||||
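Note: the hunks above fold GetClientFromSecret into a single GetClientFor that accepts an appconfig.AppConfig. A minimal caller sketch, assuming the AppConfig fields and Validate() seen in this diff and ARC's github/actions package path; the URL, namespace, and credential values are placeholders:

// Sketch only: calling the reworked MultiClient. Assumes the AppConfig
// fields and Validate() shown in the diff above, and ARC's github/actions
// package path; URL, namespace, and credentials are placeholders.
package main

import (
    "context"

    "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1/appconfig"
    "github.com/actions/actions-runner-controller/github/actions"
    "github.com/go-logr/logr"
)

func main() {
    multiClient := actions.NewMultiClient(logr.Discard())

    // PAT auth: only Token is set, so GetClientFor fills creds.Token.
    // For GitHub App auth, leave Token empty and set AppID (an app ID or
    // client ID, hence a string now), AppInstallationID, and AppPrivateKey.
    cfg := &appconfig.AppConfig{Token: "ghp_placeholder"}

    client, err := multiClient.GetClientFor(
        context.Background(),
        "https://github.com/org/repo",
        cfg,
        "arc-runners", // namespace is part of the client cache key
    )
    if err != nil {
        // Both Validate() failures and client construction errors land here.
        panic(err)
    }
    _ = client
}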
@@ -5,6 +5,7 @@ import (
    "fmt"
    "testing"

    "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1/appconfig"
    "github.com/go-logr/logr"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
@@ -23,10 +24,13 @@ func TestMultiClientCaching(t *testing.T) {

    defaultNamespace := "default"
    defaultConfigURL := "https://github.com/org/repo"
    defaultCreds := &ActionsAuth{
    defaultCreds := &appconfig.AppConfig{
        Token: "token",
    }
    client, err := NewClient(defaultConfigURL, defaultCreds)
    defaultAuth := ActionsAuth{
        Token: defaultCreds.Token,
    }
    client, err := NewClient(defaultConfigURL, &defaultAuth)
    require.NoError(t, err)

    multiClient.clients[ActionsClientKey{client.Identifier(), defaultNamespace}] = client
@@ -35,7 +39,7 @@ func TestMultiClientCaching(t *testing.T) {
    cachedClient, err := multiClient.GetClientFor(
        ctx,
        defaultConfigURL,
        *defaultCreds,
        defaultCreds,
        defaultNamespace,
    )
    require.NoError(t, err)
@@ -47,7 +51,7 @@ func TestMultiClientCaching(t *testing.T) {
    newClient, err := multiClient.GetClientFor(
        ctx,
        defaultConfigURL,
        *defaultCreds,
        defaultCreds,
        otherNamespace,
    )
    require.NoError(t, err)
@@ -63,7 +67,7 @@ func TestMultiClientOptions(t *testing.T) {
    defaultConfigURL := "https://github.com/org/repo"

    t.Run("GetClientFor", func(t *testing.T) {
        defaultCreds := &ActionsAuth{
        defaultCreds := &appconfig.AppConfig{
            Token: "token",
        }

@@ -71,7 +75,7 @@ func TestMultiClientOptions(t *testing.T) {
        service, err := multiClient.GetClientFor(
            ctx,
            defaultConfigURL,
            *defaultCreds,
            defaultCreds,
            defaultNamespace,
        )
        service.SetUserAgent(testUserAgent)
@@ -83,27 +87,6 @@ func TestMultiClientOptions(t *testing.T) {
        require.NoError(t, err)
        assert.Equal(t, testUserAgent.String(), req.Header.Get("User-Agent"))
    })

    t.Run("GetClientFromSecret", func(t *testing.T) {
        secret := map[string][]byte{
            "github_token": []byte("token"),
        }

        multiClient := NewMultiClient(logger)
        service, err := multiClient.GetClientFromSecret(
            ctx,
            defaultConfigURL,
            defaultNamespace,
            secret,
        )
        service.SetUserAgent(testUserAgent)
        require.NoError(t, err)

        client := service.(*Client)
        req, err := client.NewGitHubAPIRequest(ctx, "GET", "/test", nil)
        require.NoError(t, err)
        assert.Equal(t, testUserAgent.String(), req.Header.Get("User-Agent"))
    })
}

func TestCreateJWT(t *testing.T) {
@@ -137,7 +120,7 @@ etFcaQuTHEZyRhhJ4BU=
-----END PRIVATE KEY-----`

    auth := &GitHubAppAuth{
        AppID: 123,
        AppID: "123",
        AppPrivateKey: key,
    }
    jwt, err := createJWTForGitHubApp(auth)
@@ -127,7 +127,7 @@ func NewServer(opts ...Option) *httptest.Server {
        },

        // For ListRunners
        "/repos/test/valid/actions/runners": config.FixedResponses.ListRunners,
        "/repos/test/valid/actions/runners": config.ListRunners,
        "/repos/test/invalid/actions/runners": &Handler{
            Status: http.StatusNoContent,
            Body:   "",
@@ -204,10 +204,10 @@ func NewServer(opts ...Option) *httptest.Server {
        },

        // For auto-scaling based on the number of queued(pending) workflow runs
        "/repos/test/valid/actions/runs": config.FixedResponses.ListRepositoryWorkflowRuns,
        "/repos/test/valid/actions/runs": config.ListRepositoryWorkflowRuns,

        // For auto-scaling based on the number of queued(pending) workflow jobs
        "/repos/test/valid/actions/runs/": config.FixedResponses.ListWorkflowJobs,
        "/repos/test/valid/actions/runs/": config.ListWorkflowJobs,
    }

    mux := http.NewServeMux()
@@ -12,7 +12,7 @@ type Option func(*ServerConfig)

func WithListRepositoryWorkflowRunsResponse(status int, body, queued, in_progress string) Option {
    return func(c *ServerConfig) {
        c.FixedResponses.ListRepositoryWorkflowRuns = &Handler{
        c.ListRepositoryWorkflowRuns = &Handler{
            Status: status,
            Body:   body,
            Statuses: map[string]string{
@@ -25,7 +25,7 @@ func WithListRepositoryWorkflowRunsResponse(status int, body, queued, in_progres

func WithListWorkflowJobsResponse(status int, bodies map[int]string) Option {
    return func(c *ServerConfig) {
        c.FixedResponses.ListWorkflowJobs = &MapHandler{
        c.ListWorkflowJobs = &MapHandler{
            Status: status,
            Bodies: bodies,
        }
@@ -34,7 +34,7 @@ func WithListWorkflowJobsResponse(status int, bodies map[int]string) Option {

func WithListRunnersResponse(status int, body string) Option {
    return func(c *ServerConfig) {
        c.FixedResponses.ListRunners = &ListRunnersHandler{
        c.ListRunners = &ListRunnersHandler{
            Status: status,
            Body:   body,
        }
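Note: these options now assign through promoted fields (c.ListRunners rather than c.FixedResponses.ListRunners), consistent with FixedResponses being embedded in ServerConfig rather than held as a named field. A self-contained sketch of that functional-options-plus-embedding shape; Handler, ServerConfig, and the option here are simplified stand-ins, not the real testserver types:

// Minimal sketch of the option pattern used above; types are stand-ins.
package main

import "fmt"

type Handler struct {
    Status int
    Body   string
}

// FixedResponses groups the canned responses; embedding it in ServerConfig
// promotes its fields, so options can write c.ListRunners directly.
type FixedResponses struct {
    ListRunners *Handler
}

type ServerConfig struct {
    FixedResponses
}

type Option func(*ServerConfig)

func WithListRunnersResponse(status int, body string) Option {
    return func(c *ServerConfig) {
        c.ListRunners = &Handler{Status: status, Body: body}
    }
}

func main() {
    var config ServerConfig
    for _, opt := range []Option{WithListRunnersResponse(200, `{"runners":[]}`)} {
        opt(&config)
    }
    fmt.Println(config.ListRunners.Status) // 200
}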
@@ -290,7 +290,7 @@ func (c *Client) ListRunnerGroupRepositoryAccesses(ctx context.Context, org stri

    opts := github.ListOptions{PerPage: 100}
    for {
        list, res, err := c.Client.Actions.ListRepositoryAccessRunnerGroup(ctx, org, runnerGroupId, &opts)
        list, res, err := c.Actions.ListRepositoryAccessRunnerGroup(ctx, org, runnerGroupId, &opts)
        if err != nil {
            return nil, fmt.Errorf("failed to list repository access for runner group: %w", err)
        }
@@ -323,32 +323,32 @@ func (c *Client) cleanup() {

func (c *Client) createRegistrationToken(ctx context.Context, enterprise, org, repo string) (*github.RegistrationToken, *github.Response, error) {
    if len(repo) > 0 {
        return c.Client.Actions.CreateRegistrationToken(ctx, org, repo)
        return c.Actions.CreateRegistrationToken(ctx, org, repo)
    }
    if len(org) > 0 {
        return c.Client.Actions.CreateOrganizationRegistrationToken(ctx, org)
        return c.Actions.CreateOrganizationRegistrationToken(ctx, org)
    }
    return c.Client.Enterprise.CreateRegistrationToken(ctx, enterprise)
    return c.Enterprise.CreateRegistrationToken(ctx, enterprise)
}

func (c *Client) removeRunner(ctx context.Context, enterprise, org, repo string, runnerID int64) (*github.Response, error) {
    if len(repo) > 0 {
        return c.Client.Actions.RemoveRunner(ctx, org, repo, runnerID)
        return c.Actions.RemoveRunner(ctx, org, repo, runnerID)
    }
    if len(org) > 0 {
        return c.Client.Actions.RemoveOrganizationRunner(ctx, org, runnerID)
        return c.Actions.RemoveOrganizationRunner(ctx, org, runnerID)
    }
    return c.Client.Enterprise.RemoveRunner(ctx, enterprise, runnerID)
    return c.Enterprise.RemoveRunner(ctx, enterprise, runnerID)
}

func (c *Client) listRunners(ctx context.Context, enterprise, org, repo string, opts *github.ListOptions) (*github.Runners, *github.Response, error) {
    if len(repo) > 0 {
        return c.Client.Actions.ListRunners(ctx, org, repo, opts)
        return c.Actions.ListRunners(ctx, org, repo, opts)
    }
    if len(org) > 0 {
        return c.Client.Actions.ListOrganizationRunners(ctx, org, opts)
        return c.Actions.ListOrganizationRunners(ctx, org, opts)
    }
    return c.Client.Enterprise.ListRunners(ctx, enterprise, opts)
    return c.Enterprise.ListRunners(ctx, enterprise, opts)
}

func (c *Client) ListRepositoryWorkflowRuns(ctx context.Context, user string, repoName string) ([]*github.WorkflowRun, error) {
@@ -381,7 +381,7 @@ func (c *Client) listRepositoryWorkflowRuns(ctx context.Context, user string, re
    }

    for {
        list, res, err := c.Client.Actions.ListRepositoryWorkflowRuns(ctx, user, repoName, &opts)
        list, res, err := c.Actions.ListRepositoryWorkflowRuns(ctx, user, repoName, &opts)

        if err != nil {
            return workflowRuns, fmt.Errorf("failed to list workflow runs: %v", err)

@@ -26,7 +26,7 @@ func newTestClient() *Client {
    if err != nil {
        panic(err)
    }
    client.Client.BaseURL = baseURL
    client.BaseURL = baseURL

    return client
}
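Note: the mechanical rewrites from c.Client.Actions to c.Actions (and client.Client.BaseURL to client.BaseURL) read as though the wrapped go-github client is now embedded in this package's Client rather than held in a named field, so its services and fields are promoted. A hedged sketch of that Go promotion mechanic with stand-in types:

// Sketch of field promotion via embedding; Inner stands in for the wrapped
// *github.Client, Outer for this package's Client. Not the real types.
package main

import "fmt"

type ActionsService struct{}

func (ActionsService) ListRunners() string { return "runners" }

type Inner struct {
    Actions ActionsService
    BaseURL string
}

// Before: type Outer struct { Client *Inner } -> o.Client.Actions.ListRunners()
// After: embedding promotes the fields       -> o.Actions.ListRunners()
type Outer struct {
    *Inner
}

func main() {
    o := Outer{Inner: &Inner{BaseURL: "https://api.github.com/"}}
    fmt.Println(o.Actions.ListRunners()) // promoted through the embedded *Inner
    fmt.Println(o.BaseURL)               // ditto for plain fields
}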
37
go.mod
@@ -1,8 +1,11 @@
module github.com/actions/actions-runner-controller

go 1.24.0
go 1.24.3

require (
    github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0
    github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.1
    github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets v1.3.0
    github.com/bradleyfalzon/ghinstallation/v2 v2.14.0
    github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc
    github.com/evanphx/json-patch v5.9.11+incompatible
@@ -17,16 +20,16 @@ require (
    github.com/hashicorp/go-retryablehttp v0.7.7
    github.com/kelseyhightower/envconfig v1.4.0
    github.com/onsi/ginkgo v1.16.5
    github.com/onsi/ginkgo/v2 v2.23.4
    github.com/onsi/gomega v1.37.0
    github.com/onsi/ginkgo/v2 v2.23.3
    github.com/onsi/gomega v1.36.3
    github.com/prometheus/client_golang v1.21.1
    github.com/stretchr/testify v1.10.0
    github.com/teambition/rrule-go v1.8.2
    go.uber.org/multierr v1.11.0
    go.uber.org/zap v1.27.0
    golang.org/x/net v0.39.0
    golang.org/x/net v0.38.0
    golang.org/x/oauth2 v0.28.0
    golang.org/x/sync v0.13.0
    golang.org/x/sync v0.12.0
    gomodules.xyz/jsonpatch/v2 v2.5.0
    gopkg.in/yaml.v2 v2.4.0
    k8s.io/api v0.32.3
@@ -39,6 +42,9 @@ require (

require (
    filippo.io/edwards25519 v1.1.0 // indirect
    github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect
    github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.1.0 // indirect
    github.com/AzureAD/microsoft-authentication-library-for-go v1.3.2 // indirect
    github.com/BurntSushi/toml v1.4.0 // indirect
    github.com/ProtonMail/go-crypto v1.1.6 // indirect
    github.com/aws/aws-sdk-go-v2 v1.36.3 // indirect
@@ -80,7 +86,7 @@ require (
    github.com/beorn7/perks v1.0.1 // indirect
    github.com/boombuler/barcode v1.0.2 // indirect
    github.com/cespare/xxhash/v2 v2.3.0 // indirect
    github.com/cloudflare/circl v1.6.0 // indirect
    github.com/cloudflare/circl v1.6.1 // indirect
    github.com/cpuguy83/go-md2man/v2 v2.0.6 // indirect
    github.com/emicklei/go-restful/v3 v3.12.2 // indirect
    github.com/evanphx/json-patch/v5 v5.9.11 // indirect
@@ -93,9 +99,9 @@ require (
    github.com/go-openapi/jsonreference v0.21.0 // indirect
    github.com/go-openapi/swag v0.23.0 // indirect
    github.com/go-sql-driver/mysql v1.9.0 // indirect
    github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 // indirect
    github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
    github.com/gogo/protobuf v1.3.2 // indirect
    github.com/golang-jwt/jwt/v5 v5.2.2 // indirect
    github.com/golang/protobuf v1.5.4 // indirect
    github.com/gonvenience/bunt v1.4.0 // indirect
    github.com/gonvenience/idem v0.0.1 // indirect
@@ -108,7 +114,7 @@ require (
    github.com/google/go-github/v69 v69.2.0 // indirect
    github.com/google/go-querystring v1.1.0 // indirect
    github.com/google/gofuzz v1.2.0 // indirect
    github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 // indirect
    github.com/google/pprof v0.0.0-20250302191652-9094ed2288e7 // indirect
    github.com/gorilla/websocket v1.5.3 // indirect
    github.com/gruntwork-io/go-commons v0.17.2 // indirect
    github.com/hashicorp/errwrap v1.1.0 // indirect
@@ -122,6 +128,7 @@ require (
    github.com/josharian/intern v1.0.0 // indirect
    github.com/json-iterator/go v1.1.12 // indirect
    github.com/klauspost/compress v1.18.0 // indirect
    github.com/kylelemons/godebug v1.1.0 // indirect
    github.com/lucasb-eyer/go-colorful v1.2.0 // indirect
    github.com/mailru/easyjson v0.9.0 // indirect
    github.com/mattn/go-ciede2000 v0.0.0-20170301095244-782e8c62fec3 // indirect
@@ -135,7 +142,7 @@ require (
    github.com/modern-go/reflect2 v1.0.2 // indirect
    github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
    github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
    github.com/nxadm/tail v1.4.8 // indirect
    github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
    github.com/pkg/errors v0.9.1 // indirect
    github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
    github.com/pquerna/otp v1.4.0 // indirect
@@ -151,18 +158,16 @@ require (
    github.com/virtuald/go-ordered-json v0.0.0-20170621173500-b18e6e673d74 // indirect
    github.com/x448/float16 v0.8.4 // indirect
    github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 // indirect
    go.uber.org/automaxprocs v1.6.0 // indirect
    golang.org/x/crypto v0.37.0 // indirect
    golang.org/x/crypto v0.36.0 // indirect
    golang.org/x/exp v0.0.0-20250305212735-054e65f0b394 // indirect
    golang.org/x/sys v0.32.0 // indirect
    golang.org/x/term v0.31.0 // indirect
    golang.org/x/text v0.24.0 // indirect
    golang.org/x/sys v0.31.0 // indirect
    golang.org/x/term v0.30.0 // indirect
    golang.org/x/text v0.23.0 // indirect
    golang.org/x/time v0.11.0 // indirect
    golang.org/x/tools v0.32.0 // indirect
    golang.org/x/tools v0.31.0 // indirect
    google.golang.org/protobuf v1.36.5 // indirect
    gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
    gopkg.in/inf.v0 v0.9.1 // indirect
    gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
    gopkg.in/yaml.v3 v3.0.1 // indirect
    k8s.io/apiextensions-apiserver v0.32.2 // indirect
    k8s.io/klog/v2 v2.130.1 // indirect
57
go.sum
@@ -1,5 +1,22 @@
filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=
filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4=
github.com/Azure/azure-sdk-for-go v51.0.0+incompatible h1:p7blnyJSjJqf5jflHbSGhIhEpXIgIFmYZNg5uwqweso=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0 h1:g0EZJwz7xkXQiZAI5xi9f3WWFYBlX1CPTrR+NDToRkQ=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0/go.mod h1:XCW7KnZet0Opnr7HccfUw1PLc4CjHqpcaxW8DHklNkQ=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.1 h1:1mvYtZfWQAnwNah/C+Z+Jb9rQH95LPE2vlmMuWAHJk8=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.1/go.mod h1:75I/mXtme1JyWFtz8GocPHVFyH421IBoZErnO16dd0k=
github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.1 h1:Bk5uOhSAenHyR5P61D/NzeQCv+4fEVV8mOkJ82NqpWw=
github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.1/go.mod h1:QZ4pw3or1WPmRBxf0cHd1tknzrT54WPBOQoGutCPvSU=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY=
github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets v1.3.0 h1:WLUIpeyv04H0RCcQHaA4TNoyrQ39Ox7V+re+iaqzTe0=
github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets v1.3.0/go.mod h1:hd8hTTIY3VmUVPRHNH7GVCHO3SHgXkJKZHReby/bnUQ=
github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.1.0 h1:eXnN9kaS8TiDwXjoie3hMRLuwdUBUMW9KRgOqB3mCaw=
github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.1.0/go.mod h1:XIpam8wumeZ5rVMuhdDQLMfIPDf1WO3IzrCRO3e3e3o=
github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJTmL004Abzc5wDB5VtZG2PJk5ndYDgVacGqfirKxjM=
github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE=
github.com/AzureAD/microsoft-authentication-library-for-go v1.3.2 h1:kYRSnvJju5gYVyhkij+RTJ/VR6QIUaCfWeaFm2ycsjQ=
github.com/AzureAD/microsoft-authentication-library-for-go v1.3.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
github.com/BurntSushi/toml v1.4.0 h1:kuoIxZQy2WRRk1pttg9asf+WVv6tWQuBNVmK8+nqPr0=
github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
github.com/ProtonMail/go-crypto v1.1.6 h1:ZcV+Ropw6Qn0AX9brlQLAUXfqLBc7Bl+f/DmNxpLfdw=
@@ -89,14 +106,16 @@ github.com/bradleyfalzon/ghinstallation/v2 v2.14.0 h1:0D4vKCHOvYrDU8u61TnE2JfNT4
github.com/bradleyfalzon/ghinstallation/v2 v2.14.0/go.mod h1:LOVmdZYVZ8jqdr4n9wWm1ocDiMz9IfMGfRkaYC1a52A=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cloudflare/circl v1.6.0 h1:cr5JKic4HI+LkINy2lg3W2jF8sHCVTBncJr5gIIq7qk=
github.com/cloudflare/circl v1.6.0/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZh3pJrofs=
github.com/cloudflare/circl v1.6.1 h1:zqIqSPIndyBh1bjLVVDHMPpVKqp8Su/V+6MeDzzQBQ0=
github.com/cloudflare/circl v1.6.1/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZh3pJrofs=
github.com/cpuguy83/go-md2man/v2 v2.0.6 h1:XJtiaUW6dEEqVuZiMTn1ldk455QWwEIsMIJlo5vtkx0=
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU=
github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/evanphx/json-patch v5.9.11+incompatible h1:ixHHqfcGvxhWkniF1tWxBHA0yb4Z+d1UQi45df52xW8=
@@ -127,7 +146,6 @@ github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+Gr
github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
github.com/go-sql-driver/mysql v1.9.0 h1:Y0zIbQXhQKmQgTp44Y1dp3wTXcn804QoTptLZT1vtvo=
github.com/go-sql-driver/mysql v1.9.0/go.mod h1:pDetrLJeA3oMujJuvXc8RJoasr589B6A9fwzD3QMrqw=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
@@ -135,6 +153,8 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang-jwt/jwt/v4 v4.5.2 h1:YtQM7lnr8iZ+j5q71MGKkNw9Mn7AjHM68uc9g5fXeUI=
github.com/golang-jwt/jwt/v4 v4.5.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeDy8=
github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
@@ -178,8 +198,6 @@ github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/pprof v0.0.0-20250302191652-9094ed2288e7 h1:+J3r2e8+RsmN3vKfo75g0YSY61ms37qzPglu4p0sGro=
github.com/google/pprof v0.0.0-20250302191652-9094ed2288e7/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 h1:BHT72Gu3keYf3ZEu2J0b1vyeLSOYI8bm5wbJM/8yDe8=
github.com/google/pprof v0.0.0-20250403155104-27863c87afa6/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
@@ -218,6 +236,8 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/kelseyhightower/envconfig v1.4.0 h1:Im6hONhd3pLkfDFsbRgu68RDNkGF1r3dvMUtDTo2cv8=
github.com/kelseyhightower/envconfig v1.4.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg=
github.com/keybase/go-keychain v0.0.0-20231219164618-57a3676c3af6 h1:IsMZxCuZqKuao2vNdfD82fjjgPLfyHLpR41Z88viRWs=
github.com/keybase/go-keychain v0.0.0-20231219164618-57a3676c3af6/go.mod h1:3VeWNIJaW+O5xpRQbPp0Ybqu1vJd/pm7s2F473HRrkw=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
@@ -261,7 +281,6 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8m
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
@@ -269,14 +288,12 @@ github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
github.com/onsi/ginkgo/v2 v2.23.3 h1:edHxnszytJ4lD9D5Jjc4tiDkPBZ3siDeJJkUZJJVkp0=
github.com/onsi/ginkgo/v2 v2.23.3/go.mod h1:zXTP6xIp3U8aVuXN8ENK9IXRaTjFnpVB9mGmaSRvxnM=
github.com/onsi/ginkgo/v2 v2.23.4 h1:ktYTpKJAVZnDT4VjxSbiBenUjmlL/5QkBEocaWXiQus=
github.com/onsi/ginkgo/v2 v2.23.4/go.mod h1:Bt66ApGPBFzHyR+JO10Zbt0Gsp4uWxu5mIOTusL46e8=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.36.3 h1:hID7cr8t3Wp26+cYnfcjR6HpJ00fdogN6dqZ1t6IylU=
github.com/onsi/gomega v1.36.3/go.mod h1:8D9+Txp43QWKhM24yyOBEdpkzN8FvJyAwecBgsU4KU0=
github.com/onsi/gomega v1.37.0 h1:CdEG8g0S133B4OswTDC/5XPSzE1OeP29QOioj2PID2Y=
github.com/onsi/gomega v1.37.0/go.mod h1:8D9+Txp43QWKhM24yyOBEdpkzN8FvJyAwecBgsU4KU0=
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ=
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
@@ -292,6 +309,8 @@ github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ
github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I=
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
github.com/redis/go-redis/v9 v9.7.0 h1:HhLSs+B6O021gwzl+locl0zEDnyNkxMtf/Z3NNBMa9E=
github.com/redis/go-redis/v9 v9.7.0/go.mod h1:f6zhXITC7JUJIlPEiBOTXxJgPLdZcA93GewI7inzyWw=
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
@@ -323,8 +342,6 @@ github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 h1:gEOO8jv9F4OT7lGC
github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1/go.mod h1:Ohn+xnUBiLI6FVj/9LpzZWtj1/D6lUovWYBkxHVV3aM=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs=
go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
@@ -336,8 +353,6 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34=
golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=
golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE=
golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc=
golang.org/x/exp v0.0.0-20250305212735-054e65f0b394 h1:nDVHiLt8aIbd/VzvPWN6kSOPE7+F/fNFDSXLVYkE/Iw=
golang.org/x/exp v0.0.0-20250305212735-054e65f0b394/go.mod h1:sIifuuw/Yco/y6yb6+bDNfyeQ/MdPUy/hKEMYQV17cM=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
@@ -350,8 +365,6 @@ golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8=
golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY=
golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E=
golang.org/x/oauth2 v0.28.0 h1:CrgCKl8PPAVtLnU3c+EDw6x11699EWlsDeWNWKdIOkc=
golang.org/x/oauth2 v0.28.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -360,8 +373,6 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw=
golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610=
golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -371,21 +382,16 @@ golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20=
golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y=
golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g=
golang.org/x/term v0.31.0 h1:erwDkOK1Msy6offm1mOgvspSkslFnIGsFnxOKoufg3o=
golang.org/x/term v0.31.0/go.mod h1:R4BeIy7D95HzImkxGkTW1UQTtP54tio2RyHz7PwK0aw=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0=
golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU=
golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0=
golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -395,8 +401,6 @@ golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4f
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.31.0 h1:0EedkvKDbh+qistFTd0Bcwe/YLh4vHwWEkiI0toFIBU=
golang.org/x/tools v0.31.0/go.mod h1:naFTU+Cev749tSJRXJlna0T3WxKvb1kWEx15xA4SdmQ=
golang.org/x/tools v0.32.0 h1:Q7N1vhpkQv7ybVzLFtTjvQya2ewbwNDZzUgfXGqtMWU=
golang.org/x/tools v0.32.0/go.mod h1:ZxrU41P/wAbZD8EDa6dDCa6XfpkhJ7HFMjHJXfBDu8s=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -420,7 +424,6 @@ gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWM
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
19
hash/hash.go
@@ -49,22 +49,3 @@ func ComputeTemplateHash(template interface{}) string {

    return rand.SafeEncodeString(fmt.Sprint(hasher.Sum32()))
}

func ComputeCombinedObjectsHash(first any, others ...any) string {
    hasher := fnv.New32a()

    hasher.Reset()

    printer := spew.ConfigState{
        Indent:         " ",
        SortKeys:       true,
        DisableMethods: true,
        SpewKeys:       true,
    }

    for _, obj := range append([]any{first}, others...) {
        printer.Fprintf(hasher, "%#v", obj)
    }

    return rand.SafeEncodeString(fmt.Sprint(hasher.Sum32()))
}
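Note: the deleted ComputeCombinedObjectsHash streamed a deterministic spew dump of each object (sorted keys, methods disabled) into a single FNV-32a hasher. A standalone sketch of the same technique, mirroring the removed body above:

// Standalone sketch of the hashing technique from the removed
// ComputeCombinedObjectsHash: spew-dump each object into one FNV-32a hasher.
package main

import (
    "fmt"
    "hash/fnv"

    "github.com/davecgh/go-spew/spew"
    "k8s.io/apimachinery/pkg/util/rand"
)

func combinedHash(objs ...any) string {
    hasher := fnv.New32a()

    // Deterministic serialization: sorted map keys, no Stringer methods.
    printer := spew.ConfigState{
        Indent:         " ",
        SortKeys:       true,
        DisableMethods: true,
        SpewKeys:       true,
    }

    for _, obj := range objs {
        printer.Fprintf(hasher, "%#v", obj)
    }

    // SafeEncodeString maps the digit string onto a DNS-safe alphabet.
    return rand.SafeEncodeString(fmt.Sprint(hasher.Sum32()))
}

func main() {
    fmt.Println(combinedHash(map[string]int{"a": 1}, []string{"x", "y"}))
}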
Some files were not shown because too many files have changed in this diff