Compare commits


11 Commits

Author | SHA1 | Message | Date
Yusuke Kuoka | d72e6f60c9 | Make EphemeralRunnerReconciler create runner pods faster and earlier | 2024-09-30 04:48:33 +00:00
Yusuke Kuoka | 3778fb0f65 | INCLUDE_RUNNER_SCALE_SET_NAME_IN_JOB_LABELS=true to include it in job metrics labels | 2024-08-29 05:20:33 +00:00
Yusuke Kuoka | ae7ade0191 | Add request labels to progress traces log | 2024-08-29 04:54:12 +00:00
Yusuke Kuoka | 8237ef2cba | wip | 2024-06-24 09:45:23 +00:00
Yusuke Kuoka | f9aa2c691c | Do enable ddotel integration on ghalistener | 2024-06-24 05:12:24 +00:00
Yusuke Kuoka | 2cda3ec63f | Remove workflows unused in a forked repo | 2024-05-27 05:21:51 +00:00
Yusuke Kuoka | c15380c97b | Remove legacy-canary-build job from global-publish-canary.yaml as unused in a forked repo | 2024-05-27 05:21:51 +00:00
Yusuke Kuoka | fa819abbb5 | Add job started attrs | 2024-05-27 05:21:51 +00:00
Yusuke Kuoka | 7bcc5c3448 | chore: goinstrument | 2024-05-27 05:21:51 +00:00
Yusuke Kuoka | a340bfa790 | Add prep_fork.sh for removing workflows unused in fork | 2024-05-27 05:21:51 +00:00
Yusuke Kuoka | 7a11d7270f | Add goinstrument helper | 2024-05-27 05:06:58 +00:00
65 changed files with 1036 additions and 1693 deletions

View File

@@ -47,7 +47,7 @@ runs:
-d '{"ref": "main", "inputs": { "arc_name": "${{inputs.arc-name}}" } }'
- name: Fetch workflow run & job ids
uses: actions/github-script@v7
uses: actions/github-script@v6
id: query_workflow
with:
script: |
@@ -128,7 +128,7 @@ runs:
- name: Wait for workflow to start running
if: inputs.wait-to-running == 'true' && inputs.wait-to-finish == 'false'
uses: actions/github-script@v7
uses: actions/github-script@v6
with:
script: |
function sleep(ms) {
@@ -156,7 +156,7 @@ runs:
- name: Wait for workflow to finish successfully
if: inputs.wait-to-finish == 'true'
uses: actions/github-script@v7
uses: actions/github-script@v6
with:
script: |
// Wait 5 minutes and make sure the workflow run we triggered completed with result 'success'

View File

@@ -27,7 +27,7 @@ runs:
using: "composite"
steps:
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
uses: docker/setup-buildx-action@v2
with:
# Pinning v0.9.1 for Buildx and BuildKit v0.10.6
# BuildKit v0.11 which has a bug causing intermittent
@@ -36,7 +36,7 @@ runs:
driver-opts: image=moby/buildkit:v0.10.6
- name: Build controller image
uses: docker/build-push-action@v5
uses: docker/build-push-action@v3
with:
file: Dockerfile
platforms: linux/amd64
@@ -56,7 +56,7 @@ runs:
- name: Get configure token
id: config-token
uses: peter-murray/workflow-application-token-action@dc0413987a085fa17d19df9e47d4677cf81ffef3
uses: peter-murray/workflow-application-token-action@8e1ba3bf1619726336414f1014e37f17fbadf1db
with:
application_id: ${{ inputs.app-id }}
application_private_key: ${{ inputs.app-pk }}

View File

@@ -24,23 +24,23 @@ runs:
shell: bash
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
uses: docker/setup-buildx-action@v2
with:
version: latest
- name: Login to DockerHub
if: ${{ github.event_name == 'release' || github.event_name == 'push' && github.ref == 'refs/heads/master' && inputs.password != '' }}
uses: docker/login-action@v3
uses: docker/login-action@v2
with:
username: ${{ inputs.username }}
password: ${{ inputs.password }}
- name: Login to GitHub Container Registry
if: ${{ github.event_name == 'release' || github.event_name == 'push' && github.ref == 'refs/heads/master' && inputs.ghcr_password != '' }}
uses: docker/login-action@v3
uses: docker/login-action@v2
with:
registry: ghcr.io
username: ${{ inputs.ghcr_username }}

View File

@@ -1,212 +0,0 @@
name: Publish ARC Helm Charts
# Refer to https://github.com/actions-runner-controller/releases#releases
# for details on why we use this approach
on:
push:
branches:
- master
paths:
- 'charts/**'
- '.github/workflows/arc-publish-chart.yaml'
- '!charts/actions-runner-controller/docs/**'
- '!charts/gha-runner-scale-set-controller/**'
- '!charts/gha-runner-scale-set/**'
- '!**.md'
workflow_dispatch:
inputs:
force:
description: 'Force publish even if the chart version is not bumped'
type: boolean
required: true
default: false
env:
KUBE_SCORE_VERSION: 1.10.0
HELM_VERSION: v3.8.0
permissions:
contents: write
concurrency:
group: ${{ github.workflow }}
cancel-in-progress: true
jobs:
lint-chart:
name: Lint Chart
runs-on: ubuntu-latest
outputs:
publish-chart: ${{ steps.publish-chart-step.outputs.publish }}
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Set up Helm
uses: azure/setup-helm@fe7b79cd5ee1e45176fcad797de68ecaf3ca4814
with:
version: ${{ env.HELM_VERSION }}
- name: Set up kube-score
run: |
wget https://github.com/zegl/kube-score/releases/download/v${{ env.KUBE_SCORE_VERSION }}/kube-score_${{ env.KUBE_SCORE_VERSION }}_linux_amd64 -O kube-score
chmod 755 kube-score
- name: Kube-score generated manifests
run: helm template --values charts/.ci/values-kube-score.yaml charts/* | ./kube-score score - --ignore-test pod-networkpolicy --ignore-test deployment-has-poddisruptionbudget --ignore-test deployment-has-host-podantiaffinity --ignore-test container-security-context --ignore-test pod-probes --ignore-test container-image-tag --enable-optional-test container-security-context-privileged --enable-optional-test container-security-context-readonlyrootfilesystem
# python is a requirement for the chart-testing action below (supports yamllint among other tests)
- uses: actions/setup-python@v5
with:
python-version: '3.11'
- name: Set up chart-testing
uses: helm/chart-testing-action@v2.6.0
- name: Run chart-testing (list-changed)
id: list-changed
run: |
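# ct prints the charts that changed relative to the target branch; empty output means there is nothing to install-test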
changed=$(ct list-changed --config charts/.ci/ct-config.yaml)
if [[ -n "$changed" ]]; then
echo "changed=true" >> $GITHUB_OUTPUT
fi
- name: Run chart-testing (lint)
run: |
ct lint --config charts/.ci/ct-config.yaml
- name: Create kind cluster
if: steps.list-changed.outputs.changed == 'true'
uses: helm/kind-action@v1.4.0
# We need cert-manager already installed in the cluster because we assume the CRDs exist
- name: Install cert-manager
if: steps.list-changed.outputs.changed == 'true'
run: |
helm repo add jetstack https://charts.jetstack.io --force-update
helm install cert-manager jetstack/cert-manager --set installCRDs=true --wait
- name: Run chart-testing (install)
if: steps.list-changed.outputs.changed == 'true'
run: ct install --config charts/.ci/ct-config.yaml
# WARNING: This relies on the latest release being at the top of the JSON from GitHub and a clean chart.yaml
- name: Check if Chart Publish is Needed
id: publish-chart-step
run: |
CHART_TEXT=$(curl -fs https://raw.githubusercontent.com/${{ github.repository }}/master/charts/actions-runner-controller/Chart.yaml)
NEW_CHART_VERSION=$(echo "$CHART_TEXT" | grep version: | cut -d ' ' -f 2)
RELEASE_LIST=$(curl -fs https://api.github.com/repos/${{ github.repository }}/releases | jq .[].tag_name | grep actions-runner-controller | cut -d '"' -f 2 | cut -d '-' -f 4)
LATEST_RELEASED_CHART_VERSION=$(echo $RELEASE_LIST | cut -d ' ' -f 1)
echo "CHART_VERSION_IN_MASTER=$NEW_CHART_VERSION" >> $GITHUB_ENV
echo "LATEST_CHART_VERSION=$LATEST_RELEASED_CHART_VERSION" >> $GITHUB_ENV
# Always publish if force is true
if [[ $NEW_CHART_VERSION != $LATEST_RELEASED_CHART_VERSION || "${{ inputs.force }}" == "true" ]]; then
echo "publish=true" >> $GITHUB_OUTPUT
else
echo "publish=false" >> $GITHUB_OUTPUT
fi
- name: Job summary
run: |
echo "Chart linting has been completed." >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "**Status:**" >> $GITHUB_STEP_SUMMARY
echo "- chart version in master: ${{ env.CHART_VERSION_IN_MASTER }}" >> $GITHUB_STEP_SUMMARY
echo "- latest chart version: ${{ env.LATEST_CHART_VERSION }}" >> $GITHUB_STEP_SUMMARY
echo "- publish new chart: ${{ steps.publish-chart-step.outputs.publish }}" >> $GITHUB_STEP_SUMMARY
publish-chart:
if: needs.lint-chart.outputs.publish-chart == 'true'
needs: lint-chart
name: Publish Chart
runs-on: ubuntu-latest
permissions:
contents: write # for helm/chart-releaser-action to push chart release and create a release
env:
CHART_TARGET_ORG: actions-runner-controller
CHART_TARGET_REPO: actions-runner-controller.github.io
CHART_TARGET_BRANCH: master
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Configure Git
run: |
git config user.name "$GITHUB_ACTOR"
git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
- name: Get Token
id: get_workflow_token
uses: peter-murray/workflow-application-token-action@dc0413987a085fa17d19df9e47d4677cf81ffef3
with:
application_id: ${{ secrets.ACTIONS_ACCESS_APP_ID }}
application_private_key: ${{ secrets.ACTIONS_ACCESS_PK }}
organization: ${{ env.CHART_TARGET_ORG }}
- name: Install chart-releaser
uses: helm/chart-releaser-action@v1.4.1
with:
install_only: true
install_dir: ${{ github.workspace }}/bin
- name: Package and upload release assets
run: |
cr package \
${{ github.workspace }}/charts/actions-runner-controller/ \
--package-path .cr-release-packages
cr upload \
--owner "$(echo ${{ github.repository }} | cut -d '/' -f 1)" \
--git-repo "$(echo ${{ github.repository }} | cut -d '/' -f 2)" \
--package-path .cr-release-packages \
--token ${{ secrets.GITHUB_TOKEN }}
- name: Generate updated index.yaml
run: |
cr index \
--owner "$(echo ${{ github.repository }} | cut -d '/' -f 1)" \
--git-repo "$(echo ${{ github.repository }} | cut -d '/' -f 2)" \
--index-path ${{ github.workspace }}/index.yaml \
--token ${{ secrets.GITHUB_TOKEN }} \
--push \
--pages-branch 'gh-pages' \
--pages-index-path 'index.yaml'
# Chart Release was never intended to publish to a different repo
# this workaround is intended to move the index.yaml to the target repo
# where the github pages are hosted
- name: Checkout target repository
uses: actions/checkout@v4
with:
repository: ${{ env.CHART_TARGET_ORG }}/${{ env.CHART_TARGET_REPO }}
path: ${{ env.CHART_TARGET_REPO }}
ref: ${{ env.CHART_TARGET_BRANCH }}
token: ${{ steps.get_workflow_token.outputs.token }}
- name: Copy index.yaml
run: |
cp ${{ github.workspace }}/index.yaml ${{ env.CHART_TARGET_REPO }}/actions-runner-controller/index.yaml
- name: Commit and push to target repository
run: |
git config user.name "$GITHUB_ACTOR"
git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
git add .
git commit -m "Update index.yaml"
git push
working-directory: ${{ github.workspace }}/${{ env.CHART_TARGET_REPO }}
- name: Job summary
run: |
echo "New helm chart has been published" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "**Status:**" >> $GITHUB_STEP_SUMMARY
echo "- New [index.yaml](https://github.com/${{ env.CHART_TARGET_ORG }}/${{ env.CHART_TARGET_REPO }}/tree/master/actions-runner-controller) pushed" >> $GITHUB_STEP_SUMMARY

View File

@@ -1,109 +0,0 @@
name: Publish ARC Image
# Refer to https://github.com/actions-runner-controller/releases#releases
# for details on why we use this approach
on:
release:
types:
- published
workflow_dispatch:
inputs:
release_tag_name:
description: 'Tag name of the release to publish'
required: true
push_to_registries:
description: 'Push images to registries'
required: true
type: boolean
default: false
permissions:
contents: write
packages: write
env:
TARGET_ORG: actions-runner-controller
TARGET_REPO: actions-runner-controller
concurrency:
group: ${{ github.workflow }}
cancel-in-progress: true
jobs:
release-controller:
name: Release
runs-on: ubuntu-latest
# gha-runner-scale-set has its own release workflow.
# We don't want to publish a new actions-runner-controller image
# when we release gha-runner-scale-set.
if: ${{ !startsWith(github.event.inputs.release_tag_name, 'gha-runner-scale-set-') }}
steps:
- name: Checkout
uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
go-version-file: 'go.mod'
- name: Install tools
run: |
curl -L -O https://github.com/kubernetes-sigs/kubebuilder/releases/download/v2.2.0/kubebuilder_2.2.0_linux_amd64.tar.gz
tar zxvf kubebuilder_2.2.0_linux_amd64.tar.gz
sudo mv kubebuilder_2.2.0_linux_amd64 /usr/local/kubebuilder
curl -s https://raw.githubusercontent.com/kubernetes-sigs/kustomize/master/hack/install_kustomize.sh | bash
sudo mv kustomize /usr/local/bin
curl -L -O https://github.com/tcnksm/ghr/releases/download/v0.13.0/ghr_v0.13.0_linux_amd64.tar.gz
tar zxvf ghr_v0.13.0_linux_amd64.tar.gz
sudo mv ghr_v0.13.0_linux_amd64/ghr /usr/local/bin
- name: Set version env variable
run: |
# Define the release tag name based on the event type
if [[ "${{ github.event_name }}" == "release" ]]; then
echo "VERSION=$(cat ${GITHUB_EVENT_PATH} | jq -r '.release.tag_name')" >> $GITHUB_ENV
elif [[ "${{ github.event_name }}" == "workflow_dispatch" ]]; then
echo "VERSION=${{ inputs.release_tag_name }}" >> $GITHUB_ENV
fi
- name: Upload artifacts
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
make github-release
- name: Get Token
id: get_workflow_token
uses: peter-murray/workflow-application-token-action@dc0413987a085fa17d19df9e47d4677cf81ffef3
with:
application_id: ${{ secrets.ACTIONS_ACCESS_APP_ID }}
application_private_key: ${{ secrets.ACTIONS_ACCESS_PK }}
organization: ${{ env.TARGET_ORG }}
- name: Resolve push to registries
run: |
# Define the push to registries based on the event type
if [[ "${{ github.event_name }}" == "release" ]]; then
echo "PUSH_TO_REGISTRIES=true" >> $GITHUB_ENV
elif [[ "${{ github.event_name }}" == "workflow_dispatch" ]]; then
echo "PUSH_TO_REGISTRIES=${{ inputs.push_to_registries }}" >> $GITHUB_ENV
fi
- name: Trigger Build And Push Images To Registries
run: |
# Authenticate
gh auth login --with-token <<< ${{ steps.get_workflow_token.outputs.token }}
# Trigger the workflow run
jq -n '{"event_type": "arc", "client_payload": {"release_tag_name": "${{ env.VERSION }}", "push_to_registries": "${{ env.PUSH_TO_REGISTRIES }}" }}' \
| gh api -X POST /repos/actions-runner-controller/releases/dispatches --input -
- name: Job summary
run: |
echo "The [publish-arc](https://github.com/actions-runner-controller/releases/blob/main/.github/workflows/publish-arc.yaml) workflow has been triggered!" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "**Parameters:**" >> $GITHUB_STEP_SUMMARY
echo "- Release tag: ${{ env.VERSION }}" >> $GITHUB_STEP_SUMMARY
echo "- Push to registries: ${{ env.PUSH_TO_REGISTRIES }}" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "**Status:**" >> $GITHUB_STEP_SUMMARY
echo "[https://github.com/actions-runner-controller/releases/actions/workflows/publish-arc.yaml](https://github.com/actions-runner-controller/releases/actions/workflows/publish-arc.yaml)" >> $GITHUB_STEP_SUMMARY

View File

@@ -1,79 +0,0 @@
name: Release ARC Runner Images
# Refer to https://github.com/actions-runner-controller/releases#releases
# for details on why we use this approach
on:
# We must do a trigger on a push: instead of a types: closed so GitHub Secrets
# are available to the workflow run
push:
branches:
- 'master'
paths:
- 'runner/VERSION'
- '.github/workflows/arc-release-runners.yaml'
env:
# Safeguard to prevent pushing images to registries after build
PUSH_TO_REGISTRIES: true
TARGET_ORG: actions-runner-controller
TARGET_WORKFLOW: release-runners.yaml
DOCKER_VERSION: 24.0.7
concurrency:
group: ${{ github.workflow }}
cancel-in-progress: true
jobs:
build-runners:
name: Trigger Build and Push of Runner Images
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Get runner version
id: versions
run: |
runner_current_version="$(echo -n $(cat runner/VERSION | grep 'RUNNER_VERSION=' | cut -d '=' -f2))"
container_hooks_current_version="$(echo -n $(cat runner/VERSION | grep 'RUNNER_CONTAINER_HOOKS_VERSION=' | cut -d '=' -f2))"
echo runner_version=$runner_current_version >> $GITHUB_OUTPUT
echo container_hooks_version=$container_hooks_current_version >> $GITHUB_OUTPUT
- name: Get Token
id: get_workflow_token
uses: peter-murray/workflow-application-token-action@dc0413987a085fa17d19df9e47d4677cf81ffef3
with:
application_id: ${{ secrets.ACTIONS_ACCESS_APP_ID }}
application_private_key: ${{ secrets.ACTIONS_ACCESS_PK }}
organization: ${{ env.TARGET_ORG }}
- name: Trigger Build And Push Runner Images To Registries
env:
RUNNER_VERSION: ${{ steps.versions.outputs.runner_version }}
CONTAINER_HOOKS_VERSION: ${{ steps.versions.outputs.container_hooks_version }}
run: |
# Authenticate
gh auth login --with-token <<< ${{ steps.get_workflow_token.outputs.token }}
# Trigger the workflow run
gh workflow run ${{ env.TARGET_WORKFLOW }} -R ${{ env.TARGET_ORG }}/releases \
-f runner_version=${{ env.RUNNER_VERSION }} \
-f docker_version=${{ env.DOCKER_VERSION }} \
-f runner_container_hooks_version=${{ env.CONTAINER_HOOKS_VERSION }} \
-f sha='${{ github.sha }}' \
-f push_to_registries=${{ env.PUSH_TO_REGISTRIES }}
- name: Job summary
env:
RUNNER_VERSION: ${{ steps.versions.outputs.runner_version }}
CONTAINER_HOOKS_VERSION: ${{ steps.versions.outputs.container_hooks_version }}
run: |
echo "The [release-runners.yaml](https://github.com/actions-runner-controller/releases/blob/main/.github/workflows/release-runners.yaml) workflow has been triggered!" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "**Parameters:**" >> $GITHUB_STEP_SUMMARY
echo "- runner_version: ${{ env.RUNNER_VERSION }}" >> $GITHUB_STEP_SUMMARY
echo "- docker_version: ${{ env.DOCKER_VERSION }}" >> $GITHUB_STEP_SUMMARY
echo "- runner_container_hooks_version: ${{ env.CONTAINER_HOOKS_VERSION }}" >> $GITHUB_STEP_SUMMARY
echo "- sha: ${{ github.sha }}" >> $GITHUB_STEP_SUMMARY
echo "- push_to_registries: ${{ env.PUSH_TO_REGISTRIES }}" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "**Status:**" >> $GITHUB_STEP_SUMMARY
echo "[https://github.com/actions-runner-controller/releases/actions/workflows/release-runners.yaml](https://github.com/actions-runner-controller/releases/actions/workflows/release-runners.yaml)" >> $GITHUB_STEP_SUMMARY

View File

@@ -1,153 +0,0 @@
# This workflows polls releases from actions/runner and in case of a new one it
# updates files containing runner version and opens a pull request.
name: Runner Updates Check (Scheduled Job)
on:
schedule:
# run daily
- cron: "0 9 * * *"
workflow_dispatch:
jobs:
# check_versions compares our current version and the latest available runner
# version and sets them as outputs.
check_versions:
runs-on: ubuntu-latest
env:
GH_TOKEN: ${{ github.token }}
outputs:
runner_current_version: ${{ steps.runner_versions.outputs.runner_current_version }}
runner_latest_version: ${{ steps.runner_versions.outputs.runner_latest_version }}
container_hooks_current_version: ${{ steps.container_hooks_versions.outputs.container_hooks_current_version }}
container_hooks_latest_version: ${{ steps.container_hooks_versions.outputs.container_hooks_latest_version }}
steps:
- uses: actions/checkout@v4
- name: Get runner current and latest versions
id: runner_versions
run: |
CURRENT_VERSION="$(echo -n $(cat runner/VERSION | grep 'RUNNER_VERSION=' | cut -d '=' -f2))"
echo "Current version: $CURRENT_VERSION"
echo runner_current_version=$CURRENT_VERSION >> $GITHUB_OUTPUT
LATEST_VERSION=$(gh release list --exclude-drafts --exclude-pre-releases --limit 1 -R actions/runner | grep -oP '(?<=v)[0-9.]+' | head -1)
echo "Latest version: $LATEST_VERSION"
echo runner_latest_version=$LATEST_VERSION >> $GITHUB_OUTPUT
- name: Get container-hooks current and latest versions
id: container_hooks_versions
run: |
CURRENT_VERSION="$(echo -n $(cat runner/VERSION | grep 'RUNNER_CONTAINER_HOOKS_VERSION=' | cut -d '=' -f2))"
echo "Current version: $CURRENT_VERSION"
echo container_hooks_current_version=$CURRENT_VERSION >> $GITHUB_OUTPUT
LATEST_VERSION=$(gh release list --exclude-drafts --exclude-pre-releases --limit 1 -R actions/runner-container-hooks | grep -oP '(?<=v)[0-9.]+' | head -1)
echo "Latest version: $LATEST_VERSION"
echo container_hooks_latest_version=$LATEST_VERSION >> $GITHUB_OUTPUT
# check_pr checks if a PR for the same update already exists. It only runs if
# runner latest version != our current version. If no existing PR is found,
# it sets a PR name as output.
check_pr:
runs-on: ubuntu-latest
needs: check_versions
if: needs.check_versions.outputs.runner_current_version != needs.check_versions.outputs.runner_latest_version || needs.check_versions.outputs.container_hooks_current_version != needs.check_versions.outputs.container_hooks_latest_version
outputs:
pr_name: ${{ steps.pr_name.outputs.pr_name }}
env:
GH_TOKEN: ${{ github.token }}
steps:
- name: debug
run: |
echo "RUNNER_CURRENT_VERSION=${{ needs.check_versions.outputs.runner_current_version }}"
echo "RUNNER_LATEST_VERSION=${{ needs.check_versions.outputs.runner_latest_version }}"
echo "CONTAINER_HOOKS_CURRENT_VERSION=${{ needs.check_versions.outputs.container_hooks_current_version }}"
echo "CONTAINER_HOOKS_LATEST_VERSION=${{ needs.check_versions.outputs.container_hooks_latest_version }}"
- uses: actions/checkout@v4
- name: PR Name
id: pr_name
env:
RUNNER_CURRENT_VERSION: ${{ needs.check_versions.outputs.runner_current_version }}
RUNNER_LATEST_VERSION: ${{ needs.check_versions.outputs.runner_latest_version }}
CONTAINER_HOOKS_CURRENT_VERSION: ${{ needs.check_versions.outputs.container_hooks_current_version }}
CONTAINER_HOOKS_LATEST_VERSION: ${{ needs.check_versions.outputs.container_hooks_latest_version }}
# Generate a PR name with the following title:
# Updates: runner to v2.304.0 and container-hooks to v0.3.1
run: |
RUNNER_MESSAGE="runner to v${RUNNER_LATEST_VERSION}"
CONTAINER_HOOKS_MESSAGE="container-hooks to v${CONTAINER_HOOKS_LATEST_VERSION}"
PR_NAME="Updates:"
if [ "$RUNNER_CURRENT_VERSION" != "$RUNNER_LATEST_VERSION" ]
then
PR_NAME="$PR_NAME $RUNNER_MESSAGE"
fi
if [ "$CONTAINER_HOOKS_CURRENT_VERSION" != "$CONTAINER_HOOKS_LATEST_VERSION" ]
then
PR_NAME="$PR_NAME $CONTAINER_HOOKS_MESSAGE"
fi
result=$(gh pr list --search "$PR_NAME" --json number --jq ".[].number" --limit 1)
if [ -z "$result" ]
then
echo "No existing PRs found, setting output with pr_name=$PR_NAME"
echo pr_name=$PR_NAME >> $GITHUB_OUTPUT
else
echo "Found a PR with title '$PR_NAME' already existing: ${{ github.server_url }}/${{ github.repository }}/pull/$result"
fi
# update_version updates runner version in the files listed below, commits
# the changes and opens a pull request as `github-actions` bot.
update_version:
runs-on: ubuntu-latest
needs:
- check_versions
- check_pr
if: needs.check_pr.outputs.pr_name
permissions:
pull-requests: write
contents: write
actions: write
env:
GH_TOKEN: ${{ github.token }}
RUNNER_CURRENT_VERSION: ${{ needs.check_versions.outputs.runner_current_version }}
RUNNER_LATEST_VERSION: ${{ needs.check_versions.outputs.runner_latest_version }}
CONTAINER_HOOKS_CURRENT_VERSION: ${{ needs.check_versions.outputs.container_hooks_current_version }}
CONTAINER_HOOKS_LATEST_VERSION: ${{ needs.check_versions.outputs.container_hooks_latest_version }}
PR_NAME: ${{ needs.check_pr.outputs.pr_name }}
steps:
- uses: actions/checkout@v4
- name: New branch
run: git checkout -b update-runner-"$(date +%Y-%m-%d)"
- name: Update files
run: |
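# ${VAR//./\\.} escapes each dot so sed matches the version literally rather than as a regex wildcard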
CURRENT_VERSION="${RUNNER_CURRENT_VERSION//./\\.}"
LATEST_VERSION="${RUNNER_LATEST_VERSION//./\\.}"
sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" runner/VERSION
sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" runner/Makefile
sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" Makefile
sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" test/e2e/e2e_test.go
CURRENT_VERSION="${CONTAINER_HOOKS_CURRENT_VERSION//./\\.}"
LATEST_VERSION="${CONTAINER_HOOKS_LATEST_VERSION//./\\.}"
sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" runner/VERSION
sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" runner/Makefile
sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" Makefile
sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" test/e2e/e2e_test.go
- name: Commit changes
run: |
# from https://github.com/orgs/community/discussions/26560
git config user.email "41898282+github-actions[bot]@users.noreply.github.com"
git config user.name "github-actions[bot]"
git add .
git commit -m "$PR_NAME"
git push -u origin HEAD
- name: Create pull request
run: gh pr create -f -l "runners update"

View File

@@ -1,103 +0,0 @@
name: Validate Helm Chart
on:
pull_request:
branches:
- master
paths:
- 'charts/**'
- '.github/workflows/arc-validate-chart.yaml'
- '!charts/actions-runner-controller/docs/**'
- '!**.md'
- '!charts/gha-runner-scale-set-controller/**'
- '!charts/gha-runner-scale-set/**'
push:
paths:
- 'charts/**'
- '.github/workflows/arc-validate-chart.yaml'
- '!charts/actions-runner-controller/docs/**'
- '!**.md'
- '!charts/gha-runner-scale-set-controller/**'
- '!charts/gha-runner-scale-set/**'
workflow_dispatch:
env:
KUBE_SCORE_VERSION: 1.10.0
HELM_VERSION: v3.8.0
permissions:
contents: read
concurrency:
# This will make sure we only apply the concurrency limits on pull requests
# but not pushes to master branch by making the concurrency group name unique
# for pushes
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
validate-chart:
name: Lint Chart
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Set up Helm
# Using https://github.com/Azure/setup-helm/releases/tag/v4.2
uses: azure/setup-helm@fe7b79cd5ee1e45176fcad797de68ecaf3ca4814
with:
version: ${{ env.HELM_VERSION }}
- name: Set up kube-score
run: |
wget https://github.com/zegl/kube-score/releases/download/v${{ env.KUBE_SCORE_VERSION }}/kube-score_${{ env.KUBE_SCORE_VERSION }}_linux_amd64 -O kube-score
chmod 755 kube-score
- name: Kube-score generated manifests
run: helm template --values charts/.ci/values-kube-score.yaml charts/* | ./kube-score score -
--ignore-test pod-networkpolicy
--ignore-test deployment-has-poddisruptionbudget
--ignore-test deployment-has-host-podantiaffinity
--ignore-test container-security-context
--ignore-test pod-probes
--ignore-test container-image-tag
--enable-optional-test container-security-context-privileged
--enable-optional-test container-security-context-readonlyrootfilesystem
# python is a requirement for the chart-testing action below (supports yamllint among other tests)
- uses: actions/setup-python@v5
with:
python-version: '3.11'
- name: Set up chart-testing
uses: helm/chart-testing-action@v2.6.0
- name: Run chart-testing (list-changed)
id: list-changed
run: |
changed=$(ct list-changed --config charts/.ci/ct-config.yaml)
if [[ -n "$changed" ]]; then
echo "changed=true" >> $GITHUB_OUTPUT
fi
- name: Run chart-testing (lint)
run: |
ct lint --config charts/.ci/ct-config.yaml
- name: Create kind cluster
uses: helm/kind-action@v1.4.0
if: steps.list-changed.outputs.changed == 'true'
# We need cert-manager already installed in the cluster because we assume the CRDs exist
- name: Install cert-manager
if: steps.list-changed.outputs.changed == 'true'
run: |
helm repo add jetstack https://charts.jetstack.io --force-update
helm install cert-manager jetstack/cert-manager --set installCRDs=true --wait
- name: Run chart-testing (install)
if: steps.list-changed.outputs.changed == 'true'
run: |
ct install --config charts/.ci/ct-config.yaml

View File

@@ -1,52 +0,0 @@
name: Validate ARC Runners
on:
pull_request:
branches:
- '**'
paths:
- 'runner/**'
- 'test/startup/**'
- '!**.md'
permissions:
contents: read
concurrency:
# This will make sure we only apply the concurrency limits on pull requests
# but not pushes to master branch by making the concurrency group name unique
# for pushes
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
shellcheck:
name: runner / shellcheck
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: shellcheck
uses: reviewdog/action-shellcheck@v1
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
path: "./runner"
pattern: |
*.sh
*.bash
update-status
# Make this consistent with `make shellcheck`
shellcheck_flags: "--shell bash --source-path runner"
exclude: "./.git/*"
check_all_files_with_shebangs: "false"
# Set this to "true" once we addressed all the shellcheck findings
fail_on_error: "false"
test-runner-entrypoint:
name: Test entrypoint
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Run tests
run: |
make acceptance/runner/startup

View File

@@ -16,7 +16,7 @@ env:
TARGET_ORG: actions-runner-controller
TARGET_REPO: arc_e2e_test_dummy
IMAGE_NAME: "arc-test-image"
IMAGE_VERSION: "0.9.3"
IMAGE_VERSION: "0.9.2"
concurrency:
# This will make sure we only apply the concurrency limits on pull requests
@@ -33,7 +33,7 @@ jobs:
env:
WORKFLOW_FILE: "arc-test-workflow.yaml"
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3
with:
ref: ${{github.head_ref}}
@@ -122,7 +122,7 @@ jobs:
env:
WORKFLOW_FILE: "arc-test-workflow.yaml"
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3
with:
ref: ${{github.head_ref}}
@@ -213,7 +213,7 @@ jobs:
env:
WORKFLOW_FILE: arc-test-dind-workflow.yaml
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3
with:
ref: ${{github.head_ref}}
@@ -303,7 +303,7 @@ jobs:
env:
WORKFLOW_FILE: "arc-test-kubernetes-workflow.yaml"
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3
with:
ref: ${{github.head_ref}}
@@ -402,7 +402,7 @@ jobs:
env:
WORKFLOW_FILE: "arc-test-workflow.yaml"
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3
with:
ref: ${{github.head_ref}}
@@ -503,7 +503,7 @@ jobs:
env:
WORKFLOW_FILE: "arc-test-workflow.yaml"
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3
with:
ref: ${{github.head_ref}}
@@ -598,7 +598,7 @@ jobs:
env:
WORKFLOW_FILE: "arc-test-workflow.yaml"
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3
with:
ref: ${{github.head_ref}}
@@ -718,7 +718,7 @@ jobs:
env:
WORKFLOW_FILE: "arc-test-sleepy-matrix.yaml"
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3
with:
ref: ${{github.head_ref}}
@@ -888,7 +888,7 @@ jobs:
env:
WORKFLOW_FILE: arc-test-workflow.yaml
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3
with:
ref: ${{ github.head_ref }}

View File

@@ -1,212 +0,0 @@
name: (gha) Publish Helm Charts
on:
workflow_dispatch:
inputs:
ref:
description: 'The branch, tag or SHA to cut a release from'
required: false
type: string
default: ''
release_tag_name:
description: 'The name to tag the controller image with'
required: true
type: string
default: 'canary'
push_to_registries:
description: 'Push images to registries'
required: true
type: boolean
default: false
publish_gha_runner_scale_set_controller_chart:
description: 'Publish new helm chart for gha-runner-scale-set-controller'
required: true
type: boolean
default: false
publish_gha_runner_scale_set_chart:
description: 'Publish new helm chart for gha-runner-scale-set'
required: true
type: boolean
default: false
env:
HELM_VERSION: v3.8.0
permissions:
packages: write
concurrency:
group: ${{ github.workflow }}
cancel-in-progress: true
jobs:
build-push-image:
name: Build and push controller image
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
with:
# If inputs.ref is empty, it'll resolve to the default branch
ref: ${{ inputs.ref }}
- name: Check chart versions
# Binary version and chart versions need to match.
# In case of an upgrade, the controller will try to clean up
# resources with older versions that should have been cleaned up
# during the upgrade process
run: ./hack/check-gh-chart-versions.sh ${{ inputs.release_tag_name }}
- name: Resolve parameters
id: resolve_parameters
run: |
resolvedRef="${{ inputs.ref }}"
if [ -z "$resolvedRef" ]
then
resolvedRef="${{ github.ref }}"
fi
echo "resolved_ref=$resolvedRef" >> $GITHUB_OUTPUT
echo "INFO: Resolving short SHA for $resolvedRef"
echo "short_sha=$(git rev-parse --short $resolvedRef)" >> $GITHUB_OUTPUT
echo "INFO: Normalizing repository name (lowercase)"
echo "repository_owner=$(echo ${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
# Pinning v0.9.1 for Buildx and BuildKit v0.10.6
# BuildKit v0.11 which has a bug causing intermittent
# failures pushing images to GHCR
version: v0.9.1
driver-opts: image=moby/buildkit:v0.10.6
- name: Login to GitHub Container Registry
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Build & push controller image
uses: docker/build-push-action@v5
with:
file: Dockerfile
platforms: linux/amd64,linux/arm64
build-args: VERSION=${{ inputs.release_tag_name }}
push: ${{ inputs.push_to_registries }}
tags: |
ghcr.io/${{ steps.resolve_parameters.outputs.repository_owner }}/gha-runner-scale-set-controller:${{ inputs.release_tag_name }}
ghcr.io/${{ steps.resolve_parameters.outputs.repository_owner }}/gha-runner-scale-set-controller:${{ inputs.release_tag_name }}-${{ steps.resolve_parameters.outputs.short_sha }}
cache-from: type=gha
cache-to: type=gha,mode=max
- name: Job summary
run: |
echo "The [gha-publish-chart.yaml](https://github.com/actions/actions-runner-controller/blob/main/.github/workflows/gha-publish-chart.yaml) workflow run was completed successfully!" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "**Parameters:**" >> $GITHUB_STEP_SUMMARY
echo "- Ref: ${{ steps.resolve_parameters.outputs.resolvedRef }}" >> $GITHUB_STEP_SUMMARY
echo "- Short SHA: ${{ steps.resolve_parameters.outputs.short_sha }}" >> $GITHUB_STEP_SUMMARY
echo "- Release tag: ${{ inputs.release_tag_name }}" >> $GITHUB_STEP_SUMMARY
echo "- Push to registries: ${{ inputs.push_to_registries }}" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
publish-helm-chart-gha-runner-scale-set-controller:
if: ${{ inputs.publish_gha_runner_scale_set_controller_chart == true }}
needs: build-push-image
name: Publish Helm chart for gha-runner-scale-set-controller
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
with:
# If inputs.ref is empty, it'll resolve to the default branch
ref: ${{ inputs.ref }}
- name: Resolve parameters
id: resolve_parameters
run: |
resolvedRef="${{ inputs.ref }}"
if [ -z "$resolvedRef" ]
then
resolvedRef="${{ github.ref }}"
fi
echo "INFO: Resolving short SHA for $resolvedRef"
echo "short_sha=$(git rev-parse --short $resolvedRef)" >> $GITHUB_OUTPUT
echo "INFO: Normalizing repository name (lowercase)"
echo "repository_owner=$(echo ${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT
- name: Set up Helm
# Using https://github.com/Azure/setup-helm/releases/tag/v4.2
uses: azure/setup-helm@fe7b79cd5ee1e45176fcad797de68ecaf3ca4814
with:
version: ${{ env.HELM_VERSION }}
- name: Publish new helm chart for gha-runner-scale-set-controller
run: |
echo ${{ secrets.GITHUB_TOKEN }} | helm registry login ghcr.io --username ${{ github.actor }} --password-stdin
GHA_RUNNER_SCALE_SET_CONTROLLER_CHART_VERSION_TAG=$(cat charts/gha-runner-scale-set-controller/Chart.yaml | grep version: | cut -d " " -f 2)
echo "GHA_RUNNER_SCALE_SET_CONTROLLER_CHART_VERSION_TAG=${GHA_RUNNER_SCALE_SET_CONTROLLER_CHART_VERSION_TAG}" >> $GITHUB_ENV
helm package charts/gha-runner-scale-set-controller/ --version="${GHA_RUNNER_SCALE_SET_CONTROLLER_CHART_VERSION_TAG}"
helm push gha-runner-scale-set-controller-"${GHA_RUNNER_SCALE_SET_CONTROLLER_CHART_VERSION_TAG}".tgz oci://ghcr.io/${{ steps.resolve_parameters.outputs.repository_owner }}/actions-runner-controller-charts
- name: Job summary
run: |
echo "New helm chart for gha-runner-scale-set-controller published successfully!" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "**Parameters:**" >> $GITHUB_STEP_SUMMARY
echo "- Ref: ${{ steps.resolve_parameters.outputs.resolvedRef }}" >> $GITHUB_STEP_SUMMARY
echo "- Short SHA: ${{ steps.resolve_parameters.outputs.short_sha }}" >> $GITHUB_STEP_SUMMARY
echo "- gha-runner-scale-set-controller Chart version: ${{ env.GHA_RUNNER_SCALE_SET_CONTROLLER_CHART_VERSION_TAG }}" >> $GITHUB_STEP_SUMMARY
publish-helm-chart-gha-runner-scale-set:
if: ${{ inputs.publish_gha_runner_scale_set_chart == true }}
needs: build-push-image
name: Publish Helm chart for gha-runner-scale-set
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
with:
# If inputs.ref is empty, it'll resolve to the default branch
ref: ${{ inputs.ref }}
- name: Resolve parameters
id: resolve_parameters
run: |
resolvedRef="${{ inputs.ref }}"
if [ -z "$resolvedRef" ]
then
resolvedRef="${{ github.ref }}"
fi
echo "INFO: Resolving short SHA for $resolvedRef"
echo "short_sha=$(git rev-parse --short $resolvedRef)" >> $GITHUB_OUTPUT
echo "INFO: Normalizing repository name (lowercase)"
echo "repository_owner=$(echo ${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT
- name: Set up Helm
# Using https://github.com/Azure/setup-helm/releases/tag/v4.2
uses: azure/setup-helm@fe7b79cd5ee1e45176fcad797de68ecaf3ca4814
with:
version: ${{ env.HELM_VERSION }}
- name: Publish new helm chart for gha-runner-scale-set
run: |
echo ${{ secrets.GITHUB_TOKEN }} | helm registry login ghcr.io --username ${{ github.actor }} --password-stdin
GHA_RUNNER_SCALE_SET_CHART_VERSION_TAG=$(cat charts/gha-runner-scale-set/Chart.yaml | grep version: | cut -d " " -f 2)
echo "GHA_RUNNER_SCALE_SET_CHART_VERSION_TAG=${GHA_RUNNER_SCALE_SET_CHART_VERSION_TAG}" >> $GITHUB_ENV
helm package charts/gha-runner-scale-set/ --version="${GHA_RUNNER_SCALE_SET_CHART_VERSION_TAG}"
helm push gha-runner-scale-set-"${GHA_RUNNER_SCALE_SET_CHART_VERSION_TAG}".tgz oci://ghcr.io/${{ steps.resolve_parameters.outputs.repository_owner }}/actions-runner-controller-charts
- name: Job summary
run: |
echo "New helm chart for gha-runner-scale-set published successfully!" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "**Parameters:**" >> $GITHUB_STEP_SUMMARY
echo "- Ref: ${{ steps.resolve_parameters.outputs.resolvedRef }}" >> $GITHUB_STEP_SUMMARY
echo "- Short SHA: ${{ steps.resolve_parameters.outputs.short_sha }}" >> $GITHUB_STEP_SUMMARY
echo "- gha-runner-scale-set Chart version: ${{ env.GHA_RUNNER_SCALE_SET_CHART_VERSION_TAG }}" >> $GITHUB_STEP_SUMMARY

View File

@@ -1,125 +0,0 @@
name: (gha) Validate Helm Charts
on:
pull_request:
branches:
- master
paths:
- 'charts/**'
- '.github/workflows/gha-validate-chart.yaml'
- '!charts/actions-runner-controller/**'
- '!**.md'
push:
paths:
- 'charts/**'
- '.github/workflows/gha-validate-chart.yaml'
- '!charts/actions-runner-controller/**'
- '!**.md'
workflow_dispatch:
env:
KUBE_SCORE_VERSION: 1.16.1
HELM_VERSION: v3.8.0
permissions:
contents: read
concurrency:
# This will make sure we only apply the concurrency limits on pull requests
# but not pushes to master branch by making the concurrency group name unique
# for pushes
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
validate-chart:
name: Lint Chart
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Set up Helm
# Using https://github.com/Azure/setup-helm/releases/tag/v4.2
uses: azure/setup-helm@fe7b79cd5ee1e45176fcad797de68ecaf3ca4814
with:
version: ${{ env.HELM_VERSION }}
- name: Set up kube-score
run: |
wget https://github.com/zegl/kube-score/releases/download/v${{ env.KUBE_SCORE_VERSION }}/kube-score_${{ env.KUBE_SCORE_VERSION }}_linux_amd64 -O kube-score
chmod 755 kube-score
- name: Kube-score generated manifests
run: helm template --values charts/.ci/values-kube-score.yaml charts/* | ./kube-score score -
--ignore-test pod-networkpolicy
--ignore-test deployment-has-poddisruptionbudget
--ignore-test deployment-has-host-podantiaffinity
--ignore-test container-security-context
--ignore-test pod-probes
--ignore-test container-image-tag
--enable-optional-test container-security-context-privileged
--enable-optional-test container-security-context-readonlyrootfilesystem
# python is a requirement for the chart-testing action below (supports yamllint among other tests)
- uses: actions/setup-python@v5
with:
python-version: '3.11'
- name: Set up chart-testing
uses: helm/chart-testing-action@v2.6.0
- name: Run chart-testing (list-changed)
id: list-changed
run: |
ct version
changed=$(ct list-changed --config charts/.ci/ct-config-gha.yaml)
if [[ -n "$changed" ]]; then
echo "changed=true" >> $GITHUB_OUTPUT
fi
- name: Run chart-testing (lint)
run: |
ct lint --config charts/.ci/ct-config-gha.yaml
- name: Set up docker buildx
uses: docker/setup-buildx-action@v3
if: steps.list-changed.outputs.changed == 'true'
with:
version: latest
- name: Build controller image
uses: docker/build-push-action@v5
if: steps.list-changed.outputs.changed == 'true'
with:
file: Dockerfile
platforms: linux/amd64
load: true
build-args: |
DOCKER_IMAGE_NAME=test-arc
VERSION=dev
tags: |
test-arc:dev
cache-from: type=gha
cache-to: type=gha,mode=max
- name: Create kind cluster
uses: helm/kind-action@v1.4.0
if: steps.list-changed.outputs.changed == 'true'
with:
cluster_name: chart-testing
- name: Load image into cluster
if: steps.list-changed.outputs.changed == 'true'
run: |
export DOCKER_IMAGE_NAME=test-arc
export VERSION=dev
export IMG_RESULT=load
make docker-buildx
kind load docker-image test-arc:dev --name chart-testing
- name: Run chart-testing (install)
if: steps.list-changed.outputs.changed == 'true'
run: |
ct install --config charts/.ci/ct-config-gha.yaml

View File

@@ -46,54 +46,15 @@ env:
PUSH_TO_REGISTRIES: true
jobs:
legacy-canary-build:
name: Build and Publish Legacy Canary Image
runs-on: ubuntu-latest
env:
DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }}
TARGET_ORG: actions-runner-controller
TARGET_REPO: actions-runner-controller
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Get Token
id: get_workflow_token
uses: peter-murray/workflow-application-token-action@dc0413987a085fa17d19df9e47d4677cf81ffef3
with:
application_id: ${{ secrets.ACTIONS_ACCESS_APP_ID }}
application_private_key: ${{ secrets.ACTIONS_ACCESS_PK }}
organization: ${{ env.TARGET_ORG }}
- name: Trigger Build And Push Images To Registries
run: |
# Authenticate
gh auth login --with-token <<< ${{ steps.get_workflow_token.outputs.token }}
# Trigger the workflow run
jq -n '{"event_type": "canary", "client_payload": {"sha": "${{ github.sha }}", "push_to_registries": ${{ env.PUSH_TO_REGISTRIES }}}}' \
| gh api -X POST /repos/actions-runner-controller/releases/dispatches --input -
- name: Job summary
run: |
echo "The [publish-canary](https://github.com/actions-runner-controller/releases/blob/main/.github/workflows/publish-canary.yaml) workflow has been triggered!" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "**Parameters:**" >> $GITHUB_STEP_SUMMARY
echo "- sha: ${{ github.sha }}" >> $GITHUB_STEP_SUMMARY
echo "- Push to registries: ${{ env.PUSH_TO_REGISTRIES }}" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "**Status:**" >> $GITHUB_STEP_SUMMARY
echo "[https://github.com/actions-runner-controller/releases/actions/workflows/publish-canary.yaml](https://github.com/actions-runner-controller/releases/actions/workflows/publish-canary.yaml)" >> $GITHUB_STEP_SUMMARY
canary-build:
name: Build and Publish gha-runner-scale-set-controller Canary Image
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v3
- name: Login to GitHub Container Registry
uses: docker/login-action@v3
uses: docker/login-action@v2
with:
registry: ghcr.io
username: ${{ github.actor }}
@@ -110,16 +71,16 @@ jobs:
echo "repository_owner=$(echo ${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
uses: docker/setup-buildx-action@v2
with:
version: latest
# Unstable builds - run at your own risk
- name: Build and Push
uses: docker/build-push-action@v5
uses: docker/build-push-action@v3
with:
context: .
file: ./Dockerfile

View File

@@ -25,10 +25,10 @@ jobs:
security-events: write
steps:
- name: Checkout repository
uses: actions/checkout@v4
uses: actions/checkout@v3
- name: Install Go
uses: actions/setup-go@v5
uses: actions/setup-go@v4
with:
go-version-file: go.mod

View File

@@ -1,29 +0,0 @@
name: First Interaction
on:
issues:
types: [opened]
pull_request:
branches: [master]
types: [opened]
jobs:
check_for_first_interaction:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/first-interaction@main
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
issue-message: |
Hello! Thank you for filing an issue.
The maintainers will triage your issue shortly.
In the meantime, please take a look at the [troubleshooting guide](https://github.com/actions/actions-runner-controller/blob/master/TROUBLESHOOTING.md) for bug reports.
If this is a feature request, please review our [contribution guidelines](https://github.com/actions/actions-runner-controller/blob/master/CONTRIBUTING.md).
pr-message: |
Hello! Thank you for your contribution.
Please review our [contribution guidelines](https://github.com/actions/actions-runner-controller/blob/master/CONTRIBUTING.md) to understand the project's testing and code conventions.

View File

@@ -1,25 +0,0 @@
name: Run Stale Bot
on:
schedule:
- cron: '30 1 * * *'
permissions:
contents: read
jobs:
stale:
name: Run Stale
runs-on: ubuntu-latest
permissions:
issues: write # for actions/stale to close stale issues
pull-requests: write # for actions/stale to close stale PRs
steps:
- uses: actions/stale@v6
with:
stale-issue-message: 'This issue is stale because it has been open 30 days with no activity. Remove stale label or comment or this will be closed in 5 days.'
# turn off stale for both issues and PRs
days-before-stale: -1
# turn stale back on for issues only
days-before-issue-stale: 30
days-before-issue-close: 14
exempt-issue-labels: 'pinned,security,enhancement,refactor,documentation,chore,bug,dependencies,needs-investigation'

View File

@@ -29,8 +29,8 @@ jobs:
fmt:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
- uses: actions/checkout@v3
- uses: actions/setup-go@v4
with:
go-version-file: 'go.mod'
cache: false
@@ -42,13 +42,13 @@ jobs:
lint:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
- uses: actions/checkout@v3
- uses: actions/setup-go@v4
with:
go-version-file: 'go.mod'
cache: false
- name: golangci-lint
uses: golangci/golangci-lint-action@v6
uses: golangci/golangci-lint-action@v3
with:
only-new-issues: true
version: v1.55.2
@@ -56,8 +56,8 @@ jobs:
generate:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
- uses: actions/checkout@v3
- uses: actions/setup-go@v4
with:
go-version-file: 'go.mod'
cache: false
@@ -69,8 +69,8 @@ jobs:
test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
- uses: actions/checkout@v3
- uses: actions/setup-go@v4
with:
go-version-file: 'go.mod'
- run: make manifests

View File

@@ -1,5 +1,5 @@
# Build the manager binary
FROM --platform=$BUILDPLATFORM golang:1.22.4 as builder
FROM --platform=$BUILDPLATFORM golang:1.22.1 as builder
WORKDIR /workspace

View File

@@ -6,7 +6,7 @@ endif
DOCKER_USER ?= $(shell echo ${DOCKER_IMAGE_NAME} | cut -d / -f1)
VERSION ?= dev
COMMIT_SHA = $(shell git rev-parse HEAD)
RUNNER_VERSION ?= 2.317.0
RUNNER_VERSION ?= 2.316.1
TARGETPLATFORM ?= $(shell arch)
RUNNER_NAME ?= ${DOCKER_USER}/actions-runner
RUNNER_TAG ?= ${VERSION}

View File

@@ -15,13 +15,13 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.9.3
version: 0.9.2
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "0.9.3"
appVersion: "0.9.2"
home: https://github.com/actions/actions-runner-controller

View File

@@ -79,9 +79,6 @@ spec:
- "--listener-metrics-endpoint="
- "--metrics-addr=0"
{{- end }}
{{- range .Values.flags.excludeLabelPropagationPrefixes }}
- "--exclude-label-propagation-prefix={{ . }}"
{{- end }}
command:
- "/manager"
{{- with .Values.metrics }}

View File

@@ -1035,41 +1035,3 @@ func TestControllerDeployment_MetricsPorts(t *testing.T) {
assert.Equal(t, value.frequency, 1, fmt.Sprintf("frequency of %q is not 1", key))
}
}
func TestDeployment_excludeLabelPropagationPrefixes(t *testing.T) {
t.Parallel()
// Path to the helm chart we will test
helmChartPath, err := filepath.Abs("../../gha-runner-scale-set-controller")
require.NoError(t, err)
chartContent, err := os.ReadFile(filepath.Join(helmChartPath, "Chart.yaml"))
require.NoError(t, err)
chart := new(Chart)
err = yaml.Unmarshal(chartContent, chart)
require.NoError(t, err)
releaseName := "test-arc"
namespaceName := "test-" + strings.ToLower(random.UniqueId())
options := &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"flags.excludeLabelPropagationPrefixes[0]": "prefix.com/",
"flags.excludeLabelPropagationPrefixes[1]": "complete.io/label",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/deployment.yaml"})
var deployment appsv1.Deployment
helm.UnmarshalK8SYaml(t, output, &deployment)
require.Len(t, deployment.Spec.Template.Spec.Containers, 1, "Expected one container")
container := deployment.Spec.Template.Spec.Containers[0]
assert.Contains(t, container.Args, "--exclude-label-propagation-prefix=prefix.com/")
assert.Contains(t, container.Args, "--exclude-label-propagation-prefix=complete.io/label")
}

View File

@@ -121,12 +121,3 @@ flags:
## This can lead to a longer time to apply the change but it will ensure
## that you don't have any overprovisioning of runners.
updateStrategy: "immediate"
## Defines a list of prefixes that should not be propagated to internal resources.
## This is useful when you have labels that are used for internal purposes and should not be propagated to internal resources.
## See https://github.com/actions/actions-runner-controller/issues/3533 for more information.
##
## By default, all labels are propagated to internal resources
## Labels that match prefix specified in the list are excluded from propagation.
# excludeLabelPropagationPrefixes:
# - "argocd.argoproj.io/instance"

View File

@@ -15,13 +15,13 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.9.3
version: 0.9.2
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "0.9.3"
appVersion: "0.9.2"
home: https://github.com/actions/actions-runner-controller

View File

@@ -11,6 +11,7 @@ import (
"github.com/actions/actions-runner-controller/cmd/ghalistener/worker"
"github.com/actions/actions-runner-controller/github/actions"
"github.com/go-logr/logr"
"go.opentelemetry.io/otel"
"golang.org/x/sync/errgroup"
)
@@ -105,6 +106,9 @@ func New(config config.Config) (*App, error) {
}
func (app *App) Run(ctx context.Context) error {
ctx, span := otel.Tracer("arc").Start(ctx, "App.Run")
defer span.End()
var errs []error
if app.worker == nil {
errs = append(errs, fmt.Errorf("worker not initialized"))

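Every method touched by this diff gains the same two-line prologue seen in App.Run above. A minimal sketch of the pattern, presumably what the goinstrument helper from commit 7a11d7270f generates; the Service and DoWork names are hypothetical:

package main

import (
	"context"

	"go.opentelemetry.io/otel"
)

type Service struct{}

// DoWork shows the injected prologue: open a span named after the method,
// rebind ctx so callees join the same trace, and end the span on return.
// Without an OTel SDK installed, the global tracer is a no-op.
func (s *Service) DoWork(ctx context.Context) error {
	ctx, span := otel.Tracer("arc").Start(ctx, "Service.DoWork")
	defer span.End()

	_ = ctx // pass the span-carrying ctx to downstream calls
	return nil
}

func main() {
	_ = (&Service{}).DoWork(context.Background())
}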
View File

@@ -7,6 +7,7 @@ import (
listener "github.com/actions/actions-runner-controller/cmd/ghalistener/listener"
mock "github.com/stretchr/testify/mock"
"go.opentelemetry.io/otel"
)
// Listener is an autogenerated mock type for the Listener type
@@ -16,6 +17,9 @@ type Listener struct {
// Listen provides a mock function with given fields: ctx, handler
func (_m *Listener) Listen(ctx context.Context, handler listener.Handler) error {
ctx, span := otel.Tracer("arc").Start(ctx, "Listener.Listen")
defer span.End()
ret := _m.Called(ctx, handler)
var r0 error

View File

@@ -4,6 +4,7 @@ package mocks
import (
actions "github.com/actions/actions-runner-controller/github/actions"
"go.opentelemetry.io/otel"
context "context"
@@ -17,6 +18,9 @@ type Worker struct {
// HandleDesiredRunnerCount provides a mock function with given fields: ctx, count, acquireCount
func (_m *Worker) HandleDesiredRunnerCount(ctx context.Context, count int, acquireCount int) (int, error) {
ctx, span := otel.Tracer("arc").Start(ctx, "Worker.HandleDesiredRunnerCount")
defer span.End()
ret := _m.Called(ctx, count, acquireCount)
var r0 int
@@ -41,6 +45,9 @@ func (_m *Worker) HandleDesiredRunnerCount(ctx context.Context, count int, acqui
// HandleJobStarted provides a mock function with given fields: ctx, jobInfo
func (_m *Worker) HandleJobStarted(ctx context.Context, jobInfo *actions.JobStarted) error {
ctx, span := otel.Tracer("arc").Start(ctx, "Worker.HandleJobStarted")
defer span.End()
ret := _m.Called(ctx, jobInfo)
var r0 error

View File

@@ -7,12 +7,16 @@ import (
"fmt"
"net/http"
"os"
"sync"
"time"
"github.com/actions/actions-runner-controller/cmd/ghalistener/metrics"
"github.com/actions/actions-runner-controller/github/actions"
"github.com/go-logr/logr"
"github.com/google/uuid"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/trace"
"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
)
const (
@@ -125,6 +129,9 @@ type Handler interface {
// The handler is responsible for handling the initial message and subsequent messages.
// If an error occurs during any step, Listen returns an error.
func (l *Listener) Listen(ctx context.Context, handler Handler) error {
ctx, span := otel.Tracer("arc").Start(ctx, "Listener.Listen")
defer span.End()
if err := l.createSession(ctx); err != nil {
return fmt.Errorf("createSession failed: %w", err)
}
@@ -160,34 +167,149 @@ func (l *Listener) Listen(ctx context.Context, handler Handler) error {
default:
}
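// trace.WithNewRoot() starts a fresh root span per poll, so each message
// becomes its own trace instead of one unbounded trace for the whole loop.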
ctx, span := otel.Tracer("arc").Start(ctx, "Listener.Listen.loop", trace.WithNewRoot())
msg, err := l.getMessage(ctx)
if err != nil {
span.End()
return fmt.Errorf("failed to get message: %w", err)
}
if msg == nil {
_, err := handler.HandleDesiredRunnerCount(ctx, 0, 0)
if err != nil {
span.End()
return fmt.Errorf("handling nil message failed: %w", err)
}
span.End()
continue
}
// Remove cancellation from the context to avoid cancelling the message handling.
if err := l.handleMessage(context.WithoutCancel(ctx), handler, msg); err != nil {
span.End()
return fmt.Errorf("failed to handle message: %w", err)
}
span.End()
}
}
type tracedJob struct {
jobSpan tracer.Span
runnerSetAssignSpan tracer.Span
runnerAssignSpan tracer.Span
runnerRunJobSpan tracer.Span
}
var mu sync.Mutex
var tracedJobs map[string]*tracedJob
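// tracedJobs holds Datadog spans across messages, keyed by runner request ID:
// jobsAvailable opens the job and runnerSetAssign spans, jobsStarted finishes
// runnerSetAssign and opens runnerAssign/runnerRunJob, and jobsCompleted
// finishes runnerRunJob and the job span before deleting the entry.
// mu guards the map.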
func (l *Listener) progressTraces(parsedMsg *parsedMessage) {
mu.Lock()
defer mu.Unlock()
if tracedJobs == nil {
tracedJobs = make(map[string]*tracedJob)
}
for _, j := range parsedMsg.jobsAvailable {
jobSpan := tracer.StartSpan(
"GitHub Actions Workflow Run",
tracer.StartTime(j.QueueTime),
tracer.Tag("runner_request_id", fmt.Sprintf("%d", j.RunnerRequestId)),
tracer.Tag("repository_name", j.RepositoryName),
tracer.Tag("owner_name", j.OwnerName),
tracer.Tag("workflow_ref", fmt.Sprintf("%s", j.JobWorkflowRef)),
tracer.Tag("workflow_run_id", fmt.Sprintf("%d", j.WorkflowRunId)),
)
runnerSetAssignSpan := tracer.StartSpan(
"runnerSetAssign",
tracer.ChildOf(jobSpan.Context()),
tracer.StartTime(j.QueueTime),
tracer.Tag("runner_request_id", fmt.Sprintf("%d", j.RunnerRequestId)),
tracer.Tag("repository_name", j.RepositoryName),
tracer.Tag("owner_name", j.OwnerName),
)
reqID := fmt.Sprintf("%d", j.RunnerRequestId)
tracedJobs[reqID] = &tracedJob{
jobSpan: jobSpan,
runnerSetAssignSpan: runnerSetAssignSpan,
}
l.logger.Info("Listener.progressTraces: Job available", "queueTime", j.QueueTime, "runnerAssignTime", j.ScaleSetAssignTime, "requestLabels", j.RequestLabels, "now", time.Now())
}
for _, j := range parsedMsg.jobsStarted {
reqID := fmt.Sprintf("%d", j.RunnerRequestId)
t := tracedJobs[reqID]
if t == nil {
s := tracer.StartSpan(fmt.Sprintf("%s", j.JobWorkflowRef), tracer.StartTime(j.QueueTime))
tracedJobs[reqID] = &tracedJob{jobSpan: s}
l.logger.Error(errors.New("job and runnerSetAssign spans have not started yet"), "runnerRequestId", j.RunnerRequestId)
} else {
if t.runnerSetAssignSpan == nil {
l.logger.Error(errors.New("runnerSetAssignSpan has not started yet"), "runnerRequestId", j.RunnerRequestId)
} else {
t.runnerSetAssignSpan.Finish(tracer.FinishTime(j.RunnerAssignTime))
}
t.runnerAssignSpan = tracer.StartSpan(
"runnerAssign",
tracer.ChildOf(t.jobSpan.Context()),
tracer.StartTime(j.RunnerAssignTime),
)
now := time.Now()
t.runnerAssignSpan.Finish(tracer.FinishTime(now))
t.runnerRunJobSpan = tracer.StartSpan(
"runnerRunJob",
tracer.ChildOf(t.jobSpan.Context()),
tracer.StartTime(now),
)
l.logger.Info("Listener.progressTraces: Job started", "queueTime", j.QueueTime, "runnerAssignTime", j.RunnerAssignTime, "requestLabels", j.RequestLabels, "now", now)
}
}
for _, j := range parsedMsg.jobsCompleted {
reqID := fmt.Sprintf("%d", j.RunnerRequestId)
t := tracedJobs[reqID]
if t == nil {
s := tracer.StartSpan(j.JobWorkflowRef, tracer.StartTime(j.QueueTime))
t = &tracedJob{jobSpan: s}
tracedJobs[reqID] = t
l.logger.Error(errors.New("job, runnerSetAssign and runnerAssign spans have not started yet"), "runnerRequestId", j.RunnerRequestId)
} else {
if t.runnerRunJobSpan == nil {
l.logger.Error(errors.New("runnerRunJobSPan has not started yet"), "runnerRequestId", j.RunnerRequestId)
} else {
t.runnerRunJobSpan.Finish(tracer.FinishTime(j.FinishTime))
}
}
s := t.jobSpan
s.Finish(tracer.FinishTime(j.FinishTime))
delete(tracedJobs, reqID)
}
}
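progressTraces reconstructs each job's lifecycle retroactively: dd-trace-go accepts explicit start and finish times, so spans for the queue, assign, and run phases can be built from message timestamps long after the events occurred. A hedged sketch of that technique with fabricated timestamps (the span names mirror the ones above; everything else is illustrative):

package main

import (
	"time"

	"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
)

func main() {
	tracer.Start()
	defer tracer.Stop()

	queued := time.Now().Add(-90 * time.Second)
	assigned := queued.Add(30 * time.Second)
	finished := time.Now()

	// Root span covers the whole workflow run, backdated to queue time.
	job := tracer.StartSpan("GitHub Actions Workflow Run", tracer.StartTime(queued))
	// Child span for the queue->assign phase, finished retroactively.
	assign := tracer.StartSpan("runnerSetAssign",
		tracer.ChildOf(job.Context()), tracer.StartTime(queued))
	assign.Finish(tracer.FinishTime(assigned))
	// Child span for the assign->finish phase.
	run := tracer.StartSpan("runnerRunJob",
		tracer.ChildOf(job.Context()), tracer.StartTime(assigned))
	run.Finish(tracer.FinishTime(finished))
	job.Finish(tracer.FinishTime(finished))
}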
func (l *Listener) handleMessage(ctx context.Context, handler Handler, msg *actions.RunnerScaleSetMessage) error {
ctx, span := otel.Tracer("arc").Start(ctx, "Listener.handleMessage")
defer span.End()
parsedMsg, err := l.parseMessage(ctx, msg)
if err != nil {
return fmt.Errorf("failed to parse message: %w", err)
}
l.metrics.PublishStatistics(parsedMsg.statistics)
l.progressTraces(parsedMsg)
if len(parsedMsg.jobsAvailable) > 0 {
acquiredJobIDs, err := l.acquireAvailableJobs(ctx, parsedMsg.jobsAvailable)
if err != nil {
@@ -223,6 +345,9 @@ func (l *Listener) handleMessage(ctx context.Context, handler Handler, msg *acti
}
func (l *Listener) createSession(ctx context.Context) error {
ctx, span := otel.Tracer("arc").Start(ctx, "Listener.createSession")
defer span.End()
var session *actions.RunnerScaleSetSession
var retries int
@@ -268,6 +393,9 @@ func (l *Listener) createSession(ctx context.Context) error {
}
func (l *Listener) getMessage(ctx context.Context) (*actions.RunnerScaleSetMessage, error) {
ctx, span := otel.Tracer("arc").Start(ctx, "Listener.getMessage")
defer span.End()
l.logger.Info("Getting next message", "lastMessageID", l.lastMessageID)
msg, err := l.client.GetMessage(ctx, l.session.MessageQueueUrl, l.session.MessageQueueAccessToken, l.lastMessageID, l.maxCapacity)
if err == nil { // if NO error
@@ -294,6 +422,9 @@ func (l *Listener) getMessage(ctx context.Context) (*actions.RunnerScaleSetMessa
}
func (l *Listener) deleteLastMessage(ctx context.Context) error {
ctx, span := otel.Tracer("arc").Start(ctx, "Listener.deleteLastMessage")
defer span.End()
l.logger.Info("Deleting last message", "lastMessageID", l.lastMessageID)
err := l.client.DeleteMessage(ctx, l.session.MessageQueueUrl, l.session.MessageQueueAccessToken, l.lastMessageID)
if err == nil { // if NO error
@@ -325,6 +456,9 @@ type parsedMessage struct {
}
func (l *Listener) parseMessage(ctx context.Context, msg *actions.RunnerScaleSetMessage) (*parsedMessage, error) {
ctx, span := otel.Tracer("arc").Start(ctx, "Listener.parseMessage")
defer span.End()
if msg.MessageType != "RunnerScaleSetJobMessages" {
l.logger.Info("Skipping message", "messageType", msg.MessageType)
return nil, fmt.Errorf("invalid message type: %s", msg.MessageType)
@@ -398,6 +532,9 @@ func (l *Listener) parseMessage(ctx context.Context, msg *actions.RunnerScaleSet
}
func (l *Listener) acquireAvailableJobs(ctx context.Context, jobsAvailable []*actions.JobAvailable) ([]int64, error) {
ctx, span := otel.Tracer("arc").Start(ctx, "Listener.acquireAvailableJobs", trace.WithLinks())
defer span.End()
ids := make([]int64, 0, len(jobsAvailable))
for _, job := range jobsAvailable {
ids = append(ids, job.RunnerRequestId)
@@ -428,6 +565,9 @@ func (l *Listener) acquireAvailableJobs(ctx context.Context, jobsAvailable []*ac
}
func (l *Listener) refreshSession(ctx context.Context) error {
ctx, span := otel.Tracer("arc").Start(ctx, "Listener.refreshSession")
defer span.End()
l.logger.Info("Message queue token is expired during GetNextMessage, refreshing...")
session, err := l.client.RefreshMessageSession(ctx, l.session.RunnerScaleSet.Id, l.session.SessionId)
if err != nil {

View File

@@ -6,6 +6,7 @@ import (
context "context"
actions "github.com/actions/actions-runner-controller/github/actions"
"go.opentelemetry.io/otel"
mock "github.com/stretchr/testify/mock"
@@ -19,6 +20,9 @@ type Client struct {
// AcquireJobs provides a mock function with given fields: ctx, runnerScaleSetId, messageQueueAccessToken, requestIds
func (_m *Client) AcquireJobs(ctx context.Context, runnerScaleSetId int, messageQueueAccessToken string, requestIds []int64) ([]int64, error) {
ctx, span := otel.Tracer("arc").Start(ctx, "Client.AcquireJobs")
defer span.End()
ret := _m.Called(ctx, runnerScaleSetId, messageQueueAccessToken, requestIds)
var r0 []int64
@@ -45,6 +49,9 @@ func (_m *Client) AcquireJobs(ctx context.Context, runnerScaleSetId int, message
// CreateMessageSession provides a mock function with given fields: ctx, runnerScaleSetId, owner
func (_m *Client) CreateMessageSession(ctx context.Context, runnerScaleSetId int, owner string) (*actions.RunnerScaleSetSession, error) {
ctx, span := otel.Tracer("arc").Start(ctx, "Client.CreateMessageSession")
defer span.End()
ret := _m.Called(ctx, runnerScaleSetId, owner)
var r0 *actions.RunnerScaleSetSession
@@ -71,6 +78,9 @@ func (_m *Client) CreateMessageSession(ctx context.Context, runnerScaleSetId int
// DeleteMessage provides a mock function with given fields: ctx, messageQueueUrl, messageQueueAccessToken, messageId
func (_m *Client) DeleteMessage(ctx context.Context, messageQueueUrl string, messageQueueAccessToken string, messageId int64) error {
ctx, span := otel.Tracer("arc").Start(ctx, "Client.DeleteMessage")
defer span.End()
ret := _m.Called(ctx, messageQueueUrl, messageQueueAccessToken, messageId)
var r0 error
@@ -85,6 +95,9 @@ func (_m *Client) DeleteMessage(ctx context.Context, messageQueueUrl string, mes
// DeleteMessageSession provides a mock function with given fields: ctx, runnerScaleSetId, sessionId
func (_m *Client) DeleteMessageSession(ctx context.Context, runnerScaleSetId int, sessionId *uuid.UUID) error {
ctx, span := otel.Tracer("arc").Start(ctx, "Client.DeleteMessageSession")
defer span.End()
ret := _m.Called(ctx, runnerScaleSetId, sessionId)
var r0 error
@@ -99,6 +112,9 @@ func (_m *Client) DeleteMessageSession(ctx context.Context, runnerScaleSetId int
// GetAcquirableJobs provides a mock function with given fields: ctx, runnerScaleSetId
func (_m *Client) GetAcquirableJobs(ctx context.Context, runnerScaleSetId int) (*actions.AcquirableJobList, error) {
ctx, span := otel.Tracer("arc").Start(ctx, "Client.GetAcquirableJobs")
defer span.End()
ret := _m.Called(ctx, runnerScaleSetId)
var r0 *actions.AcquirableJobList
@@ -125,6 +141,9 @@ func (_m *Client) GetAcquirableJobs(ctx context.Context, runnerScaleSetId int) (
// GetMessage provides a mock function with given fields: ctx, messageQueueUrl, messageQueueAccessToken, lastMessageId, maxCapacity
func (_m *Client) GetMessage(ctx context.Context, messageQueueUrl string, messageQueueAccessToken string, lastMessageId int64, maxCapacity int) (*actions.RunnerScaleSetMessage, error) {
ctx, span := otel.Tracer("arc").Start(ctx, "Client.GetMessage")
defer span.End()
ret := _m.Called(ctx, messageQueueUrl, messageQueueAccessToken, lastMessageId, maxCapacity)
var r0 *actions.RunnerScaleSetMessage
@@ -151,6 +170,9 @@ func (_m *Client) GetMessage(ctx context.Context, messageQueueUrl string, messag
// RefreshMessageSession provides a mock function with given fields: ctx, runnerScaleSetId, sessionId
func (_m *Client) RefreshMessageSession(ctx context.Context, runnerScaleSetId int, sessionId *uuid.UUID) (*actions.RunnerScaleSetSession, error) {
ctx, span := otel.Tracer("arc").Start(ctx, "Client.RefreshMessageSession")
defer span.End()
ret := _m.Called(ctx, runnerScaleSetId, sessionId)
var r0 *actions.RunnerScaleSetSession

View File

@@ -6,6 +6,7 @@ import (
context "context"
actions "github.com/actions/actions-runner-controller/github/actions"
"go.opentelemetry.io/otel"
mock "github.com/stretchr/testify/mock"
)
@@ -17,6 +18,9 @@ type Handler struct {
// HandleDesiredRunnerCount provides a mock function with given fields: ctx, count, jobsCompleted
func (_m *Handler) HandleDesiredRunnerCount(ctx context.Context, count int, jobsCompleted int) (int, error) {
ctx, span := otel.Tracer("arc").Start(ctx, "Handler.HandleDesiredRunnerCount")
defer span.End()
ret := _m.Called(ctx, count, jobsCompleted)
var r0 int
@@ -41,6 +45,9 @@ func (_m *Handler) HandleDesiredRunnerCount(ctx context.Context, count int, jobs
// HandleJobStarted provides a mock function with given fields: ctx, jobInfo
func (_m *Handler) HandleJobStarted(ctx context.Context, jobInfo *actions.JobStarted) error {
ctx, span := otel.Tracer("arc").Start(ctx, "Handler.HandleJobStarted")
defer span.End()
ret := _m.Called(ctx, jobInfo)
var r0 error

View File

@@ -10,9 +10,30 @@ import (
"github.com/actions/actions-runner-controller/cmd/ghalistener/app"
"github.com/actions/actions-runner-controller/cmd/ghalistener/config"
"go.opentelemetry.io/otel"
ddotel "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/opentelemetry"
"go.opentelemetry.io/otel/exporters/stdout/stdoutlog"
"go.opentelemetry.io/otel/log/global"
otellog "go.opentelemetry.io/otel/sdk/log"
)
func main() {
provider := ddotel.NewTracerProvider()
defer provider.Shutdown()
otel.SetTracerProvider(provider)
loggerProvider, err := newLoggerProvider()
if err != nil {
log.Printf("Failed to initialize OpenTelemetry logger provider: %v", err)
return
}
global.SetLoggerProvider(loggerProvider)
log.Printf("Enabled OpenTelemetry Tracing")
configPath, ok := os.LookupEnv("LISTENER_CONFIG_PATH")
if !ok {
fmt.Fprintf(os.Stderr, "Error: LISTENER_CONFIG_PATH environment variable is not set\n")
@@ -38,3 +59,15 @@ func main() {
os.Exit(1)
}
}
func newLoggerProvider() (*otellog.LoggerProvider, error) {
logExporter, err := stdoutlog.New()
if err != nil {
return nil, err
}
loggerProvider := otellog.NewLoggerProvider(
otellog.WithProcessor(otellog.NewBatchProcessor(logExporter)),
)
return loggerProvider, nil
}
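One caveat with the snippet above: the LoggerProvider is never shut down, so records buffered by the batch processor can be lost on exit. A minimal sketch, assuming the same otel SDK packages, that flushes on shutdown:

package main

import (
	"context"
	"log"

	"go.opentelemetry.io/otel/exporters/stdout/stdoutlog"
	"go.opentelemetry.io/otel/log/global"
	otellog "go.opentelemetry.io/otel/sdk/log"
)

func main() {
	exporter, err := stdoutlog.New()
	if err != nil {
		log.Fatalf("create stdout log exporter: %v", err)
	}
	provider := otellog.NewLoggerProvider(
		otellog.WithProcessor(otellog.NewBatchProcessor(exporter)),
	)
	// Shutdown drains the batch processor before the process exits.
	defer func() {
		if err := provider.Shutdown(context.Background()); err != nil {
			log.Printf("logger provider shutdown: %v", err)
		}
	}()
	global.SetLoggerProvider(provider)
}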

View File

@@ -3,6 +3,7 @@ package metrics
import (
"context"
"net/http"
"os"
"strconv"
"time"
@@ -10,6 +11,7 @@ import (
"github.com/go-logr/logr"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"go.opentelemetry.io/otel"
)
const (
@@ -45,14 +47,25 @@ var (
labelKeyJobName,
labelKeyJobWorkflowRef,
labelKeyEventName,
labelKeyRunnerID,
labelKeyRunnerName,
}
completedJobsTotalLabels = append(jobLabels, labelKeyJobResult, labelKeyRunnerID, labelKeyRunnerName)
jobExecutionDurationLabels = append(jobLabels, labelKeyJobResult, labelKeyRunnerID, labelKeyRunnerName)
startedJobsTotalLabels = append(jobLabels, labelKeyRunnerID, labelKeyRunnerName)
jobStartupDurationLabels = append(jobLabels, labelKeyRunnerID, labelKeyRunnerName)
completedJobLabels []string
includeRunnerScaleSetNameInJobLabels = false
)
func init() {
if os.Getenv("INCLUDE_RUNNER_SCALE_SET_NAME_IN_JOB_LABELS") == "true" {
includeRunnerScaleSetNameInJobLabels = true
jobLabels = append(jobLabels, labelKeyRunnerScaleSetName)
}
completedJobLabels = append([]string{}, append(jobLabels, labelKeyJobResult)...)
}
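One ordering caveat worth noting here: Go initializes package-level variables before running init(), so a label appended to jobLabels in init() is only seen by collectors constructed after that point. A hedged sketch (label names illustrative) that sidesteps the issue by building the vector inside init(), once the label set is final:

package main

import (
	"fmt"
	"os"

	"github.com/prometheus/client_golang/prometheus"
)

var (
	jobLabels        = []string{"repository", "job_name"}
	startedJobsTotal *prometheus.CounterVec
)

func init() {
	if os.Getenv("INCLUDE_RUNNER_SCALE_SET_NAME_IN_JOB_LABELS") == "true" {
		jobLabels = append(jobLabels, "runner_scale_set_name")
	}
	// Build the vector only once the label set is final: a CounterVec's
	// labels are fixed at construction time.
	startedJobsTotal = prometheus.NewCounterVec(
		prometheus.CounterOpts{Name: "started_jobs_total", Help: "Total jobs started."},
		jobLabels,
	)
	prometheus.MustRegister(startedJobsTotal)
}

func main() {
	fmt.Println("job label set:", jobLabels)
}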
var (
assignedJobs = prometheus.NewGaugeVec(
prometheus.GaugeOpts{
@@ -132,7 +145,7 @@ var (
Name: "started_jobs_total",
Help: "Total number of jobs started.",
},
startedJobsTotalLabels,
jobLabels,
)
completedJobsTotal = prometheus.NewCounterVec(
@@ -141,7 +154,7 @@ var (
Help: "Total number of jobs completed.",
Subsystem: githubScaleSetSubsystem,
},
completedJobsTotalLabels,
completedJobLabels,
)
jobStartupDurationSeconds = prometheus.NewHistogramVec(
@@ -151,7 +164,7 @@ var (
Help: "Time spent waiting for workflow job to get started on the runner owned by the scale set (in seconds).",
Buckets: runtimeBuckets,
},
jobStartupDurationLabels,
jobLabels,
)
jobExecutionDurationSeconds = prometheus.NewHistogramVec(
@@ -161,7 +174,7 @@ var (
Help: "Time spent executing workflow jobs by the scale set (in seconds).",
Buckets: runtimeBuckets,
},
jobExecutionDurationLabels,
completedJobLabels,
)
)
@@ -222,7 +235,7 @@ type baseLabels struct {
}
func (b *baseLabels) jobLabels(jobBase *actions.JobMessageBase) prometheus.Labels {
return prometheus.Labels{
l := prometheus.Labels{
labelKeyEnterprise: b.enterprise,
labelKeyOrganization: jobBase.OwnerName,
labelKeyRepository: jobBase.RepositoryName,
@@ -230,6 +243,10 @@ func (b *baseLabels) jobLabels(jobBase *actions.JobMessageBase) prometheus.Label
labelKeyJobWorkflowRef: jobBase.JobWorkflowRef,
labelKeyEventName: jobBase.EventName,
}
if includeRunnerScaleSetNameInJobLabels {
l[labelKeyRunnerScaleSetName] = b.scaleSetName
}
return l
}
func (b *baseLabels) scaleSetLabels() prometheus.Labels {
@@ -336,6 +353,9 @@ func NewExporter(config ExporterConfig) ServerPublisher {
}
func (e *exporter) ListenAndServe(ctx context.Context) error {
ctx, span := otel.Tracer("arc").Start(ctx, "exporter.ListenAndServe")
defer span.End()
e.logger.Info("starting metrics server", "addr", e.srv.Addr)
go func() {
<-ctx.Done()

View File

@@ -6,6 +6,7 @@ import (
context "context"
actions "github.com/actions/actions-runner-controller/github/actions"
"go.opentelemetry.io/otel"
mock "github.com/stretchr/testify/mock"
)
@@ -17,6 +18,9 @@ type ServerPublisher struct {
// ListenAndServe provides a mock function with given fields: ctx
func (_m *ServerPublisher) ListenAndServe(ctx context.Context) error {
ctx, span := otel.Tracer("arc").Start(ctx, "ServerPublisher.ListenAndServe")
defer span.End()
ret := _m.Called(ctx)
var r0 error

View File

@@ -11,6 +11,8 @@ import (
"github.com/actions/actions-runner-controller/logging"
jsonpatch "github.com/evanphx/json-patch"
"github.com/go-logr/logr"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
kerrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes"
@@ -96,6 +98,16 @@ func (w *Worker) applyDefaults() error {
// about the ephemeral runner that should not be deleted when scaling down.
// It returns an error if there is any issue with updating the job information.
func (w *Worker) HandleJobStarted(ctx context.Context, jobInfo *actions.JobStarted) error {
ctx, span := otel.Tracer("arc").Start(ctx, "Worker.HandleJobStarted")
defer span.End()
span.SetAttributes(
attribute.String("runner.name", jobInfo.RunnerName),
attribute.String("runner.repo.name", jobInfo.RepositoryName),
attribute.String("workflow.ref", jobInfo.JobWorkflowRef),
attribute.Int64("workflow.run.id", jobInfo.WorkflowRunId),
)
w.logger.Info("Updating job info for the runner",
"runnerName", jobInfo.RunnerName,
"ownerName", jobInfo.OwnerName,
@@ -164,6 +176,9 @@ func (w *Worker) HandleJobStarted(ctx context.Context, jobInfo *actions.JobStart
// Finally, it logs the scaled ephemeral runner set details and returns nil if successful.
// If any error occurs during the process, it returns an error with a descriptive message.
func (w *Worker) HandleDesiredRunnerCount(ctx context.Context, count, jobsCompleted int) (int, error) {
ctx, span := otel.Tracer("arc").Start(ctx, "Worker.HandleDesiredRunnerCount")
defer span.End()
patchID := w.setDesiredWorkerState(count, jobsCompleted)
original, err := json.Marshal(

View File

@@ -8,6 +8,7 @@ import (
"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
jsonpatch "github.com/evanphx/json-patch"
"github.com/go-logr/logr"
"go.opentelemetry.io/otel"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
@@ -38,6 +39,9 @@ func NewKubernetesManager(logger *logr.Logger) (*AutoScalerKubernetesManager, er
}
func (k *AutoScalerKubernetesManager) ScaleEphemeralRunnerSet(ctx context.Context, namespace, resourceName string, runnerCount int) error {
ctx, span := otel.Tracer("arc").Start(ctx, "AutoScalerKubernetesManager.ScaleEphemeralRunnerSet")
defer span.End()
original := &v1alpha1.EphemeralRunnerSet{
Spec: v1alpha1.EphemeralRunnerSetSpec{
Replicas: -1,
@@ -83,6 +87,9 @@ func (k *AutoScalerKubernetesManager) ScaleEphemeralRunnerSet(ctx context.Contex
}
func (k *AutoScalerKubernetesManager) UpdateEphemeralRunnerWithJobInfo(ctx context.Context, namespace, resourceName, ownerName, repositoryName, jobWorkflowRef, jobDisplayName string, workflowRunId, jobRequestId int64) error {
ctx, span := otel.Tracer("arc").Start(ctx, "AutoScalerKubernetesManager.UpdateEphemeralRunnerWithJobInfo")
defer span.End()
original := &v1alpha1.EphemeralRunner{}
originalJson, err := json.Marshal(original)
if err != nil {

View File

@@ -13,6 +13,7 @@ import (
"github.com/go-logr/logr"
"github.com/google/uuid"
"github.com/pkg/errors"
"go.opentelemetry.io/otel"
)
const (
@@ -38,6 +39,9 @@ func NewAutoScalerClient(
runnerScaleSetId int,
options ...func(*AutoScalerClient),
) (*AutoScalerClient, error) {
ctx, span := otel.Tracer("arc").Start(ctx, "NewAutoScalerClient")
defer span.End()
listener := AutoScalerClient{
logger: logger.WithName("auto_scaler"),
}
@@ -59,6 +63,9 @@ func NewAutoScalerClient(
}
func createSession(ctx context.Context, logger *logr.Logger, client actions.ActionsService, runnerScaleSetId int) (*actions.RunnerScaleSetSession, *actions.RunnerScaleSetMessage, error) {
ctx, span := otel.Tracer("arc").Start(ctx, "createSession")
defer span.End()
hostName, err := os.Hostname()
if err != nil {
hostName = uuid.New().String()
@@ -130,6 +137,9 @@ func (m *AutoScalerClient) Close() error {
}
func (m *AutoScalerClient) GetRunnerScaleSetMessage(ctx context.Context, handler func(msg *actions.RunnerScaleSetMessage) error, maxCapacity int) error {
ctx, span := otel.Tracer("arc").Start(ctx, "AutoScalerClient.GetRunnerScaleSetMessage")
defer span.End()
if m.initialMessage != nil {
err := handler(m.initialMessage)
if err != nil {
@@ -162,6 +172,9 @@ func (m *AutoScalerClient) GetRunnerScaleSetMessage(ctx context.Context, handler
}
func (m *AutoScalerClient) deleteMessage(ctx context.Context, messageId int64) error {
ctx, span := otel.Tracer("arc").Start(ctx, "AutoScalerClient.deleteMessage")
defer span.End()
err := m.client.DeleteMessage(ctx, messageId)
if err != nil {
return fmt.Errorf("delete message failed from refreshing client. %w", err)
@@ -172,6 +185,9 @@ func (m *AutoScalerClient) deleteMessage(ctx context.Context, messageId int64) e
}
func (m *AutoScalerClient) AcquireJobsForRunnerScaleSet(ctx context.Context, requestIds []int64) error {
ctx, span := otel.Tracer("arc").Start(ctx, "AutoScalerClient.AcquireJobsForRunnerScaleSet")
defer span.End()
m.logger.Info("acquiring jobs.", "request count", len(requestIds), "requestIds", fmt.Sprint(requestIds))
if len(requestIds) == 0 {
return nil

View File

@@ -10,6 +10,7 @@ import (
"github.com/actions/actions-runner-controller/cmd/githubrunnerscalesetlistener/config"
"github.com/actions/actions-runner-controller/github/actions"
"github.com/go-logr/logr"
"go.opentelemetry.io/otel"
)
type ScaleSettings struct {
@@ -60,6 +61,9 @@ func NewService(
settings *ScaleSettings,
options ...func(*Service),
) (*Service, error) {
ctx, span := otel.Tracer("arc").Start(ctx, "NewService")
defer span.End()
s := &Service{
ctx: ctx,
rsClient: rsClient,

View File

@@ -34,6 +34,7 @@ import (
"github.com/go-logr/logr"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"go.opentelemetry.io/otel"
"golang.org/x/net/http/httpproxy"
"golang.org/x/sync/errgroup"
)
@@ -155,6 +156,9 @@ type runOptions struct {
}
func run(ctx context.Context, rc config.Config, logger logr.Logger, opts runOptions) error {
ctx, span := otel.Tracer("arc").Start(ctx, "run")
defer span.End()
// Create root context and hook with sigint and sigterm
creds := &actions.ActionsAuth{}
if rc.Token != "" {

View File

@@ -6,6 +6,7 @@ import (
context "context"
mock "github.com/stretchr/testify/mock"
"go.opentelemetry.io/otel"
)
// MockKubernetesManager is an autogenerated mock type for the KubernetesManager type
@@ -15,6 +16,9 @@ type MockKubernetesManager struct {
// ScaleEphemeralRunnerSet provides a mock function with given fields: ctx, namespace, resourceName, runnerCount
func (_m *MockKubernetesManager) ScaleEphemeralRunnerSet(ctx context.Context, namespace string, resourceName string, runnerCount int) error {
ctx, span := otel.Tracer("arc").Start(ctx, "MockKubernetesManager.ScaleEphemeralRunnerSet")
defer span.End()
ret := _m.Called(ctx, namespace, resourceName, runnerCount)
var r0 error
@@ -29,6 +33,9 @@ func (_m *MockKubernetesManager) ScaleEphemeralRunnerSet(ctx context.Context, na
// UpdateEphemeralRunnerWithJobInfo provides a mock function with given fields: ctx, namespace, resourceName, ownerName, repositoryName, jobWorkflowRef, jobDisplayName, jobRequestId, workflowRunId
func (_m *MockKubernetesManager) UpdateEphemeralRunnerWithJobInfo(ctx context.Context, namespace string, resourceName string, ownerName string, repositoryName string, jobWorkflowRef string, jobDisplayName string, jobRequestId int64, workflowRunId int64) error {
ctx, span := otel.Tracer("arc").Start(ctx, "MockKubernetesManager.UpdateEphemeralRunnerWithJobInfo")
defer span.End()
ret := _m.Called(ctx, namespace, resourceName, ownerName, repositoryName, jobWorkflowRef, jobDisplayName, jobRequestId, workflowRunId)
var r0 error

View File

@@ -6,6 +6,7 @@ import (
context "context"
actions "github.com/actions/actions-runner-controller/github/actions"
"go.opentelemetry.io/otel"
mock "github.com/stretchr/testify/mock"
)
@@ -17,6 +18,9 @@ type MockRunnerScaleSetClient struct {
// AcquireJobsForRunnerScaleSet provides a mock function with given fields: ctx, requestIds
func (_m *MockRunnerScaleSetClient) AcquireJobsForRunnerScaleSet(ctx context.Context, requestIds []int64) error {
ctx, span := otel.Tracer("arc").Start(ctx, "MockRunnerScaleSetClient.AcquireJobsForRunnerScaleSet")
defer span.End()
ret := _m.Called(ctx, requestIds)
var r0 error
@@ -31,6 +35,9 @@ func (_m *MockRunnerScaleSetClient) AcquireJobsForRunnerScaleSet(ctx context.Con
// GetRunnerScaleSetMessage provides a mock function with given fields: ctx, handler, maxCapacity
func (_m *MockRunnerScaleSetClient) GetRunnerScaleSetMessage(ctx context.Context, handler func(*actions.RunnerScaleSetMessage) error, maxCapacity int) error {
ctx, span := otel.Tracer("arc").Start(ctx, "MockRunnerScaleSetClient.GetRunnerScaleSetMessage")
defer span.End()
ret := _m.Called(ctx, handler, maxCapacity)
var r0 error

View File

@@ -8,6 +8,7 @@ import (
"github.com/actions/actions-runner-controller/github/actions"
"github.com/go-logr/logr"
"github.com/pkg/errors"
"go.opentelemetry.io/otel"
)
type SessionRefreshingClient struct {
@@ -25,6 +26,9 @@ func newSessionClient(client actions.ActionsService, logger *logr.Logger, sessio
}
func (m *SessionRefreshingClient) GetMessage(ctx context.Context, lastMessageId int64, maxCapacity int) (*actions.RunnerScaleSetMessage, error) {
ctx, span := otel.Tracer("arc").Start(ctx, "SessionRefreshingClient.GetMessage")
defer span.End()
if maxCapacity < 0 {
return nil, fmt.Errorf("maxCapacity must be greater than or equal to 0")
}
@@ -55,6 +59,9 @@ func (m *SessionRefreshingClient) GetMessage(ctx context.Context, lastMessageId
}
func (m *SessionRefreshingClient) DeleteMessage(ctx context.Context, messageId int64) error {
ctx, span := otel.Tracer("arc").Start(ctx, "SessionRefreshingClient.DeleteMessage")
defer span.End()
err := m.client.DeleteMessage(ctx, m.session.MessageQueueUrl, m.session.MessageQueueAccessToken, messageId)
if err == nil {
return nil
@@ -82,6 +89,9 @@ func (m *SessionRefreshingClient) DeleteMessage(ctx context.Context, messageId i
}
func (m *SessionRefreshingClient) AcquireJobs(ctx context.Context, requestIds []int64) ([]int64, error) {
ctx, span := otel.Tracer("arc").Start(ctx, "SessionRefreshingClient.AcquireJobs")
defer span.End()
ids, err := m.client.AcquireJobs(ctx, m.session.RunnerScaleSet.Id, m.session.MessageQueueAccessToken, requestIds)
if err == nil {
return ids, nil
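These wrappers all follow the same refresh-and-retry shape: attempt the call, and if the message queue token has expired, refresh the session and retry exactly once. A minimal sketch of that shape under illustrative names (the real code checks a typed error from the actions package; errTokenExpired here is a stand-in):

package main

import (
	"context"
	"errors"
	"fmt"
)

var errTokenExpired = errors.New("message queue token expired") // illustrative sentinel

type session struct{ token string }

func getMessage(ctx context.Context, s *session) error {
	if s.token == "stale" {
		return errTokenExpired
	}
	return nil
}

func refresh(ctx context.Context, s *session) error { s.token = "fresh"; return nil }

func getMessageWithRefresh(ctx context.Context, s *session) error {
	err := getMessage(ctx, s)
	if !errors.Is(err, errTokenExpired) {
		return err // nil, or a non-retryable failure
	}
	if err := refresh(ctx, s); err != nil {
		return fmt.Errorf("refresh session: %w", err)
	}
	// Retry exactly once with the refreshed token.
	return getMessage(ctx, s)
}

func main() {
	s := &session{token: "stale"}
	fmt.Println(getMessageWithRefresh(context.Background(), s))
}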

View File

@@ -21,6 +21,8 @@ import (
"fmt"
"github.com/go-logr/logr"
"go.opentelemetry.io/otel"
otelCodes "go.opentelemetry.io/otel/codes"
kerrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
@@ -55,7 +57,7 @@ type AutoscalingListenerReconciler struct {
ListenerMetricsAddr string
ListenerMetricsEndpoint string
ResourceBuilder
resourceBuilder resourceBuilder
}
// +kubebuilder:rbac:groups=core,resources=pods,verbs=get;list;watch;create;update;patch;delete
@@ -70,6 +72,9 @@ type AutoscalingListenerReconciler struct {
// Reconcile a AutoscalingListener resource to meet its desired spec.
func (r *AutoscalingListenerReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingListenerReconciler.Reconcile")
defer span.End()
log := r.Log.WithValues("autoscalinglistener", req.NamespacedName)
autoscalingListener := new(v1alpha1.AutoscalingListener)
@@ -242,27 +247,17 @@ func (r *AutoscalingListenerReconciler) Reconcile(ctx context.Context, req ctrl.
return r.createListenerPod(ctx, &autoscalingRunnerSet, autoscalingListener, serviceAccount, mirrorSecret, log)
}
cs := listenerContainerStatus(listenerPod)
switch {
case cs == nil:
log.Info("Listener pod is not ready", "namespace", listenerPod.Namespace, "name", listenerPod.Name)
return ctrl.Result{}, nil
case cs.State.Terminated != nil:
log.Info("Listener pod is terminated", "namespace", listenerPod.Namespace, "name", listenerPod.Name, "reason", cs.State.Terminated.Reason, "message", cs.State.Terminated.Message)
if err := r.publishRunningListener(autoscalingListener, false); err != nil {
log.Error(err, "Unable to publish runner listener down metric", "namespace", listenerPod.Namespace, "name", listenerPod.Name)
// The listener pod failing might mean the mirror secret is out of date
// Delete the listener pod and re-create it to make sure the mirror secret is up to date
if listenerPod.Status.Phase == corev1.PodFailed && listenerPod.DeletionTimestamp.IsZero() {
log.Info("Listener pod failed, deleting it and re-creating it", "namespace", listenerPod.Namespace, "name", listenerPod.Name, "reason", listenerPod.Status.Reason, "message", listenerPod.Status.Message)
if err := r.Delete(ctx, listenerPod); err != nil && !kerrors.IsNotFound(err) {
log.Error(err, "Unable to delete the listener pod", "namespace", listenerPod.Namespace, "name", listenerPod.Name)
return ctrl.Result{}, err
}
}
if listenerPod.DeletionTimestamp.IsZero() {
log.Info("Deleting the listener pod", "namespace", listenerPod.Namespace, "name", listenerPod.Name)
if err := r.Delete(ctx, listenerPod); err != nil && !kerrors.IsNotFound(err) {
log.Error(err, "Unable to delete the listener pod", "namespace", listenerPod.Namespace, "name", listenerPod.Name)
return ctrl.Result{}, err
}
}
return ctrl.Result{}, nil
case cs.State.Running != nil:
if listenerPod.Status.Phase == corev1.PodRunning {
if err := r.publishRunningListener(autoscalingListener, true); err != nil {
log.Error(err, "Unable to publish running listener", "namespace", listenerPod.Namespace, "name", listenerPod.Name)
// stop reconciling. We should never get to this point but if we do,
@@ -270,12 +265,21 @@ func (r *AutoscalingListenerReconciler) Reconcile(ctx context.Context, req ctrl.
// notify the reconciler again.
return ctrl.Result{}, nil
}
return ctrl.Result{}, nil
}
return ctrl.Result{}, nil
}
func (r *AutoscalingListenerReconciler) cleanupResources(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, logger logr.Logger) (done bool, err error) {
ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingListenerReconciler.cleanupResources")
defer span.End()
defer func() {
if err != nil {
span.SetStatus(otelCodes.Error, "error")
span.RecordError(err)
}
}()
logger.Info("Cleaning up the listener pod")
listenerPod := new(corev1.Pod)
err = r.Get(ctx, types.NamespacedName{Name: autoscalingListener.Name, Namespace: autoscalingListener.Namespace}, listenerPod)
@@ -383,7 +387,10 @@ func (r *AutoscalingListenerReconciler) cleanupResources(ctx context.Context, au
}
func (r *AutoscalingListenerReconciler) createServiceAccountForListener(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, logger logr.Logger) (ctrl.Result, error) {
newServiceAccount := r.ResourceBuilder.newScaleSetListenerServiceAccount(autoscalingListener)
ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingListenerReconciler.createServiceAccountForListener")
defer span.End()
newServiceAccount := r.resourceBuilder.newScaleSetListenerServiceAccount(autoscalingListener)
if err := ctrl.SetControllerReference(autoscalingListener, newServiceAccount, r.Scheme); err != nil {
return ctrl.Result{}, err
@@ -400,6 +407,9 @@ func (r *AutoscalingListenerReconciler) createServiceAccountForListener(ctx cont
}
func (r *AutoscalingListenerReconciler) createListenerPod(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, autoscalingListener *v1alpha1.AutoscalingListener, serviceAccount *corev1.ServiceAccount, secret *corev1.Secret, logger logr.Logger) (ctrl.Result, error) {
ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingListenerReconciler.createListenerPod")
defer span.End()
var envs []corev1.EnvVar
if autoscalingListener.Spec.Proxy != nil {
httpURL := corev1.EnvVar{
@@ -468,7 +478,7 @@ func (r *AutoscalingListenerReconciler) createListenerPod(ctx context.Context, a
logger.Info("Creating listener config secret")
podConfig, err := r.ResourceBuilder.newScaleSetListenerConfig(autoscalingListener, secret, metricsConfig, cert)
podConfig, err := r.resourceBuilder.newScaleSetListenerConfig(autoscalingListener, secret, metricsConfig, cert)
if err != nil {
logger.Error(err, "Failed to build listener config secret")
return ctrl.Result{}, err
@@ -487,7 +497,7 @@ func (r *AutoscalingListenerReconciler) createListenerPod(ctx context.Context, a
return ctrl.Result{Requeue: true}, nil
}
newPod, err := r.ResourceBuilder.newScaleSetListenerPod(autoscalingListener, &podConfig, serviceAccount, secret, metricsConfig, envs...)
newPod, err := r.resourceBuilder.newScaleSetListenerPod(autoscalingListener, &podConfig, serviceAccount, secret, metricsConfig, envs...)
if err != nil {
logger.Error(err, "Failed to build listener pod")
return ctrl.Result{}, err
@@ -509,6 +519,9 @@ func (r *AutoscalingListenerReconciler) createListenerPod(ctx context.Context, a
}
func (r *AutoscalingListenerReconciler) certificate(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, autoscalingListener *v1alpha1.AutoscalingListener) (string, error) {
ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingListenerReconciler.certificate")
defer span.End()
if autoscalingListener.Spec.GitHubServerTLS.CertificateFrom == nil {
return "", fmt.Errorf("githubServerTLS.certificateFrom is not specified")
}
@@ -547,7 +560,10 @@ func (r *AutoscalingListenerReconciler) certificate(ctx context.Context, autosca
}
func (r *AutoscalingListenerReconciler) createSecretsForListener(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, secret *corev1.Secret, logger logr.Logger) (ctrl.Result, error) {
newListenerSecret := r.ResourceBuilder.newScaleSetListenerSecretMirror(autoscalingListener, secret)
ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingListenerReconciler.createSecretsForListener")
defer span.End()
newListenerSecret := r.resourceBuilder.newScaleSetListenerSecretMirror(autoscalingListener, secret)
if err := ctrl.SetControllerReference(autoscalingListener, newListenerSecret, r.Scheme); err != nil {
return ctrl.Result{}, err
@@ -564,6 +580,9 @@ func (r *AutoscalingListenerReconciler) createSecretsForListener(ctx context.Con
}
func (r *AutoscalingListenerReconciler) createProxySecret(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, logger logr.Logger) (ctrl.Result, error) {
ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingListenerReconciler.createProxySecret")
defer span.End()
data, err := autoscalingListener.Spec.Proxy.ToSecretData(func(s string) (*corev1.Secret, error) {
var secret corev1.Secret
err := r.Get(ctx, types.NamespacedName{Name: s, Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace}, &secret)
@@ -603,6 +622,9 @@ func (r *AutoscalingListenerReconciler) createProxySecret(ctx context.Context, a
}
func (r *AutoscalingListenerReconciler) updateSecretsForListener(ctx context.Context, secret *corev1.Secret, mirrorSecret *corev1.Secret, logger logr.Logger) (ctrl.Result, error) {
ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingListenerReconciler.updateSecretsForListener")
defer span.End()
dataHash := hash.ComputeTemplateHash(secret.Data)
updatedMirrorSecret := mirrorSecret.DeepCopy()
updatedMirrorSecret.Labels["secret-data-hash"] = dataHash
@@ -619,7 +641,10 @@ func (r *AutoscalingListenerReconciler) updateSecretsForListener(ctx context.Con
}
func (r *AutoscalingListenerReconciler) createRoleForListener(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, logger logr.Logger) (ctrl.Result, error) {
newRole := r.ResourceBuilder.newScaleSetListenerRole(autoscalingListener)
ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingListenerReconciler.createRoleForListener")
defer span.End()
newRole := r.resourceBuilder.newScaleSetListenerRole(autoscalingListener)
logger.Info("Creating listener role", "namespace", newRole.Namespace, "name", newRole.Name, "rules", newRole.Rules)
if err := r.Create(ctx, newRole); err != nil {
@@ -632,6 +657,9 @@ func (r *AutoscalingListenerReconciler) createRoleForListener(ctx context.Contex
}
func (r *AutoscalingListenerReconciler) updateRoleForListener(ctx context.Context, listenerRole *rbacv1.Role, desiredRules []rbacv1.PolicyRule, desiredRulesHash string, logger logr.Logger) (ctrl.Result, error) {
ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingListenerReconciler.updateRoleForListener")
defer span.End()
updatedPatchRole := listenerRole.DeepCopy()
updatedPatchRole.Labels["role-policy-rules-hash"] = desiredRulesHash
updatedPatchRole.Rules = desiredRules
@@ -647,7 +675,10 @@ func (r *AutoscalingListenerReconciler) updateRoleForListener(ctx context.Contex
}
func (r *AutoscalingListenerReconciler) createRoleBindingForListener(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, listenerRole *rbacv1.Role, serviceAccount *corev1.ServiceAccount, logger logr.Logger) (ctrl.Result, error) {
newRoleBinding := r.ResourceBuilder.newScaleSetListenerRoleBinding(autoscalingListener, listenerRole, serviceAccount)
ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingListenerReconciler.createRoleBindingForListener")
defer span.End()
newRoleBinding := r.resourceBuilder.newScaleSetListenerRoleBinding(autoscalingListener, listenerRole, serviceAccount)
logger.Info("Creating listener role binding",
"namespace", newRoleBinding.Namespace,
@@ -732,13 +763,3 @@ func (r *AutoscalingListenerReconciler) SetupWithManager(mgr ctrl.Manager) error
WithEventFilter(predicate.ResourceVersionChangedPredicate{}).
Complete(r)
}
func listenerContainerStatus(pod *corev1.Pod) *corev1.ContainerStatus {
for i := range pod.Status.ContainerStatuses {
cs := &pod.Status.ContainerStatuses[i]
if cs.Name == autoscalingListenerContainerName {
return cs
}
}
return nil
}

View File

@@ -25,7 +25,7 @@ import (
)
const (
autoscalingListenerTestTimeout = time.Second * 20
autoscalingListenerTestTimeout = time.Second * 5
autoscalingListenerTestInterval = time.Millisecond * 250
autoscalingListenerTestGitHubToken = "gh_token"
)
@@ -351,53 +351,6 @@ var _ = Describe("Test AutoScalingListener controller", func() {
autoscalingListenerTestInterval).Should(BeEquivalentTo(rulesForListenerRole([]string{updated.Spec.EphemeralRunnerSetName})), "Role should be updated")
})
It("It should re-create pod whenever listener container is terminated", func() {
// Waiting for the pod is created
pod := new(corev1.Pod)
Eventually(
func() (string, error) {
err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingListener.Name, Namespace: autoscalingListener.Namespace}, pod)
if err != nil {
return "", err
}
return pod.Name, nil
},
autoscalingListenerTestTimeout,
autoscalingListenerTestInterval,
).Should(BeEquivalentTo(autoscalingListener.Name), "Pod should be created")
oldPodUID := string(pod.UID)
updated := pod.DeepCopy()
updated.Status.ContainerStatuses = []corev1.ContainerStatus{
{
Name: autoscalingListenerContainerName,
State: corev1.ContainerState{
Terminated: &corev1.ContainerStateTerminated{
ExitCode: 0,
},
},
},
}
err := k8sClient.Status().Update(ctx, updated)
Expect(err).NotTo(HaveOccurred(), "failed to update test pod")
// Waiting for the new pod is created
Eventually(
func() (string, error) {
pod := new(corev1.Pod)
err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingListener.Name, Namespace: autoscalingListener.Namespace}, pod)
if err != nil {
return "", err
}
return string(pod.UID), nil
},
autoscalingListenerTestTimeout,
autoscalingListenerTestInterval,
).ShouldNot(BeEquivalentTo(oldPodUID), "Pod should be re-created")
})
It("It should update mirror secrets to match secret used by AutoScalingRunnerSet", func() {
// Waiting for the pod is created
pod := new(corev1.Pod)
@@ -420,18 +373,7 @@ var _ = Describe("Test AutoScalingListener controller", func() {
Expect(err).NotTo(HaveOccurred(), "failed to update test secret")
updatedPod := pod.DeepCopy()
// Ignore status running and consult the container state
updatedPod.Status.Phase = corev1.PodRunning
updatedPod.Status.ContainerStatuses = []corev1.ContainerStatus{
{
Name: autoscalingListenerContainerName,
State: corev1.ContainerState{
Terminated: &corev1.ContainerStateTerminated{
ExitCode: 1,
},
},
},
}
updatedPod.Status.Phase = corev1.PodFailed
err = k8sClient.Status().Update(ctx, updatedPod)
Expect(err).NotTo(HaveOccurred(), "failed to update test pod to failed")
@@ -478,7 +420,6 @@ var _ = Describe("Test AutoScalingListener customization", func() {
var autoscalingListener *v1alpha1.AutoscalingListener
var runAsUser int64 = 1001
const sidecarContainerName = "sidecar"
listenerPodTemplate := corev1.PodTemplateSpec{
Spec: corev1.PodSpec{
@@ -491,7 +432,7 @@ var _ = Describe("Test AutoScalingListener customization", func() {
},
},
{
Name: sidecarContainerName,
Name: "sidecar",
ImagePullPolicy: corev1.PullIfNotPresent,
Image: "busybox",
},
@@ -584,8 +525,7 @@ var _ = Describe("Test AutoScalingListener customization", func() {
return created.Finalizers[0], nil
},
autoscalingListenerTestTimeout,
autoscalingListenerTestInterval,
).Should(BeEquivalentTo(autoscalingListenerFinalizerName), "AutoScalingListener should have a finalizer")
autoscalingListenerTestInterval).Should(BeEquivalentTo(autoscalingListenerFinalizerName), "AutoScalingListener should have a finalizer")
// Check if config is created
config := new(corev1.Secret)
@@ -619,100 +559,11 @@ var _ = Describe("Test AutoScalingListener customization", func() {
Expect(pod.Spec.Containers[0].SecurityContext.RunAsUser).To(Equal(&runAsUser), "Pod should have the correct security context")
Expect(pod.Spec.Containers[0].ImagePullPolicy).To(Equal(corev1.PullAlways), "Pod should have the correct image pull policy")
Expect(pod.Spec.Containers[1].Name).To(Equal(sidecarContainerName), "Pod should have the correct container name")
Expect(pod.Spec.Containers[1].Name).To(Equal("sidecar"), "Pod should have the correct container name")
Expect(pod.Spec.Containers[1].Image).To(Equal("busybox"), "Pod should have the correct image")
Expect(pod.Spec.Containers[1].ImagePullPolicy).To(Equal(corev1.PullIfNotPresent), "Pod should have the correct image pull policy")
})
})
Context("When AutoscalingListener pod has interuptions", func() {
It("Should re-create pod when it is deleted", func() {
pod := new(corev1.Pod)
Eventually(
func() (string, error) {
err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingListener.Name, Namespace: autoscalingListener.Namespace}, pod)
if err != nil {
return "", err
}
return pod.Name, nil
},
autoscalingListenerTestTimeout,
autoscalingListenerTestInterval,
).Should(BeEquivalentTo(autoscalingListener.Name), "Pod should be created")
Expect(len(pod.Spec.Containers)).To(Equal(2), "Pod should have 2 containers")
oldPodUID := string(pod.UID)
err := k8sClient.Delete(ctx, pod)
Expect(err).NotTo(HaveOccurred(), "failed to delete pod")
pod = new(corev1.Pod)
Eventually(
func() (string, error) {
err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingListener.Name, Namespace: autoscalingListener.Namespace}, pod)
if err != nil {
return "", err
}
return string(pod.UID), nil
},
autoscalingListenerTestTimeout,
autoscalingListenerTestInterval,
).ShouldNot(BeEquivalentTo(oldPodUID), "Pod should be created")
})
It("Should re-create pod when the listener pod is terminated", func() {
pod := new(corev1.Pod)
Eventually(
func() (string, error) {
err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingListener.Name, Namespace: autoscalingListener.Namespace}, pod)
if err != nil {
return "", err
}
return pod.Name, nil
},
autoscalingListenerTestTimeout,
autoscalingListenerTestInterval,
).Should(BeEquivalentTo(autoscalingListener.Name), "Pod should be created")
updated := pod.DeepCopy()
oldPodUID := string(pod.UID)
updated.Status.ContainerStatuses = []corev1.ContainerStatus{
{
Name: autoscalingListenerContainerName,
State: corev1.ContainerState{
Terminated: &corev1.ContainerStateTerminated{
ExitCode: 1,
},
},
},
{
Name: sidecarContainerName,
State: corev1.ContainerState{
Running: &corev1.ContainerStateRunning{},
},
},
}
err := k8sClient.Status().Update(ctx, updated)
Expect(err).NotTo(HaveOccurred(), "failed to update pod status")
pod = new(corev1.Pod)
Eventually(
func() (string, error) {
err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingListener.Name, Namespace: autoscalingListener.Namespace}, pod)
if err != nil {
return "", err
}
return string(pod.UID), nil
},
autoscalingListenerTestTimeout,
autoscalingListenerTestInterval,
).ShouldNot(BeEquivalentTo(oldPodUID), "Pod should be created")
})
})
})
var _ = Describe("Test AutoScalingListener controller with proxy", func() {
@@ -1036,6 +887,7 @@ var _ = Describe("Test AutoScalingListener controller with template modification
g.Expect(pod.ObjectMeta.Annotations).To(HaveKeyWithValue("test-annotation-key", "test-annotation-value"), "pod annotations should be copied from runner set template")
g.Expect(pod.ObjectMeta.Labels).To(HaveKeyWithValue("test-label-key", "test-label-value"), "pod labels should be copied from runner set template")
},
autoscalingListenerTestTimeout,
autoscalingListenerTestInterval).Should(Succeed(), "failed to create listener pod with proxy details")

View File

@@ -27,6 +27,8 @@ import (
"github.com/actions/actions-runner-controller/build"
"github.com/actions/actions-runner-controller/github/actions"
"github.com/go-logr/logr"
"go.opentelemetry.io/otel"
otelCodes "go.opentelemetry.io/otel/codes"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
kerrors "k8s.io/apimachinery/pkg/api/errors"
@@ -79,7 +81,8 @@ type AutoscalingRunnerSetReconciler struct {
DefaultRunnerScaleSetListenerImagePullSecrets []string
UpdateStrategy UpdateStrategy
ActionsClient actions.MultiClient
ResourceBuilder
resourceBuilder resourceBuilder
}
// +kubebuilder:rbac:groups=actions.github.com,resources=autoscalingrunnersets,verbs=get;list;watch;create;update;patch;delete
@@ -92,6 +95,9 @@ type AutoscalingRunnerSetReconciler struct {
// Reconcile a AutoscalingRunnerSet resource to meet its desired spec.
func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingRunnerSetReconciler.Reconcile")
defer span.End()
log := r.Log.WithValues("autoscalingrunnerset", req.NamespacedName)
autoscalingRunnerSet := new(v1alpha1.AutoscalingRunnerSet)
@@ -133,11 +139,17 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl
return ctrl.Result{}, err
}
if err := r.removeFinalizersFromDependentResources(ctx, autoscalingRunnerSet, log); err != nil {
requeue, err := r.removeFinalizersFromDependentResources(ctx, autoscalingRunnerSet, log)
if err != nil {
log.Error(err, "Failed to remove finalizers on dependent resources")
return ctrl.Result{}, err
}
if requeue {
log.Info("Waiting for dependent resources to be deleted")
return ctrl.Result{Requeue: true}, nil
}
log.Info("Removing finalizer")
err = patch(ctx, r.Client, autoscalingRunnerSet, func(obj *v1alpha1.AutoscalingRunnerSet) {
controllerutil.RemoveFinalizer(obj, autoscalingRunnerSetFinalizerName)
@@ -327,6 +339,15 @@ func (r *AutoscalingRunnerSetReconciler) drainingJobs(latestRunnerSetStatus *v1a
}
func (r *AutoscalingRunnerSetReconciler) cleanupListener(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) (done bool, err error) {
ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingRunnerSetReconciler.cleanupListener")
defer span.End()
defer func() {
if err != nil {
span.SetStatus(otelCodes.Error, "error")
span.RecordError(err)
}
}()
logger.Info("Cleaning up the listener")
var listener v1alpha1.AutoscalingListener
err = r.Get(ctx, client.ObjectKey{Namespace: r.ControllerNamespace, Name: scaleSetListenerName(autoscalingRunnerSet)}, &listener)
@@ -348,6 +369,15 @@ func (r *AutoscalingRunnerSetReconciler) cleanupListener(ctx context.Context, au
}
func (r *AutoscalingRunnerSetReconciler) cleanupEphemeralRunnerSets(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) (done bool, err error) {
ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingRunnerSetReconciler.cleanupEphemeralRunnerSets")
defer span.End()
defer func() {
if err != nil {
span.SetStatus(otelCodes.Error, "error")
span.RecordError(err)
}
}()
logger.Info("Cleaning up ephemeral runner sets")
runnerSets, err := r.listEphemeralRunnerSets(ctx, autoscalingRunnerSet)
if err != nil {
@@ -366,6 +396,9 @@ func (r *AutoscalingRunnerSetReconciler) cleanupEphemeralRunnerSets(ctx context.
}
func (r *AutoscalingRunnerSetReconciler) deleteEphemeralRunnerSets(ctx context.Context, oldRunnerSets []v1alpha1.EphemeralRunnerSet, logger logr.Logger) error {
ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingRunnerSetReconciler.deleteEphemeralRunnerSets")
defer span.End()
for i := range oldRunnerSets {
rs := &oldRunnerSets[i]
// already deleted but contains finalizer so it still exists
@@ -382,7 +415,16 @@ func (r *AutoscalingRunnerSetReconciler) deleteEphemeralRunnerSets(ctx context.C
return nil
}
func (r *AutoscalingRunnerSetReconciler) removeFinalizersFromDependentResources(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) error {
func (r *AutoscalingRunnerSetReconciler) removeFinalizersFromDependentResources(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) (requeue bool, err error) {
ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingRunnerSetReconciler.removeFinalizersFromDependentResources")
defer span.End()
defer func() {
if err != nil {
span.SetStatus(otelCodes.Error, "error")
span.RecordError(err)
}
}()
c := autoscalingRunnerSetFinalizerDependencyCleaner{
client: r.Client,
autoscalingRunnerSet: autoscalingRunnerSet,
@@ -397,10 +439,18 @@ func (r *AutoscalingRunnerSetReconciler) removeFinalizersFromDependentResources(
c.removeManagerRoleBindingFinalizer(ctx)
c.removeManagerRoleFinalizer(ctx)
return c.Err()
requeue, err = c.result()
if err != nil {
logger.Error(err, "Failed to cleanup finalizer from dependent resource")
return true, err
}
return requeue, nil
}
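The requeue return value feeds the standard controller-runtime pattern: when cleanup work remains, report Requeue: true rather than an error, so the reconciler is invoked again without error backoff. A minimal sketch, assuming controller-runtime (ExampleReconciler and cleanupStep are illustrative):

package main

import (
	"context"
	"fmt"

	ctrl "sigs.k8s.io/controller-runtime"
)

type ExampleReconciler struct{}

// cleanupStep pretends one dependent resource still holds a finalizer.
func (r *ExampleReconciler) cleanupStep(ctx context.Context) (bool, error) {
	return true, nil
}

func (r *ExampleReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	requeue, err := r.cleanupStep(ctx)
	if err != nil {
		return ctrl.Result{}, err
	}
	if requeue {
		// More cleanup to do: come back for another pass without
		// treating the wait as an error.
		return ctrl.Result{Requeue: true}, nil
	}
	return ctrl.Result{}, nil
}

func main() {
	res, _ := (&ExampleReconciler{}).Reconcile(context.Background(), ctrl.Request{})
	fmt.Println("requeue:", res.Requeue)
}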
func (r *AutoscalingRunnerSetReconciler) createRunnerScaleSet(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) (ctrl.Result, error) {
ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingRunnerSetReconciler.createRunnerScaleSet")
defer span.End()
logger.Info("Creating a new runner scale set")
actionsClient, err := r.actionsClientFor(ctx, autoscalingRunnerSet)
if len(autoscalingRunnerSet.Spec.RunnerScaleSetName) == 0 {
@@ -492,6 +542,9 @@ func (r *AutoscalingRunnerSetReconciler) createRunnerScaleSet(ctx context.Contex
}
func (r *AutoscalingRunnerSetReconciler) updateRunnerScaleSetRunnerGroup(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) (ctrl.Result, error) {
ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingRunnerSetReconciler.updateRunnerScaleSetRunnerGroup")
defer span.End()
runnerScaleSetId, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdAnnotationKey])
if err != nil {
logger.Error(err, "Failed to parse runner scale set ID")
@@ -535,6 +588,9 @@ func (r *AutoscalingRunnerSetReconciler) updateRunnerScaleSetRunnerGroup(ctx con
}
func (r *AutoscalingRunnerSetReconciler) updateRunnerScaleSetName(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) (ctrl.Result, error) {
ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingRunnerSetReconciler.updateRunnerScaleSetName")
defer span.End()
runnerScaleSetId, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdAnnotationKey])
if err != nil {
logger.Error(err, "Failed to parse runner scale set ID")
@@ -571,6 +627,9 @@ func (r *AutoscalingRunnerSetReconciler) updateRunnerScaleSetName(ctx context.Co
}
func (r *AutoscalingRunnerSetReconciler) deleteRunnerScaleSet(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) error {
ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingRunnerSetReconciler.deleteRunnerScaleSet")
defer span.End()
scaleSetId, ok := autoscalingRunnerSet.Annotations[runnerScaleSetIdAnnotationKey]
if !ok {
// Annotation not being present can occur in 3 scenarios
@@ -622,7 +681,10 @@ func (r *AutoscalingRunnerSetReconciler) deleteRunnerScaleSet(ctx context.Contex
}
func (r *AutoscalingRunnerSetReconciler) createEphemeralRunnerSet(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, log logr.Logger) (ctrl.Result, error) {
desiredRunnerSet, err := r.ResourceBuilder.newEphemeralRunnerSet(autoscalingRunnerSet)
ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingRunnerSetReconciler.createEphemeralRunnerSet")
defer span.End()
desiredRunnerSet, err := r.resourceBuilder.newEphemeralRunnerSet(autoscalingRunnerSet)
if err != nil {
log.Error(err, "Could not create EphemeralRunnerSet")
return ctrl.Result{}, err
@@ -644,6 +706,9 @@ func (r *AutoscalingRunnerSetReconciler) createEphemeralRunnerSet(ctx context.Co
}
func (r *AutoscalingRunnerSetReconciler) createAutoScalingListenerForRunnerSet(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet, log logr.Logger) (ctrl.Result, error) {
ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingRunnerSetReconciler.createAutoScalingListenerForRunnerSet")
defer span.End()
var imagePullSecrets []corev1.LocalObjectReference
for _, imagePullSecret := range r.DefaultRunnerScaleSetListenerImagePullSecrets {
imagePullSecrets = append(imagePullSecrets, corev1.LocalObjectReference{
@@ -651,7 +716,7 @@ func (r *AutoscalingRunnerSetReconciler) createAutoScalingListenerForRunnerSet(c
})
}
autoscalingListener, err := r.ResourceBuilder.newAutoScalingListener(autoscalingRunnerSet, ephemeralRunnerSet, r.ControllerNamespace, r.DefaultRunnerScaleSetListenerImage, imagePullSecrets)
autoscalingListener, err := r.resourceBuilder.newAutoScalingListener(autoscalingRunnerSet, ephemeralRunnerSet, r.ControllerNamespace, r.DefaultRunnerScaleSetListenerImage, imagePullSecrets)
if err != nil {
log.Error(err, "Could not create AutoscalingListener spec")
return ctrl.Result{}, err
@@ -668,6 +733,9 @@ func (r *AutoscalingRunnerSetReconciler) createAutoScalingListenerForRunnerSet(c
}
func (r *AutoscalingRunnerSetReconciler) listEphemeralRunnerSets(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet) (*EphemeralRunnerSets, error) {
ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingRunnerSetReconciler.listEphemeralRunnerSets")
defer span.End()
list := new(v1alpha1.EphemeralRunnerSetList)
if err := r.List(ctx, list, client.InNamespace(autoscalingRunnerSet.Namespace), client.MatchingFields{resourceOwnerKey: autoscalingRunnerSet.Name}); err != nil {
return nil, fmt.Errorf("failed to list ephemeral runner sets: %v", err)
@@ -677,6 +745,9 @@ func (r *AutoscalingRunnerSetReconciler) listEphemeralRunnerSets(ctx context.Con
}
func (r *AutoscalingRunnerSetReconciler) actionsClientFor(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet) (actions.ActionsService, error) {
ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingRunnerSetReconciler.actionsClientFor")
defer span.End()
var configSecret corev1.Secret
if err := r.Get(ctx, types.NamespacedName{Namespace: autoscalingRunnerSet.Namespace, Name: autoscalingRunnerSet.Spec.GitHubConfigSecret}, &configSecret); err != nil {
return nil, fmt.Errorf("failed to find GitHub config secret: %w", err)
@@ -697,6 +768,9 @@ func (r *AutoscalingRunnerSetReconciler) actionsClientFor(ctx context.Context, a
}
func (r *AutoscalingRunnerSetReconciler) actionsClientOptionsFor(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet) ([]actions.ClientOption, error) {
ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingRunnerSetReconciler.actionsClientOptionsFor")
defer span.End()
var options []actions.ClientOption
if autoscalingRunnerSet.Spec.Proxy != nil {
@@ -772,16 +846,20 @@ type autoscalingRunnerSetFinalizerDependencyCleaner struct {
autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet
logger logr.Logger
err error
// fields to operate on
requeue bool
err error
}
func (c *autoscalingRunnerSetFinalizerDependencyCleaner) Err() error {
return c.err
func (c *autoscalingRunnerSetFinalizerDependencyCleaner) result() (requeue bool, err error) {
return c.requeue, c.err
}
func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeKubernetesModeRoleBindingFinalizer(ctx context.Context) {
if c.err != nil {
c.logger.Info("Skipping cleaning up kubernetes mode service account")
ctx, span := otel.Tracer("arc").Start(ctx, "autoscalingRunnerSetFinalizerDependencyCleaner.removeKubernetesModeRoleBindingFinalizer")
defer span.End()
if c.requeue || c.err != nil {
return
}
@@ -812,6 +890,7 @@ func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeKubernetesModeRol
c.err = fmt.Errorf("failed to patch kubernetes mode role binding without finalizer: %w", err)
return
}
c.requeue = true
c.logger.Info("Removed finalizer from container mode kubernetes role binding", "name", roleBindingName)
return
case err != nil && !kerrors.IsNotFound(err):
@@ -824,7 +903,10 @@ func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeKubernetesModeRol
}
func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeKubernetesModeRoleFinalizer(ctx context.Context) {
if c.err != nil {
ctx, span := otel.Tracer("arc").Start(ctx, "autoscalingRunnerSetFinalizerDependencyCleaner.removeKubernetesModeRoleFinalizer")
defer span.End()
if c.requeue || c.err != nil {
return
}
@@ -854,6 +936,7 @@ func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeKubernetesModeRol
c.err = fmt.Errorf("failed to patch kubernetes mode role without finalizer: %w", err)
return
}
c.requeue = true
c.logger.Info("Removed finalizer from container mode kubernetes role")
return
case err != nil && !kerrors.IsNotFound(err):
@@ -866,7 +949,10 @@ func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeKubernetesModeRol
}
func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeKubernetesModeServiceAccountFinalizer(ctx context.Context) {
if c.err != nil {
ctx, span := otel.Tracer("arc").Start(ctx, "autoscalingRunnerSetFinalizerDependencyCleaner.removeKubernetesModeServiceAccountFinalizer")
defer span.End()
if c.requeue || c.err != nil {
return
}
@@ -897,6 +983,7 @@ func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeKubernetesModeSer
c.err = fmt.Errorf("failed to patch kubernetes mode service account without finalizer: %w", err)
return
}
c.requeue = true
c.logger.Info("Removed finalizer from container mode kubernetes service account")
return
case err != nil && !kerrors.IsNotFound(err):
@@ -909,7 +996,10 @@ func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeKubernetesModeSer
}
func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeNoPermissionServiceAccountFinalizer(ctx context.Context) {
if c.err != nil {
ctx, span := otel.Tracer("arc").Start(ctx, "autoscalingRunnerSetFinalizerDependencyCleaner.removeNoPermissionServiceAccountFinalizer")
defer span.End()
if c.requeue || c.err != nil {
return
}
@@ -940,6 +1030,7 @@ func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeNoPermissionServi
c.err = fmt.Errorf("failed to patch service account without finalizer: %w", err)
return
}
c.requeue = true
c.logger.Info("Removed finalizer from no permission service account", "name", serviceAccountName)
return
case err != nil && !kerrors.IsNotFound(err):
@@ -952,7 +1043,10 @@ func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeNoPermissionServi
}
func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeGitHubSecretFinalizer(ctx context.Context) {
if c.err != nil {
ctx, span := otel.Tracer("arc").Start(ctx, "autoscalingRunnerSetFinalizerDependencyCleaner.removeGitHubSecretFinalizer")
defer span.End()
if c.requeue || c.err != nil {
return
}
@@ -983,6 +1077,7 @@ func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeGitHubSecretFinal
c.err = fmt.Errorf("failed to patch GitHub secret without finalizer: %w", err)
return
}
c.requeue = true
c.logger.Info("Removed finalizer from GitHub secret", "name", githubSecretName)
return
case err != nil && !kerrors.IsNotFound(err) && !kerrors.IsForbidden(err):
@@ -995,7 +1090,10 @@ func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeGitHubSecretFinal
}
func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeManagerRoleBindingFinalizer(ctx context.Context) {
if c.err != nil {
ctx, span := otel.Tracer("arc").Start(ctx, "autoscalingRunnerSetFinalizerDependencyCleaner.removeManagerRoleBindingFinalizer")
defer span.End()
if c.requeue || c.err != nil {
return
}
@@ -1026,6 +1124,7 @@ func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeManagerRoleBindin
c.err = fmt.Errorf("failed to patch manager role binding without finalizer: %w", err)
return
}
c.requeue = true
c.logger.Info("Removed finalizer from manager role binding", "name", managerRoleBindingName)
return
case err != nil && !kerrors.IsNotFound(err):
@@ -1038,7 +1137,10 @@ func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeManagerRoleBindin
}
func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeManagerRoleFinalizer(ctx context.Context) {
if c.err != nil {
ctx, span := otel.Tracer("arc").Start(ctx, "autoscalingRunnerSetFinalizerDependencyCleaner.removeManagerRoleFinalizer")
defer span.End()
if c.requeue || c.err != nil {
return
}
@@ -1069,6 +1171,7 @@ func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeManagerRoleFinali
c.err = fmt.Errorf("failed to patch manager role without finalizer: %w", err)
return
}
c.requeue = true
c.logger.Info("Removed finalizer from manager role", "name", managerRoleName)
return
case err != nil && !kerrors.IsNotFound(err):

View File
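The hunks above replace the cleaner's single `Err()` accessor with a `result()` pair and thread a `requeue` flag through every removal step, so each reconcile pass removes at most one finalizer and then requeues. A minimal runnable sketch of that guard-and-requeue chain (the `cleaner`/`step` names here are illustrative, not ARC API):

package main

import "fmt"

// cleaner mirrors the pattern above: every step bails out early once a
// previous step has either scheduled a requeue or hit an error.
type cleaner struct {
	requeue bool
	err     error
}

func (c *cleaner) result() (requeue bool, err error) { return c.requeue, c.err }

// step runs fn only if no earlier step requeued or errored; on success it
// sets requeue so the caller reconciles again before the next step runs.
func (c *cleaner) step(name string, fn func() error) {
	if c.requeue || c.err != nil {
		return
	}
	if err := fn(); err != nil {
		c.err = fmt.Errorf("%s: %w", name, err)
		return
	}
	c.requeue = true
	fmt.Println("removed finalizer from", name)
}

func main() {
	c := &cleaner{}
	c.step("kubernetes mode role binding", func() error { return nil })
	c.step("kubernetes mode role", func() error { return nil }) // skipped: requeue already set
	fmt.Println(c.result()) // true <nil>
}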

@@ -34,7 +34,7 @@ import (
)
const (
autoscalingRunnerSetTestTimeout = time.Second * 20
autoscalingRunnerSetTestTimeout = time.Second * 5
autoscalingRunnerSetTestInterval = time.Millisecond * 250
autoscalingRunnerSetTestGitHubToken = "gh_token"
)

View File

@@ -3,6 +3,7 @@ package actionsgithubcom
import (
"context"
"go.opentelemetry.io/otel"
kclient "sigs.k8s.io/controller-runtime/pkg/client"
)
@@ -16,6 +17,9 @@ type patcher interface {
}
func patch[T object[T]](ctx context.Context, client patcher, obj T, update func(obj T)) error {
ctx, span := otel.Tracer("arc").Start(ctx, "patch")
defer span.End()
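// Snapshot the object before mutating it: the client later diffs the updated
// object against this copy (via MergeFrom) to send a minimal merge patch.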
original := obj.DeepCopy()
update(obj)
return client.Patch(ctx, obj, kclient.MergeFrom(original))
@@ -26,6 +30,9 @@ type subResourcePatcher interface {
}
func patchSubResource[T object[T]](ctx context.Context, client subResourcePatcher, obj T, update func(obj T)) error {
ctx, span := otel.Tracer("arc").Start(ctx, "patchSubResource")
defer span.End()
original := obj.DeepCopy()
update(obj)
return client.Patch(ctx, obj, kclient.MergeFrom(original))

View File
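For context, a self-contained sketch of how the generic helper above is shaped and called; the `object` constraint is paraphrased and `fakePatcher` is a stand-in for the controller-runtime client, not ARC code:

package main

import (
	"context"
	"fmt"
)

// object paraphrases the constraint used above: a type that can deep-copy itself.
type object[T any] interface{ DeepCopy() T }

// patcher stands in for the controller-runtime client; the real helper wraps
// original in kclient.MergeFrom before calling Patch.
type patcher[T any] interface {
	Patch(ctx context.Context, obj T, original T) error
}

func patch[T object[T]](ctx context.Context, c patcher[T], obj T, update func(T)) error {
	original := obj.DeepCopy() // snapshot before mutation
	update(obj)
	return c.Patch(ctx, obj, original) // client computes the diff
}

type runner struct{ Phase string }

func (r *runner) DeepCopy() *runner { cp := *r; return &cp }

type fakePatcher struct{}

func (fakePatcher) Patch(_ context.Context, obj, original *runner) error {
	fmt.Printf("patch: %q -> %q\n", original.Phase, obj.Phase)
	return nil
}

func main() {
	r := &runner{Phase: "Pending"}
	_ = patch(context.Background(), fakePatcher{}, r, func(o *runner) { o.Phase = "Running" })
}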

@@ -26,6 +26,8 @@ import (
"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
"github.com/actions/actions-runner-controller/github/actions"
"github.com/go-logr/logr"
"go.opentelemetry.io/otel"
otelCodes "go.opentelemetry.io/otel/codes"
"go.uber.org/multierr"
corev1 "k8s.io/api/core/v1"
kerrors "k8s.io/apimachinery/pkg/api/errors"
@@ -49,10 +51,10 @@ const (
// EphemeralRunnerReconciler reconciles a EphemeralRunner object
type EphemeralRunnerReconciler struct {
client.Client
Log logr.Logger
Scheme *runtime.Scheme
ActionsClient actions.MultiClient
ResourceBuilder
Log logr.Logger
Scheme *runtime.Scheme
ActionsClient actions.MultiClient
resourceBuilder resourceBuilder
}
// +kubebuilder:rbac:groups=actions.github.com,resources=ephemeralrunners,verbs=get;list;watch;create;update;patch;delete
@@ -68,6 +70,9 @@ type EphemeralRunnerReconciler struct {
// For more details, check Reconcile and its Result here:
// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.6.4/pkg/reconcile
func (r *EphemeralRunnerReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerReconciler.Reconcile")
defer span.End()
log := r.Log.WithValues("ephemeralrunner", req.NamespacedName)
ephemeralRunner := new(v1alpha1.EphemeralRunner)
@@ -149,6 +154,19 @@ func (r *EphemeralRunnerReconciler) Reconcile(ctx context.Context, req ctrl.Requ
return ctrl.Result{}, nil
}
if !controllerutil.ContainsFinalizer(ephemeralRunner, ephemeralRunnerActionsFinalizerName) {
log.Info("Adding runner registration finalizer")
err := patch(ctx, r.Client, ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) {
controllerutil.AddFinalizer(obj, ephemeralRunnerActionsFinalizerName)
})
if err != nil {
log.Error(err, "Failed to update with runner registration finalizer set")
return ctrl.Result{}, err
}
log.Info("Successfully added runner registration finalizer")
}
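// Moved up from below: unlike the old placement, this block no longer ends
// with an early return, so registration and pod creation can proceed within
// the same reconcile pass.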
if !controllerutil.ContainsFinalizer(ephemeralRunner, ephemeralRunnerFinalizerName) {
log.Info("Adding finalizer")
if err := patch(ctx, r.Client, ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) {
@@ -162,23 +180,11 @@ func (r *EphemeralRunnerReconciler) Reconcile(ctx context.Context, req ctrl.Requ
return ctrl.Result{}, nil
}
if !controllerutil.ContainsFinalizer(ephemeralRunner, ephemeralRunnerActionsFinalizerName) {
log.Info("Adding runner registration finalizer")
err := patch(ctx, r.Client, ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) {
controllerutil.AddFinalizer(obj, ephemeralRunnerActionsFinalizerName)
})
if err != nil {
log.Error(err, "Failed to update with runner registration finalizer set")
return ctrl.Result{}, err
}
log.Info("Successfully added runner registration finalizer")
return ctrl.Result{}, nil
}
if ephemeralRunner.Status.RunnerId == 0 {
log.Info("Creating new ephemeral runner registration and updating status with runner config")
return r.updateStatusWithRunnerConfig(ctx, ephemeralRunner, log)
if r, err := r.updateStatusWithRunnerConfig(ctx, ephemeralRunner, log); r != nil {
return *r, err
}
}
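// A nil *ctrl.Result from updateStatusWithRunnerConfig signals that the
// status was updated in place and reconciliation can fall through to secret
// and pod creation without a requeue.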
secret := new(corev1.Secret)
@@ -189,7 +195,17 @@ func (r *EphemeralRunnerReconciler) Reconcile(ctx context.Context, req ctrl.Requ
}
// create secret if not created
log.Info("Creating new ephemeral runner secret for jitconfig.")
return r.createSecret(ctx, ephemeralRunner, log)
if r, err := r.createSecret(ctx, ephemeralRunner, log); r != nil {
return *r, err
}
// Re-fetch the secret that was just created.
// Otherwise, even though we want to continue on to pod creation,
// it would fail because the missing secret makes the pod spec invalid.
if err := r.Get(ctx, req.NamespacedName, secret); err != nil {
log.Error(err, "Failed to fetch secret")
return ctrl.Result{}, err
}
}
pod := new(corev1.Pod)
@@ -295,6 +311,9 @@ func (r *EphemeralRunnerReconciler) Reconcile(ctx context.Context, req ctrl.Requ
}
func (r *EphemeralRunnerReconciler) cleanupRunnerFromService(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) (ctrl.Result, error) {
ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerReconciler.cleanupRunnerFromService")
defer span.End()
if err := r.deleteRunnerFromService(ctx, ephemeralRunner, log); err != nil {
actionsError := &actions.ActionsError{}
if !errors.As(err, &actionsError) {
@@ -324,6 +343,15 @@ func (r *EphemeralRunnerReconciler) cleanupRunnerFromService(ctx context.Context
}
func (r *EphemeralRunnerReconciler) cleanupResources(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) (deleted bool, err error) {
ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerReconciler.cleanupResources")
defer span.End()
defer func() {
if err != nil {
span.SetStatus(otelCodes.Error, "error")
span.RecordError(err)
}
}()
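// The deferred closure above mirrors a pattern repeated across these methods:
// because err is a named return, its final value is visible to the defer,
// which marks the span failed and records the error for the exported trace.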
log.Info("Cleaning up the runner pod")
pod := new(corev1.Pod)
err = r.Get(ctx, types.NamespacedName{Namespace: ephemeralRunner.Namespace, Name: ephemeralRunner.Name}, pod)
@@ -362,6 +390,15 @@ func (r *EphemeralRunnerReconciler) cleanupResources(ctx context.Context, epheme
}
func (r *EphemeralRunnerReconciler) cleanupContainerHooksResources(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) (done bool, err error) {
ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerReconciler.cleanupContainerHooksResources")
defer span.End()
defer func() {
if err != nil {
span.SetStatus(otelCodes.Error, "error")
span.RecordError(err)
}
}()
log.Info("Cleaning up runner linked pods")
done, err = r.cleanupRunnerLinkedPods(ctx, ephemeralRunner, log)
if err != nil {
@@ -382,6 +419,15 @@ func (r *EphemeralRunnerReconciler) cleanupContainerHooksResources(ctx context.C
}
func (r *EphemeralRunnerReconciler) cleanupRunnerLinkedPods(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) (done bool, err error) {
ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerReconciler.cleanupRunnerLinkedPods")
defer span.End()
defer func() {
if err != nil {
span.SetStatus(otelCodes.Error, "error")
span.RecordError(err)
}
}()
runnerLinedLabels := client.MatchingLabels(
map[string]string{
"runner-pod": ephemeralRunner.Name,
@@ -417,6 +463,15 @@ func (r *EphemeralRunnerReconciler) cleanupRunnerLinkedPods(ctx context.Context,
}
func (r *EphemeralRunnerReconciler) cleanupRunnerLinkedSecrets(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) (done bool, err error) {
ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerReconciler.cleanupRunnerLinkedSecrets")
defer span.End()
defer func() {
if err != nil {
span.SetStatus(otelCodes.Error, "error")
span.RecordError(err)
}
}()
runnerLinkedLabels := client.MatchingLabels(
map[string]string{
"runner-pod": ephemeralRunner.ObjectMeta.Name,
@@ -452,6 +507,9 @@ func (r *EphemeralRunnerReconciler) cleanupRunnerLinkedSecrets(ctx context.Conte
}
func (r *EphemeralRunnerReconciler) markAsFailed(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, errMessage string, reason string, log logr.Logger) error {
ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerReconciler.markAsFailed")
defer span.End()
log.Info("Updating ephemeral runner status to Failed")
if err := patchSubResource(ctx, r.Status(), ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) {
obj.Status.Phase = corev1.PodFailed
@@ -471,6 +529,9 @@ func (r *EphemeralRunnerReconciler) markAsFailed(ctx context.Context, ephemeralR
}
func (r *EphemeralRunnerReconciler) markAsFinished(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) error {
ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerReconciler.markAsFinished")
defer span.End()
log.Info("Updating ephemeral runner status to Finished")
if err := patchSubResource(ctx, r.Status(), ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) {
obj.Status.Phase = corev1.PodSucceeded
@@ -485,6 +546,9 @@ func (r *EphemeralRunnerReconciler) markAsFinished(ctx context.Context, ephemera
// deletePodAsFailed is responsible for deleting the pod and updating the .Status.Failures for tracking failure count.
// It should not be responsible for setting the status to Failed.
func (r *EphemeralRunnerReconciler) deletePodAsFailed(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, pod *corev1.Pod, log logr.Logger) error {
ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerReconciler.deletePodAsFailed")
defer span.End()
if pod.ObjectMeta.DeletionTimestamp.IsZero() {
log.Info("Deleting the ephemeral runner pod", "podId", pod.UID)
if err := r.Delete(ctx, pod); err != nil && !kerrors.IsNotFound(err) {
@@ -511,35 +575,30 @@ func (r *EphemeralRunnerReconciler) deletePodAsFailed(ctx context.Context, ephem
// updateStatusWithRunnerConfig fetches runtime configuration needed by the runner
// This method should always set .status.runnerId and .status.runnerJITConfig
func (r *EphemeralRunnerReconciler) updateStatusWithRunnerConfig(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) (ctrl.Result, error) {
func (r *EphemeralRunnerReconciler) updateStatusWithRunnerConfig(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) (*ctrl.Result, error) {
ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerReconciler.updateStatusWithRunnerConfig")
defer span.End()
// Runner is not registered with the service. We need to register it first
log.Info("Creating ephemeral runner JIT config")
actionsClient, err := r.actionsClientFor(ctx, ephemeralRunner)
if err != nil {
return ctrl.Result{}, fmt.Errorf("failed to get actions client for generating JIT config: %v", err)
return &ctrl.Result{}, fmt.Errorf("failed to get actions client for generating JIT config: %v", err)
}
jitSettings := &actions.RunnerScaleSetJitRunnerSetting{
Name: ephemeralRunner.Name,
}
for i := range ephemeralRunner.Spec.Spec.Containers {
if ephemeralRunner.Spec.Spec.Containers[i].Name == EphemeralRunnerContainerName &&
ephemeralRunner.Spec.Spec.Containers[i].WorkingDir != "" {
jitSettings.WorkFolder = ephemeralRunner.Spec.Spec.Containers[i].WorkingDir
}
}
jitConfig, err := actionsClient.GenerateJitRunnerConfig(ctx, jitSettings, ephemeralRunner.Spec.RunnerScaleSetId)
if err != nil {
actionsError := &actions.ActionsError{}
if !errors.As(err, &actionsError) {
return ctrl.Result{}, fmt.Errorf("failed to generate JIT config with generic error: %v", err)
return &ctrl.Result{}, fmt.Errorf("failed to generate JIT config with generic error: %v", err)
}
if actionsError.StatusCode != http.StatusConflict ||
!actionsError.IsException("AgentExistsException") {
return ctrl.Result{}, fmt.Errorf("failed to generate JIT config with Actions service error: %v", err)
return &ctrl.Result{}, fmt.Errorf("failed to generate JIT config with Actions service error: %v", err)
}
// If the runner with the name we want already exists it means:
@@ -552,12 +611,12 @@ func (r *EphemeralRunnerReconciler) updateStatusWithRunnerConfig(ctx context.Con
log.Info("Getting runner jit config failed with conflict error, trying to get the runner by name", "runnerName", ephemeralRunner.Name)
existingRunner, err := actionsClient.GetRunnerByName(ctx, ephemeralRunner.Name)
if err != nil {
return ctrl.Result{}, fmt.Errorf("failed to get runner by name: %v", err)
return &ctrl.Result{}, fmt.Errorf("failed to get runner by name: %v", err)
}
if existingRunner == nil {
log.Info("Runner with the same name does not exist, re-queuing the reconciliation")
return ctrl.Result{Requeue: true}, nil
return &ctrl.Result{Requeue: true}, nil
}
log.Info("Found the runner with the same name", "runnerId", existingRunner.Id, "runnerScaleSetId", existingRunner.RunnerScaleSetId)
@@ -565,16 +624,16 @@ func (r *EphemeralRunnerReconciler) updateStatusWithRunnerConfig(ctx context.Con
log.Info("Removing the runner with the same name")
err := actionsClient.RemoveRunner(ctx, int64(existingRunner.Id))
if err != nil {
return ctrl.Result{}, fmt.Errorf("failed to remove runner from the service: %v", err)
return &ctrl.Result{}, fmt.Errorf("failed to remove runner from the service: %v", err)
}
log.Info("Removed the runner with the same name, re-queuing the reconciliation")
return ctrl.Result{Requeue: true}, nil
return &ctrl.Result{Requeue: true}, nil
}
// TODO: Do we want to mark the ephemeral runner as failed and let the EphemeralRunnerSet clean it up, so we can recover from this situation?
// The situation is that the EphemeralRunner's name is already used by something else to register a runner, and we can't take control back.
return ctrl.Result{}, fmt.Errorf("runner with the same name but doesn't belong to this RunnerScaleSet: %v", err)
return &ctrl.Result{}, fmt.Errorf("runner with the same name but doesn't belong to this RunnerScaleSet: %v", err)
}
log.Info("Created ephemeral runner JIT config", "runnerId", jitConfig.Runner.Id)
@@ -585,14 +644,26 @@ func (r *EphemeralRunnerReconciler) updateStatusWithRunnerConfig(ctx context.Con
obj.Status.RunnerJITConfig = jitConfig.EncodedJITConfig
})
if err != nil {
return ctrl.Result{}, fmt.Errorf("failed to update runner status for RunnerId/RunnerName/RunnerJITConfig: %v", err)
return &ctrl.Result{}, fmt.Errorf("failed to update runner status for RunnerId/RunnerName/RunnerJITConfig: %v", err)
}
// We want to continue without a requeue for faster pod creation.
//
// To do so, we update the status in-place, so that both continuing this loop
// and requeuing (which skips updateStatusWithRunnerConfig in the next loop)
// have the same effect.
ephemeralRunner.Status.RunnerId = jitConfig.Runner.Id
ephemeralRunner.Status.RunnerName = jitConfig.Runner.Name
ephemeralRunner.Status.RunnerJITConfig = jitConfig.EncodedJITConfig
log.Info("Updated ephemeral runner status with runnerId and runnerJITConfig")
return ctrl.Result{}, nil
return nil, nil
}
func (r *EphemeralRunnerReconciler) createPod(ctx context.Context, runner *v1alpha1.EphemeralRunner, secret *corev1.Secret, log logr.Logger) (ctrl.Result, error) {
ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerReconciler.createPod")
defer span.End()
var envs []corev1.EnvVar
if runner.Spec.ProxySecretRef != "" {
http := corev1.EnvVar{
@@ -642,7 +713,7 @@ func (r *EphemeralRunnerReconciler) createPod(ctx context.Context, runner *v1alp
}
log.Info("Creating new pod for ephemeral runner")
newPod := r.ResourceBuilder.newEphemeralRunnerPod(ctx, runner, secret, envs...)
newPod := r.resourceBuilder.newEphemeralRunnerPod(ctx, runner, secret, envs...)
if err := ctrl.SetControllerReference(runner, newPod, r.Scheme); err != nil {
log.Error(err, "Failed to set controller reference to a new pod")
@@ -665,21 +736,24 @@ func (r *EphemeralRunnerReconciler) createPod(ctx context.Context, runner *v1alp
return ctrl.Result{}, nil
}
func (r *EphemeralRunnerReconciler) createSecret(ctx context.Context, runner *v1alpha1.EphemeralRunner, log logr.Logger) (ctrl.Result, error) {
func (r *EphemeralRunnerReconciler) createSecret(ctx context.Context, runner *v1alpha1.EphemeralRunner, log logr.Logger) (*ctrl.Result, error) {
ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerReconciler.createSecret")
defer span.End()
log.Info("Creating new secret for ephemeral runner")
jitSecret := r.ResourceBuilder.newEphemeralRunnerJitSecret(runner)
jitSecret := r.resourceBuilder.newEphemeralRunnerJitSecret(runner)
if err := ctrl.SetControllerReference(runner, jitSecret, r.Scheme); err != nil {
return ctrl.Result{}, fmt.Errorf("failed to set controller reference: %v", err)
return &ctrl.Result{}, fmt.Errorf("failed to set controller reference: %v", err)
}
log.Info("Created new secret spec for ephemeral runner")
if err := r.Create(ctx, jitSecret); err != nil {
return ctrl.Result{}, fmt.Errorf("failed to create jit secret: %v", err)
return &ctrl.Result{}, fmt.Errorf("failed to create jit secret: %v", err)
}
log.Info("Created ephemeral runner secret", "secretName", jitSecret.Name)
return ctrl.Result{Requeue: true}, nil
return nil, nil
}
// updateRunStatusFromPod is responsible for updating non-exiting statuses.
@@ -688,6 +762,9 @@ func (r *EphemeralRunnerReconciler) createSecret(ctx context.Context, runner *v1
// The event should not be re-queued since the termination status should be set
// before proceeding with reconciliation logic
func (r *EphemeralRunnerReconciler) updateRunStatusFromPod(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, pod *corev1.Pod, log logr.Logger) error {
ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerReconciler.updateRunStatusFromPod")
defer span.End()
if pod.Status.Phase == corev1.PodSucceeded || pod.Status.Phase == corev1.PodFailed {
return nil
}
@@ -711,6 +788,9 @@ func (r *EphemeralRunnerReconciler) updateRunStatusFromPod(ctx context.Context,
}
func (r *EphemeralRunnerReconciler) actionsClientFor(ctx context.Context, runner *v1alpha1.EphemeralRunner) (actions.ActionsService, error) {
ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerReconciler.actionsClientFor")
defer span.End()
secret := new(corev1.Secret)
if err := r.Get(ctx, types.NamespacedName{Namespace: runner.Namespace, Name: runner.Spec.GitHubConfigSecret}, secret); err != nil {
return nil, fmt.Errorf("failed to get secret: %w", err)
@@ -731,6 +811,9 @@ func (r *EphemeralRunnerReconciler) actionsClientFor(ctx context.Context, runner
}
func (r *EphemeralRunnerReconciler) actionsClientOptionsFor(ctx context.Context, runner *v1alpha1.EphemeralRunner) ([]actions.ClientOption, error) {
ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerReconciler.actionsClientOptionsFor")
defer span.End()
var opts []actions.ClientOption
if runner.Spec.Proxy != nil {
proxyFunc, err := runner.Spec.Proxy.ProxyFunc(func(s string) (*corev1.Secret, error) {
@@ -780,6 +863,15 @@ func (r *EphemeralRunnerReconciler) actionsClientOptionsFor(ctx context.Context,
// runnerRegisteredWithService checks if the runner is still registered with the service.
// It returns found=false and err=nil if the ephemeral runner no longer exists in the GitHub service and should be deleted.
func (r EphemeralRunnerReconciler) runnerRegisteredWithService(ctx context.Context, runner *v1alpha1.EphemeralRunner, log logr.Logger) (found bool, err error) {
ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerReconciler.runnerRegisteredWithService")
defer span.End()
defer func() {
if err != nil {
span.SetStatus(otelCodes.Error, "error")
span.RecordError(err)
}
}()
actionsClient, err := r.actionsClientFor(ctx, runner)
if err != nil {
return false, fmt.Errorf("failed to get Actions client for ScaleSet: %w", err)
@@ -807,6 +899,9 @@ func (r EphemeralRunnerReconciler) runnerRegisteredWithService(ctx context.Conte
}
func (r *EphemeralRunnerReconciler) deleteRunnerFromService(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) error {
ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerReconciler.deleteRunnerFromService")
defer span.End()
client, err := r.actionsClientFor(ctx, ephemeralRunner)
if err != nil {
return fmt.Errorf("failed to get actions client for runner: %v", err)
@@ -828,6 +923,7 @@ func (r *EphemeralRunnerReconciler) SetupWithManager(mgr ctrl.Manager) error {
For(&v1alpha1.EphemeralRunner{}).
Owns(&corev1.Pod{}).
WithEventFilter(predicate.ResourceVersionChangedPredicate{}).
Named("ephemeral-runner-controller").
Complete(r)
}

View File
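The start-span/record-error boilerplate recurs in nearly every method above. A hedged sketch of how it could be factored into a helper; `traced` is illustrative, not an ARC or OpenTelemetry API:

package main

import (
	"context"
	"errors"
	"fmt"

	"go.opentelemetry.io/otel"
	otelCodes "go.opentelemetry.io/otel/codes"
)

// traced starts a span and returns a finish func that ends it, recording the
// final error first, mirroring the inline defer blocks in the controllers.
func traced(ctx context.Context, name string) (context.Context, func(err error)) {
	ctx, span := otel.Tracer("arc").Start(ctx, name)
	return ctx, func(err error) {
		if err != nil {
			span.SetStatus(otelCodes.Error, "error")
			span.RecordError(err)
		}
		span.End()
	}
}

func cleanupResources(ctx context.Context) (err error) {
	ctx, finish := traced(ctx, "cleanupResources")
	defer func() { finish(err) }() // named return makes the final err visible
	_ = ctx                        // real code would pass ctx to client calls
	return errors.New("boom")
}

func main() {
	// Without a configured TracerProvider this uses the global no-op tracer,
	// so the sketch runs standalone.
	fmt.Println(cleanupResources(context.Background()))
}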

@@ -29,9 +29,9 @@ import (
)
const (
ephemeralRunnerTimeout = time.Second * 20
ephemeralRunnerInterval = time.Millisecond * 250
runnerImage = "ghcr.io/actions/actions-runner:latest"
timeout = time.Second * 10
interval = time.Millisecond * 250
runnerImage = "ghcr.io/actions/actions-runner:latest"
)
func newExampleRunner(name, namespace, configSecretName string) *v1alpha1.EphemeralRunner {
@@ -133,9 +133,9 @@ var _ = Describe("EphemeralRunner", func() {
n := len(created.Finalizers) // avoid capacity mismatch
return created.Finalizers[:n:n], nil
},
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
).Should(BeEquivalentTo([]string{ephemeralRunnerFinalizerName, ephemeralRunnerActionsFinalizerName}))
timeout,
interval,
).Should(BeEquivalentTo([]string{ephemeralRunnerActionsFinalizerName, ephemeralRunnerFinalizerName}))
Eventually(
func() (bool, error) {
@@ -147,8 +147,8 @@ var _ = Describe("EphemeralRunner", func() {
_, ok := secret.Data[jitTokenKey]
return ok, nil
},
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
timeout,
interval,
).Should(BeEquivalentTo(true))
Eventually(
@@ -160,8 +160,8 @@ var _ = Describe("EphemeralRunner", func() {
return pod.Name, nil
},
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
timeout,
interval,
).Should(BeEquivalentTo(ephemeralRunner.Name))
})
@@ -184,8 +184,8 @@ var _ = Describe("EphemeralRunner", func() {
}
return true, nil
},
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
timeout,
interval,
).Should(BeEquivalentTo(true))
})
@@ -203,7 +203,7 @@ var _ = Describe("EphemeralRunner", func() {
return "", nil
}
return updated.Status.Phase, nil
}, ephemeralRunnerTimeout, ephemeralRunnerInterval).Should(BeEquivalentTo(corev1.PodFailed))
}, timeout, interval).Should(BeEquivalentTo(corev1.PodFailed))
Expect(updated.Status.Reason).Should(Equal("InvalidPod"))
Expect(updated.Status.Message).Should(Equal("Failed to create the pod: pods \"invalid-ephemeral-runner\" is forbidden: no PriorityClass with name notexist was found"))
})
@@ -247,8 +247,8 @@ var _ = Describe("EphemeralRunner", func() {
}
return true, nil
},
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
timeout,
interval,
).Should(BeEquivalentTo(true))
// create runner linked secret
@@ -273,8 +273,8 @@ var _ = Describe("EphemeralRunner", func() {
}
return true, nil
},
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
timeout,
interval,
).Should(BeEquivalentTo(true))
err = k8sClient.Delete(ctx, ephemeralRunner)
@@ -289,8 +289,8 @@ var _ = Describe("EphemeralRunner", func() {
}
return kerrors.IsNotFound(err), nil
},
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
timeout,
interval,
).Should(BeEquivalentTo(true))
Eventually(
@@ -302,8 +302,8 @@ var _ = Describe("EphemeralRunner", func() {
}
return kerrors.IsNotFound(err), nil
},
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
timeout,
interval,
).Should(BeEquivalentTo(true))
Eventually(
@@ -315,8 +315,8 @@ var _ = Describe("EphemeralRunner", func() {
}
return kerrors.IsNotFound(err), nil
},
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
timeout,
interval,
).Should(BeEquivalentTo(true))
Eventually(
@@ -328,8 +328,8 @@ var _ = Describe("EphemeralRunner", func() {
}
return kerrors.IsNotFound(err), nil
},
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
timeout,
interval,
).Should(BeEquivalentTo(true))
Eventually(
@@ -341,8 +341,8 @@ var _ = Describe("EphemeralRunner", func() {
}
return kerrors.IsNotFound(err), nil
},
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
timeout,
interval,
).Should(BeEquivalentTo(true))
})
@@ -356,8 +356,8 @@ var _ = Describe("EphemeralRunner", func() {
}
return updatedEphemeralRunner.Status.RunnerId, nil
},
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
timeout,
interval,
).Should(BeNumerically(">", 0))
})
@@ -371,8 +371,8 @@ var _ = Describe("EphemeralRunner", func() {
}
return true, nil
},
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
timeout,
interval,
).Should(BeEquivalentTo(true))
for _, phase := range []corev1.PodPhase{corev1.PodRunning, corev1.PodPending} {
@@ -395,8 +395,8 @@ var _ = Describe("EphemeralRunner", func() {
}
return updated.Status.Phase, nil
},
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
timeout,
interval,
).Should(BeEquivalentTo(phase))
}
})
@@ -411,8 +411,8 @@ var _ = Describe("EphemeralRunner", func() {
}
return true, nil
},
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
timeout,
interval,
).Should(BeEquivalentTo(true))
pod.Status.Phase = corev1.PodRunning
@@ -427,7 +427,7 @@ var _ = Describe("EphemeralRunner", func() {
}
return updated.Status.Phase, nil
},
ephemeralRunnerTimeout,
timeout,
).Should(BeEquivalentTo(""))
})
@@ -462,8 +462,8 @@ var _ = Describe("EphemeralRunner", func() {
Expect(err).To(BeNil(), "Failed to update pod status")
return false, fmt.Errorf("pod haven't failed for 5 times.")
},
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
timeout,
interval,
).Should(BeEquivalentTo(true), "we should stop creating pod after 5 failures")
// In case a pod still got created due to controller-runtime cache delay, mark the container as exited
@@ -488,7 +488,7 @@ var _ = Describe("EphemeralRunner", func() {
return "", err
}
return updated.Status.Reason, nil
}, ephemeralRunnerTimeout, ephemeralRunnerInterval).Should(BeEquivalentTo("TooManyPodFailures"), "Reason should be TooManyPodFailures")
}, timeout, interval).Should(BeEquivalentTo("TooManyPodFailures"), "Reason should be TooManyPodFailures")
// EphemeralRunner should not have any pod
Eventually(func() (bool, error) {
@@ -497,7 +497,7 @@ var _ = Describe("EphemeralRunner", func() {
return false, nil
}
return kerrors.IsNotFound(err), nil
}, ephemeralRunnerTimeout, ephemeralRunnerInterval).Should(BeEquivalentTo(true))
}, timeout, interval).Should(BeEquivalentTo(true))
})
It("It should re-create pod on eviction", func() {
@@ -510,8 +510,8 @@ var _ = Describe("EphemeralRunner", func() {
}
return true, nil
},
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
timeout,
interval,
).Should(BeEquivalentTo(true))
pod.Status.Phase = corev1.PodFailed
@@ -530,7 +530,7 @@ var _ = Describe("EphemeralRunner", func() {
return false, err
}
return len(updated.Status.Failures) == 1, nil
}, ephemeralRunnerTimeout, ephemeralRunnerInterval).Should(BeEquivalentTo(true))
}, timeout, interval).Should(BeEquivalentTo(true))
// should re-create after failure
Eventually(
@@ -541,8 +541,8 @@ var _ = Describe("EphemeralRunner", func() {
}
return true, nil
},
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
timeout,
interval,
).Should(BeEquivalentTo(true))
})
@@ -555,8 +555,8 @@ var _ = Describe("EphemeralRunner", func() {
}
return true, nil
},
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
timeout,
interval,
).Should(BeEquivalentTo(true))
pod.Status.ContainerStatuses = append(pod.Status.ContainerStatuses, corev1.ContainerStatus{
@@ -577,7 +577,7 @@ var _ = Describe("EphemeralRunner", func() {
return false, err
}
return len(updated.Status.Failures) == 1, nil
}, ephemeralRunnerTimeout, ephemeralRunnerInterval).Should(BeEquivalentTo(true))
}, timeout, interval).Should(BeEquivalentTo(true))
// should re-create after failure
Eventually(
@@ -588,8 +588,8 @@ var _ = Describe("EphemeralRunner", func() {
}
return true, nil
},
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
timeout,
interval,
).Should(BeEquivalentTo(true))
})
@@ -602,8 +602,8 @@ var _ = Describe("EphemeralRunner", func() {
}
return true, nil
},
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
timeout,
interval,
).Should(BeEquivalentTo(true))
// first set phase to running
@@ -627,8 +627,8 @@ var _ = Describe("EphemeralRunner", func() {
}
return updated.Status.Phase, nil
},
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
timeout,
interval,
).Should(BeEquivalentTo(corev1.PodRunning))
// set phase to succeeded
@@ -644,7 +644,7 @@ var _ = Describe("EphemeralRunner", func() {
}
return updated.Status.Phase, nil
},
ephemeralRunnerTimeout,
timeout,
).Should(BeEquivalentTo(corev1.PodRunning))
})
})
@@ -700,7 +700,7 @@ var _ = Describe("EphemeralRunner", func() {
return false, err
}
return true, nil
}, ephemeralRunnerTimeout, ephemeralRunnerInterval).Should(BeEquivalentTo(true))
}, timeout, interval).Should(BeEquivalentTo(true))
pod.Status.ContainerStatuses = append(pod.Status.ContainerStatuses, corev1.ContainerStatus{
Name: EphemeralRunnerContainerName,
@@ -720,7 +720,7 @@ var _ = Describe("EphemeralRunner", func() {
return "", nil
}
return updated.Status.Phase, nil
}, ephemeralRunnerTimeout, ephemeralRunnerInterval).Should(BeEquivalentTo(corev1.PodSucceeded))
}, timeout, interval).Should(BeEquivalentTo(corev1.PodSucceeded))
})
})
@@ -800,7 +800,7 @@ var _ = Describe("EphemeralRunner", func() {
return proxySuccessfulllyCalled
},
2*time.Second,
ephemeralRunnerInterval,
interval,
).Should(BeEquivalentTo(true))
})
@@ -825,8 +825,8 @@ var _ = Describe("EphemeralRunner", func() {
err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, pod)
g.Expect(err).To(BeNil(), "failed to get ephemeral runner pod")
},
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
timeout,
interval,
).Should(Succeed(), "failed to get ephemeral runner pod")
Expect(pod.Spec.Containers[0].Env).To(ContainElement(corev1.EnvVar{
@@ -958,7 +958,7 @@ var _ = Describe("EphemeralRunner", func() {
return serverSuccessfullyCalled
},
2*time.Second,
ephemeralRunnerInterval,
interval,
).Should(BeTrue(), "failed to contact server")
})
})

View File
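The renames above standardize the suite on shared `timeout`/`interval` constants passed to Gomega's polling helpers. A small illustrative test, not from ARC, showing what those two arguments control:

package example

import (
	"testing"
	"time"

	. "github.com/onsi/gomega"
)

// TestEventuallyPolling: Eventually retries the function every interval until
// it succeeds or timeout elapses; returning a non-nil error fails only that
// attempt, and polling continues.
func TestEventuallyPolling(t *testing.T) {
	g := NewWithT(t)
	start := time.Now()
	g.Eventually(
		func() (bool, error) {
			return time.Since(start) > 500*time.Millisecond, nil
		},
		time.Second*10,       // timeout
		time.Millisecond*250, // interval
	).Should(BeTrue())
}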

@@ -28,6 +28,7 @@ import (
"github.com/actions/actions-runner-controller/controllers/actions.github.com/metrics"
"github.com/actions/actions-runner-controller/github/actions"
"github.com/go-logr/logr"
"go.opentelemetry.io/otel"
"go.uber.org/multierr"
corev1 "k8s.io/api/core/v1"
kerrors "k8s.io/apimachinery/pkg/api/errors"
@@ -53,7 +54,7 @@ type EphemeralRunnerSetReconciler struct {
PublishMetrics bool
ResourceBuilder
resourceBuilder resourceBuilder
}
//+kubebuilder:rbac:groups=actions.github.com,resources=ephemeralrunnersets,verbs=get;list;watch;create;update;patch;delete
@@ -75,6 +76,9 @@ type EphemeralRunnerSetReconciler struct {
// be to bring the count of EphemeralRunners to the desired one, not to patch this resource
// until it is safe to do so
func (r *EphemeralRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerSetReconciler.Reconcile")
defer span.End()
log := r.Log.WithValues("ephemeralrunnerset", req.NamespacedName)
ephemeralRunnerSet := new(v1alpha1.EphemeralRunnerSet)
@@ -250,6 +254,9 @@ func (r *EphemeralRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl.R
}
func (r *EphemeralRunnerSetReconciler) cleanupFinishedEphemeralRunners(ctx context.Context, finishedEphemeralRunners []*v1alpha1.EphemeralRunner, log logr.Logger) error {
ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerSetReconciler.cleanupFinishedEphemeralRunners")
defer span.End()
// cleanup finished runners and proceed
var errs []error
for i := range finishedEphemeralRunners {
@@ -265,6 +272,9 @@ func (r *EphemeralRunnerSetReconciler) cleanupFinishedEphemeralRunners(ctx conte
}
func (r *EphemeralRunnerSetReconciler) cleanUpProxySecret(ctx context.Context, ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet, log logr.Logger) error {
ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerSetReconciler.cleanUpProxySecret")
defer span.End()
if ephemeralRunnerSet.Spec.EphemeralRunnerSpec.Proxy == nil {
return nil
}
@@ -284,6 +294,9 @@ func (r *EphemeralRunnerSetReconciler) cleanUpProxySecret(ctx context.Context, e
}
func (r *EphemeralRunnerSetReconciler) cleanUpEphemeralRunners(ctx context.Context, ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet, log logr.Logger) (bool, error) {
ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerSetReconciler.cleanUpEphemeralRunners")
defer span.End()
ephemeralRunnerList := new(v1alpha1.EphemeralRunnerList)
err := r.List(ctx, ephemeralRunnerList, client.InNamespace(ephemeralRunnerSet.Namespace), client.MatchingFields{resourceOwnerKey: ephemeralRunnerSet.Name})
if err != nil {
@@ -357,10 +370,13 @@ func (r *EphemeralRunnerSetReconciler) cleanUpEphemeralRunners(ctx context.Conte
// createEphemeralRunners provisions `count` number of v1alpha1.EphemeralRunner resources in the cluster.
func (r *EphemeralRunnerSetReconciler) createEphemeralRunners(ctx context.Context, runnerSet *v1alpha1.EphemeralRunnerSet, count int, log logr.Logger) error {
ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerSetReconciler.createEphemeralRunners")
defer span.End()
// Track multiple errors at once and return the bundle.
errs := make([]error, 0)
for i := 0; i < count; i++ {
ephemeralRunner := r.ResourceBuilder.newEphemeralRunner(runnerSet)
ephemeralRunner := r.resourceBuilder.newEphemeralRunner(runnerSet)
if runnerSet.Spec.EphemeralRunnerSpec.Proxy != nil {
ephemeralRunner.Spec.ProxySecretRef = proxyEphemeralRunnerSetSecretName(runnerSet)
}
@@ -386,6 +402,9 @@ func (r *EphemeralRunnerSetReconciler) createEphemeralRunners(ctx context.Contex
}
func (r *EphemeralRunnerSetReconciler) createProxySecret(ctx context.Context, ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet, log logr.Logger) error {
ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerSetReconciler.createProxySecret")
defer span.End()
proxySecretData, err := ephemeralRunnerSet.Spec.EphemeralRunnerSpec.Proxy.ToSecretData(func(s string) (*corev1.Secret, error) {
secret := new(corev1.Secret)
err := r.Get(ctx, types.NamespacedName{Namespace: ephemeralRunnerSet.Namespace, Name: s}, secret)
@@ -431,6 +450,9 @@ func (r *EphemeralRunnerSetReconciler) createProxySecret(ctx context.Context, ep
// When this happens, the next reconcile loop will try to delete the remaining ephemeral runners
// after we get notified by any of the `v1alpha1.EphemeralRunner.Status` updates.
func (r *EphemeralRunnerSetReconciler) deleteIdleEphemeralRunners(ctx context.Context, ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet, pendingEphemeralRunners, runningEphemeralRunners []*v1alpha1.EphemeralRunner, count int, log logr.Logger) error {
ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerSetReconciler.deleteIdleEphemeralRunners")
defer span.End()
if count <= 0 {
return nil
}
@@ -477,6 +499,9 @@ func (r *EphemeralRunnerSetReconciler) deleteIdleEphemeralRunners(ctx context.Co
}
func (r *EphemeralRunnerSetReconciler) deleteEphemeralRunnerWithActionsClient(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, actionsClient actions.ActionsService, log logr.Logger) (bool, error) {
ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerSetReconciler.deleteEphemeralRunnerWithActionsClient")
defer span.End()
if err := actionsClient.RemoveRunner(ctx, int64(ephemeralRunner.Status.RunnerId)); err != nil {
actionsError := &actions.ActionsError{}
if !errors.As(err, &actionsError) {
@@ -503,6 +528,9 @@ func (r *EphemeralRunnerSetReconciler) deleteEphemeralRunnerWithActionsClient(ct
}
func (r *EphemeralRunnerSetReconciler) actionsClientFor(ctx context.Context, rs *v1alpha1.EphemeralRunnerSet) (actions.ActionsService, error) {
ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerSetReconciler.actionsClientFor")
defer span.End()
secret := new(corev1.Secret)
if err := r.Get(ctx, types.NamespacedName{Namespace: rs.Namespace, Name: rs.Spec.EphemeralRunnerSpec.GitHubConfigSecret}, secret); err != nil {
return nil, fmt.Errorf("failed to get secret: %w", err)
@@ -523,6 +551,9 @@ func (r *EphemeralRunnerSetReconciler) actionsClientFor(ctx context.Context, rs
}
func (r *EphemeralRunnerSetReconciler) actionsClientOptionsFor(ctx context.Context, rs *v1alpha1.EphemeralRunnerSet) ([]actions.ClientOption, error) {
ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerSetReconciler.actionsClientOptionsFor")
defer span.End()
var opts []actions.ClientOption
if rs.Spec.EphemeralRunnerSpec.Proxy != nil {
proxyFunc, err := rs.Spec.EphemeralRunnerSpec.Proxy.ProxyFunc(func(s string) (*corev1.Secret, error) {

View File
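`createEphemeralRunners` above keeps creating the remaining runners even when one fails, then returns every failure as a bundle. A runnable sketch of that accumulate-and-combine approach using go.uber.org/multierr (already a dependency here); `createN` is illustrative, not ARC code:

package main

import (
	"fmt"

	"go.uber.org/multierr"
)

// createN keeps going after individual failures and hands back all of them
// combined into a single error (nil when every creation succeeded).
func createN(count int, create func(i int) error) error {
	errs := make([]error, 0)
	for i := 0; i < count; i++ {
		if err := create(i); err != nil {
			errs = append(errs, err)
			continue // don't abort; the next runner may still succeed
		}
	}
	return multierr.Combine(errs...)
}

func main() {
	err := createN(3, func(i int) error {
		if i == 1 {
			return fmt.Errorf("runner %d: quota exceeded", i)
		}
		return nil
	})
	fmt.Println(err) // runner 1: quota exceeded
}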

@@ -30,7 +30,7 @@ import (
)
const (
ephemeralRunnerSetTestTimeout = time.Second * 20
ephemeralRunnerSetTestTimeout = time.Second * 10
ephemeralRunnerSetTestInterval = time.Millisecond * 250
ephemeralRunnerSetTestGitHubToken = "gh_token"
)
@@ -794,6 +794,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
}
return len(runnerList.Items), nil
},
ephemeralRunnerSetTestTimeout,
ephemeralRunnerSetTestInterval,
@@ -1358,7 +1359,7 @@ var _ = Describe("Test EphemeralRunnerSet controller with proxy settings", func(
return proxySuccessfulllyCalled
},
2*time.Second,
ephemeralRunnerInterval,
interval,
).Should(BeEquivalentTo(true))
})
})

View File

@@ -8,7 +8,6 @@ import (
"math"
"net"
"strconv"
"strings"
"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
"github.com/actions/actions-runner-controller/build"
@@ -16,6 +15,7 @@ import (
"github.com/actions/actions-runner-controller/github/actions"
"github.com/actions/actions-runner-controller/hash"
"github.com/actions/actions-runner-controller/logging"
"go.opentelemetry.io/otel"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -69,11 +69,9 @@ func SetListenerEntrypoint(entrypoint string) {
}
}
type ResourceBuilder struct {
ExcludeLabelPropagationPrefixes []string
}
type resourceBuilder struct{}
func (b *ResourceBuilder) newAutoScalingListener(autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet, namespace, image string, imagePullSecrets []corev1.LocalObjectReference) (*v1alpha1.AutoscalingListener, error) {
func (b *resourceBuilder) newAutoScalingListener(autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet, namespace, image string, imagePullSecrets []corev1.LocalObjectReference) (*v1alpha1.AutoscalingListener, error) {
runnerScaleSetId, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdAnnotationKey])
if err != nil {
return nil, err
@@ -88,7 +86,7 @@ func (b *ResourceBuilder) newAutoScalingListener(autoscalingRunnerSet *v1alpha1.
effectiveMinRunners = *autoscalingRunnerSet.Spec.MinRunners
}
labels := b.mergeLabels(autoscalingRunnerSet.Labels, map[string]string{
labels := mergeLabels(autoscalingRunnerSet.Labels, map[string]string{
LabelKeyGitHubScaleSetNamespace: autoscalingRunnerSet.Namespace,
LabelKeyGitHubScaleSetName: autoscalingRunnerSet.Name,
LabelKeyKubernetesPartOf: labelValueKubernetesPartOf,
@@ -153,7 +151,7 @@ func (lm *listenerMetricsServerConfig) containerPort() (corev1.ContainerPort, er
}, nil
}
func (b *ResourceBuilder) newScaleSetListenerConfig(autoscalingListener *v1alpha1.AutoscalingListener, secret *corev1.Secret, metricsConfig *listenerMetricsServerConfig, cert string) (*corev1.Secret, error) {
func (b *resourceBuilder) newScaleSetListenerConfig(autoscalingListener *v1alpha1.AutoscalingListener, secret *corev1.Secret, metricsConfig *listenerMetricsServerConfig, cert string) (*corev1.Secret, error) {
var (
metricsAddr = ""
metricsEndpoint = ""
@@ -216,7 +214,7 @@ func (b *ResourceBuilder) newScaleSetListenerConfig(autoscalingListener *v1alpha
}, nil
}
func (b *ResourceBuilder) newScaleSetListenerPod(autoscalingListener *v1alpha1.AutoscalingListener, podConfig *corev1.Secret, serviceAccount *corev1.ServiceAccount, secret *corev1.Secret, metricsConfig *listenerMetricsServerConfig, envs ...corev1.EnvVar) (*corev1.Pod, error) {
func (b *resourceBuilder) newScaleSetListenerPod(autoscalingListener *v1alpha1.AutoscalingListener, podConfig *corev1.Secret, serviceAccount *corev1.ServiceAccount, secret *corev1.Secret, metricsConfig *listenerMetricsServerConfig, envs ...corev1.EnvVar) (*corev1.Pod, error) {
listenerEnv := []corev1.EnvVar{
{
Name: "LISTENER_CONFIG_PATH",
@@ -409,12 +407,12 @@ func mergeListenerContainer(base, from *corev1.Container) {
base.TTY = from.TTY
}
func (b *ResourceBuilder) newScaleSetListenerServiceAccount(autoscalingListener *v1alpha1.AutoscalingListener) *corev1.ServiceAccount {
func (b *resourceBuilder) newScaleSetListenerServiceAccount(autoscalingListener *v1alpha1.AutoscalingListener) *corev1.ServiceAccount {
return &corev1.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{
Name: scaleSetListenerServiceAccountName(autoscalingListener),
Namespace: autoscalingListener.Namespace,
Labels: b.mergeLabels(autoscalingListener.Labels, map[string]string{
Labels: mergeLabels(autoscalingListener.Labels, map[string]string{
LabelKeyGitHubScaleSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
LabelKeyGitHubScaleSetName: autoscalingListener.Spec.AutoscalingRunnerSetName,
}),
@@ -422,14 +420,14 @@ func (b *ResourceBuilder) newScaleSetListenerServiceAccount(autoscalingListener
}
}
func (b *ResourceBuilder) newScaleSetListenerRole(autoscalingListener *v1alpha1.AutoscalingListener) *rbacv1.Role {
func (b *resourceBuilder) newScaleSetListenerRole(autoscalingListener *v1alpha1.AutoscalingListener) *rbacv1.Role {
rules := rulesForListenerRole([]string{autoscalingListener.Spec.EphemeralRunnerSetName})
rulesHash := hash.ComputeTemplateHash(&rules)
newRole := &rbacv1.Role{
ObjectMeta: metav1.ObjectMeta{
Name: scaleSetListenerRoleName(autoscalingListener),
Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
Labels: b.mergeLabels(autoscalingListener.Labels, map[string]string{
Labels: mergeLabels(autoscalingListener.Labels, map[string]string{
LabelKeyGitHubScaleSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
LabelKeyGitHubScaleSetName: autoscalingListener.Spec.AutoscalingRunnerSetName,
labelKeyListenerNamespace: autoscalingListener.Namespace,
@@ -443,7 +441,7 @@ func (b *ResourceBuilder) newScaleSetListenerRole(autoscalingListener *v1alpha1.
return newRole
}
func (b *ResourceBuilder) newScaleSetListenerRoleBinding(autoscalingListener *v1alpha1.AutoscalingListener, listenerRole *rbacv1.Role, serviceAccount *corev1.ServiceAccount) *rbacv1.RoleBinding {
func (b *resourceBuilder) newScaleSetListenerRoleBinding(autoscalingListener *v1alpha1.AutoscalingListener, listenerRole *rbacv1.Role, serviceAccount *corev1.ServiceAccount) *rbacv1.RoleBinding {
roleRef := rbacv1.RoleRef{
Kind: "Role",
Name: listenerRole.Name,
@@ -463,7 +461,7 @@ func (b *ResourceBuilder) newScaleSetListenerRoleBinding(autoscalingListener *v1
ObjectMeta: metav1.ObjectMeta{
Name: scaleSetListenerRoleName(autoscalingListener),
Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
Labels: b.mergeLabels(autoscalingListener.Labels, map[string]string{
Labels: mergeLabels(autoscalingListener.Labels, map[string]string{
LabelKeyGitHubScaleSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
LabelKeyGitHubScaleSetName: autoscalingListener.Spec.AutoscalingRunnerSetName,
labelKeyListenerNamespace: autoscalingListener.Namespace,
@@ -479,14 +477,14 @@ func (b *ResourceBuilder) newScaleSetListenerRoleBinding(autoscalingListener *v1
return newRoleBinding
}
func (b *ResourceBuilder) newScaleSetListenerSecretMirror(autoscalingListener *v1alpha1.AutoscalingListener, secret *corev1.Secret) *corev1.Secret {
func (b *resourceBuilder) newScaleSetListenerSecretMirror(autoscalingListener *v1alpha1.AutoscalingListener, secret *corev1.Secret) *corev1.Secret {
dataHash := hash.ComputeTemplateHash(&secret.Data)
newListenerSecret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: scaleSetListenerSecretMirrorName(autoscalingListener),
Namespace: autoscalingListener.Namespace,
Labels: b.mergeLabels(autoscalingListener.Labels, map[string]string{
Labels: mergeLabels(autoscalingListener.Labels, map[string]string{
LabelKeyGitHubScaleSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
LabelKeyGitHubScaleSetName: autoscalingListener.Spec.AutoscalingRunnerSetName,
"secret-data-hash": dataHash,
@@ -498,14 +496,14 @@ func (b *ResourceBuilder) newScaleSetListenerSecretMirror(autoscalingListener *v
return newListenerSecret
}
func (b *ResourceBuilder) newEphemeralRunnerSet(autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet) (*v1alpha1.EphemeralRunnerSet, error) {
func (b *resourceBuilder) newEphemeralRunnerSet(autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet) (*v1alpha1.EphemeralRunnerSet, error) {
runnerScaleSetId, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdAnnotationKey])
if err != nil {
return nil, err
}
runnerSpecHash := autoscalingRunnerSet.RunnerSetSpecHash()
labels := b.mergeLabels(autoscalingRunnerSet.Labels, map[string]string{
labels := mergeLabels(autoscalingRunnerSet.Labels, map[string]string{
LabelKeyKubernetesPartOf: labelValueKubernetesPartOf,
LabelKeyKubernetesComponent: "runner-set",
LabelKeyKubernetesVersion: autoscalingRunnerSet.Labels[LabelKeyKubernetesVersion],
@@ -518,6 +516,7 @@ func (b *ResourceBuilder) newEphemeralRunnerSet(autoscalingRunnerSet *v1alpha1.A
}
newAnnotations := map[string]string{
AnnotationKeyGitHubRunnerGroupName: autoscalingRunnerSet.Annotations[AnnotationKeyGitHubRunnerGroupName],
AnnotationKeyGitHubRunnerScaleSetName: autoscalingRunnerSet.Annotations[AnnotationKeyGitHubRunnerScaleSetName],
annotationKeyRunnerSpecHash: runnerSpecHash,
@@ -547,7 +546,7 @@ func (b *ResourceBuilder) newEphemeralRunnerSet(autoscalingRunnerSet *v1alpha1.A
return newEphemeralRunnerSet, nil
}
func (b *ResourceBuilder) newEphemeralRunner(ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet) *v1alpha1.EphemeralRunner {
func (b *resourceBuilder) newEphemeralRunner(ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet) *v1alpha1.EphemeralRunner {
labels := make(map[string]string)
for k, v := range ephemeralRunnerSet.Labels {
if k == LabelKeyKubernetesComponent {
@@ -574,7 +573,10 @@ func (b *ResourceBuilder) newEphemeralRunner(ephemeralRunnerSet *v1alpha1.Epheme
}
}
func (b *ResourceBuilder) newEphemeralRunnerPod(ctx context.Context, runner *v1alpha1.EphemeralRunner, secret *corev1.Secret, envs ...corev1.EnvVar) *corev1.Pod {
func (b *resourceBuilder) newEphemeralRunnerPod(ctx context.Context, runner *v1alpha1.EphemeralRunner, secret *corev1.Secret, envs ...corev1.EnvVar) *corev1.Pod {
ctx, span := otel.Tracer("arc").Start(ctx, "resourceBuilder.newEphemeralRunnerPod")
defer span.End()
var newPod corev1.Pod
labels := map[string]string{}
@@ -642,7 +644,7 @@ func (b *ResourceBuilder) newEphemeralRunnerPod(ctx context.Context, runner *v1a
return &newPod
}
func (b *ResourceBuilder) newEphemeralRunnerJitSecret(ephemeralRunner *v1alpha1.EphemeralRunner) *corev1.Secret {
func (b *resourceBuilder) newEphemeralRunnerJitSecret(ephemeralRunner *v1alpha1.EphemeralRunner) *corev1.Secret {
return &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: ephemeralRunner.Name,
@@ -750,26 +752,14 @@ func trimLabelValue(val string) string {
return val
}
func (b *ResourceBuilder) mergeLabels(base, overwrite map[string]string) map[string]string {
mergedLabels := make(map[string]string, len(base))
func mergeLabels(base, overwrite map[string]string) map[string]string {
mergedLabels := map[string]string{}
base:
for k, v := range base {
for _, prefix := range b.ExcludeLabelPropagationPrefixes {
if strings.HasPrefix(k, prefix) {
continue base
}
}
mergedLabels[k] = v
}
overwrite:
for k, v := range overwrite {
for _, prefix := range b.ExcludeLabelPropagationPrefixes {
if strings.HasPrefix(k, prefix) {
continue overwrite
}
}
mergedLabels[k] = v
}

View File
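The diff above demotes `mergeLabels` to a free function and drops the `ExcludeLabelPropagationPrefixes` filtering; the truncated hunk suggests a plain two-loop merge where `overwrite` wins on key collisions. A minimal reconstruction under that assumption (not the verbatim ARC body):

package main

import "fmt"

// mergeLabels copies base, then lets overwrite win on collisions; no
// prefix-based exclusion remains after this change.
func mergeLabels(base, overwrite map[string]string) map[string]string {
	merged := map[string]string{}
	for k, v := range base {
		merged[k] = v
	}
	for k, v := range overwrite {
		merged[k] = v
	}
	return merged
}

func main() {
	out := mergeLabels(
		map[string]string{"app": "arc", "team": "ci"},
		map[string]string{"app": "listener"},
	)
	fmt.Println(out["app"], out["team"]) // listener ci
}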

@@ -19,13 +19,9 @@ func TestLabelPropagation(t *testing.T) {
Name: "test-scale-set",
Namespace: "test-ns",
Labels: map[string]string{
LabelKeyKubernetesPartOf: labelValueKubernetesPartOf,
LabelKeyKubernetesVersion: "0.2.0",
"arbitrary-label": "random-value",
"example.com/label": "example-value",
"example.com/example": "example-value",
"directly.excluded.org/label": "excluded-value",
"directly.excluded.org/arbitrary": "not-excluded-value",
LabelKeyKubernetesPartOf: labelValueKubernetesPartOf,
LabelKeyKubernetesVersion: "0.2.0",
"arbitrary-label": "random-value",
},
Annotations: map[string]string{
runnerScaleSetIdAnnotationKey: "1",
@@ -38,12 +34,7 @@ func TestLabelPropagation(t *testing.T) {
},
}
b := ResourceBuilder{
ExcludeLabelPropagationPrefixes: []string{
"example.com/",
"directly.excluded.org/label",
},
}
var b resourceBuilder
ephemeralRunnerSet, err := b.newEphemeralRunnerSet(&autoscalingRunnerSet)
require.NoError(t, err)
assert.Equal(t, labelValueKubernetesPartOf, ephemeralRunnerSet.Labels[LabelKeyKubernetesPartOf])
@@ -72,11 +63,6 @@ func TestLabelPropagation(t *testing.T) {
assert.Equal(t, "repo", listener.Labels[LabelKeyGitHubRepository])
assert.Equal(t, autoscalingRunnerSet.Labels["arbitrary-label"], listener.Labels["arbitrary-label"])
assert.NotContains(t, listener.Labels, "example.com/label")
assert.NotContains(t, listener.Labels, "example.com/example")
assert.NotContains(t, listener.Labels, "directly.excluded.org/label")
assert.Equal(t, "not-excluded-value", listener.Labels["directly.excluded.org/arbitrary"])
listenerServiceAccount := &corev1.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{
Name: "test",
@@ -142,7 +128,7 @@ func TestGitHubURLTrimLabelValues(t *testing.T) {
GitHubConfigUrl: fmt.Sprintf("https://github.com/%s/%s", organization, repository),
}
var b ResourceBuilder
var b resourceBuilder
ephemeralRunnerSet, err := b.newEphemeralRunnerSet(autoscalingRunnerSet)
require.NoError(t, err)
assert.Len(t, ephemeralRunnerSet.Labels[LabelKeyGitHubEnterprise], 0)
@@ -166,7 +152,7 @@ func TestGitHubURLTrimLabelValues(t *testing.T) {
GitHubConfigUrl: fmt.Sprintf("https://github.com/enterprises/%s", enterprise),
}
var b ResourceBuilder
var b resourceBuilder
ephemeralRunnerSet, err := b.newEphemeralRunnerSet(autoscalingRunnerSet)
require.NoError(t, err)
assert.Len(t, ephemeralRunnerSet.Labels[LabelKeyGitHubEnterprise], 63)

View File

@@ -42,7 +42,7 @@ eliminate some duplication:
`-coverprofile` flags: while `-short` is used to skip [old ARC E2E
tests](https://github.com/actions/actions-runner-controller/blob/master/test/e2e/e2e_test.go#L85-L87),
`-coverprofile` is adding to the test time without really giving us any value
in return. We should also start using `actions/setup-go@v5` to take advantage
in return. We should also start using `actions/setup-go@v4` to take advantage
of caching (it would speed up our tests by a lot) or enable it on `v3` if we
have a strong reason not to upgrade. We should keep ignoring our E2E tests too
as those will be run elsewhere (either use `Short` there too or ignore the

View File

@@ -43,16 +43,6 @@ You can follow [this troubleshooting guide](https://docs.github.com/en/actions/h
## Changelog
### v0.9.3
1. AutoscalingListener controller: Inspect listener container state instead of pod phase [#3548](https://github.com/actions/actions-runner-controller/pull/3548)
1. Exclude label prefix propagation [#3607](https://github.com/actions/actions-runner-controller/pull/3607)
1. Check status code of fetch access token for github app [#3568](https://github.com/actions/actions-runner-controller/pull/3568)
1. Remove .Named() from the ephemeral runner controller [#3596](https://github.com/actions/actions-runner-controller/pull/3596)
1. Customize work directory [#3477](https://github.com/actions/actions-runner-controller/pull/3477)
1. Fix problem with ephemeralRunner Succeeded state before build executed [#3528](https://github.com/actions/actions-runner-controller/pull/3528)
1. Remove finalizers in one pass to speed up cleanups AutoscalingRunnerSet [#3536](https://github.com/actions/actions-runner-controller/pull/3536)
### v0.9.2
1. Refresh session if token expires during delete message [#3529](https://github.com/actions/actions-runner-controller/pull/3529)

View File

@@ -1054,14 +1054,6 @@ func (c *Client) fetchAccessToken(ctx context.Context, gitHubConfigURL string, c
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusCreated {
return nil, &GitHubAPIError{
StatusCode: resp.StatusCode,
RequestID: resp.Header.Get(HeaderGitHubRequestID),
Err: fmt.Errorf("failed to get access token for GitHub App auth: %v", resp.Status),
}
}
// Format: https://docs.github.com/en/rest/apps/apps#create-an-installation-access-token-for-an-app
var accessToken *accessToken
if err = json.NewDecoder(resp.Body).Decode(&accessToken); err != nil {

go.mod
View File

@@ -1,10 +1,10 @@
module github.com/actions/actions-runner-controller
go 1.22.4
go 1.22.1
require (
github.com/bradleyfalzon/ghinstallation/v2 v2.8.0
github.com/davecgh/go-spew v1.1.1
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc
github.com/evanphx/json-patch v5.9.0+incompatible
github.com/go-logr/logr v1.4.1
github.com/golang-jwt/jwt/v4 v4.5.0
@@ -14,7 +14,7 @@ require (
github.com/gorilla/mux v1.8.1
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79
github.com/gruntwork-io/terratest v0.46.7
github.com/hashicorp/go-retryablehttp v0.7.7
github.com/hashicorp/go-retryablehttp v0.7.5
github.com/kelseyhightower/envconfig v1.4.0
github.com/onsi/ginkgo v1.16.5
github.com/onsi/ginkgo/v2 v2.17.1
@@ -23,12 +23,17 @@ require (
github.com/prometheus/client_golang v1.17.0
github.com/stretchr/testify v1.9.0
github.com/teambition/rrule-go v1.8.2
go.opentelemetry.io/otel v1.27.0
go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.3.0
go.opentelemetry.io/otel/log v0.3.0
go.opentelemetry.io/otel/sdk/log v0.3.0
go.uber.org/multierr v1.11.0
go.uber.org/zap v1.27.0
golang.org/x/net v0.24.0
golang.org/x/oauth2 v0.19.0
golang.org/x/sync v0.7.0
gomodules.xyz/jsonpatch/v2 v2.4.0
gopkg.in/DataDog/dd-trace-go.v1 v1.65.1
gopkg.in/yaml.v2 v2.4.0
k8s.io/api v0.28.4
k8s.io/apimachinery v0.28.4
@@ -38,23 +43,34 @@ require (
)
require (
github.com/DataDog/appsec-internal-go v1.6.0 // indirect
github.com/DataDog/datadog-agent/pkg/obfuscate v0.48.0 // indirect
github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.48.1 // indirect
github.com/DataDog/datadog-go/v5 v5.3.0 // indirect
github.com/DataDog/go-libddwaf/v3 v3.2.1 // indirect
github.com/DataDog/go-tuf v1.0.2-0.5.2 // indirect
github.com/DataDog/sketches-go v1.4.5 // indirect
github.com/Microsoft/go-winio v0.6.1 // indirect
github.com/ProtonMail/go-crypto v0.0.0-20230923063757-afb1ddc0824c // indirect
github.com/aws/aws-sdk-go v1.44.122 // indirect
github.com/aws/aws-sdk-go v1.44.327 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/boombuler/barcode v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/cloudflare/circl v1.3.7 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.0 // indirect
github.com/dustin/go-humanize v1.0.1 // indirect
github.com/ebitengine/purego v0.6.0-alpha.5 // indirect
github.com/emicklei/go-restful/v3 v3.11.0 // indirect
github.com/evanphx/json-patch/v5 v5.7.0 // indirect
github.com/fsnotify/fsnotify v1.7.0 // indirect
github.com/ghodss/yaml v1.0.0 // indirect
github.com/go-errors/errors v1.0.2-0.20180813162953-d98b870cc4e0 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-logr/zapr v1.3.0 // indirect
github.com/go-openapi/jsonpointer v0.20.0 // indirect
github.com/go-openapi/jsonreference v0.20.2 // indirect
github.com/go-openapi/swag v0.22.4 // indirect
github.com/go-sql-driver/mysql v1.4.1 // indirect
github.com/go-sql-driver/mysql v1.6.0 // indirect
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
@@ -65,9 +81,9 @@ require (
github.com/google/gofuzz v1.2.0 // indirect
github.com/google/pprof v0.0.0-20231101202521-4ca4178f5c7a // indirect
github.com/gruntwork-io/go-commons v0.8.0 // indirect
github.com/hashicorp/errwrap v1.0.0 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
github.com/hashicorp/go-multierror v1.1.0 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/imdario/mergo v0.3.16 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
@@ -80,23 +96,32 @@ require (
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/outcaste-io/ristretto v0.2.3 // indirect
github.com/philhofer/fwd v1.1.2 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/pquerna/otp v1.2.0 // indirect
github.com/prometheus/client_model v0.5.0 // indirect
github.com/prometheus/common v0.45.0 // indirect
github.com/prometheus/procfs v0.12.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/secure-systems-lab/go-securesystemslib v0.7.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/stretchr/objx v0.5.2 // indirect
github.com/tinylib/msgp v1.1.8 // indirect
github.com/urfave/cli v1.22.2 // indirect
go.opentelemetry.io/otel/metric v1.27.0 // indirect
go.opentelemetry.io/otel/sdk v1.27.0 // indirect
go.opentelemetry.io/otel/trace v1.27.0 // indirect
go.uber.org/atomic v1.11.0 // indirect
golang.org/x/crypto v0.22.0 // indirect
golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa // indirect
golang.org/x/mod v0.14.0 // indirect
golang.org/x/sys v0.20.0 // indirect
golang.org/x/term v0.19.0 // indirect
golang.org/x/text v0.14.0 // indirect
golang.org/x/time v0.4.0 // indirect
golang.org/x/tools v0.17.0 // indirect
google.golang.org/appengine v1.6.8 // indirect
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
google.golang.org/protobuf v1.33.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect

146
go.sum
View File

@@ -1,12 +1,31 @@
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/DataDog/appsec-internal-go v1.6.0 h1:QHvPOv/O0s2fSI/BraZJNpRDAtdlrRm5APJFZNBxjAw=
github.com/DataDog/appsec-internal-go v1.6.0/go.mod h1:pEp8gjfNLtEOmz+iZqC8bXhu0h4k7NUsW/qiQb34k1U=
github.com/DataDog/datadog-agent/pkg/obfuscate v0.48.0 h1:bUMSNsw1iofWiju9yc1f+kBd33E3hMJtq9GuU602Iy8=
github.com/DataDog/datadog-agent/pkg/obfuscate v0.48.0/go.mod h1:HzySONXnAgSmIQfL6gOv9hWprKJkx8CicuXuUbmgWfo=
github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.48.1 h1:5nE6N3JSs2IG3xzMthNFhXfOaXlrsdgqmJ73lndFf8c=
github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.48.1/go.mod h1:Vc+snp0Bey4MrrJyiV2tVxxJb6BmLomPvN1RgAvjGaQ=
github.com/DataDog/datadog-go/v5 v5.3.0 h1:2q2qjFOb3RwAZNU+ez27ZVDwErJv5/VpbBPprz7Z+s8=
github.com/DataDog/datadog-go/v5 v5.3.0/go.mod h1:XRDJk1pTc00gm+ZDiBKsjh7oOOtJfYfglVCmFb8C2+Q=
github.com/DataDog/go-libddwaf/v3 v3.2.1 h1:lZPc6UxCOwioHc++nsldKR50FpIrRh1uGnGLuryqnE8=
github.com/DataDog/go-libddwaf/v3 v3.2.1/go.mod h1:AP+7Atb8ftSsrha35wht7+K3R+xuzfVSQhabSO4w6CY=
github.com/DataDog/go-tuf v1.0.2-0.5.2 h1:EeZr937eKAWPxJ26IykAdWA4A0jQXJgkhUjqEI/w7+I=
github.com/DataDog/go-tuf v1.0.2-0.5.2/go.mod h1:zBcq6f654iVqmkk8n2Cx81E1JnNTMOAx1UEO/wZR+P0=
github.com/DataDog/gostackparse v0.7.0 h1:i7dLkXHvYzHV308hnkvVGDL3BR4FWl7IsXNPz/IGQh4=
github.com/DataDog/gostackparse v0.7.0/go.mod h1:lTfqcJKqS9KnXQGnyQMCugq3u1FP6UZMfWR0aitKFMM=
github.com/DataDog/sketches-go v1.4.5 h1:ki7VfeNz7IcNafq7yI/j5U/YCkO3LJiMDtXz9OMQbyE=
github.com/DataDog/sketches-go v1.4.5/go.mod h1:7Y8GN8Jf66DLyDhc94zuWA3uHEt/7ttt8jHOBWWrSOg=
github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
github.com/ProtonMail/go-crypto v0.0.0-20230923063757-afb1ddc0824c h1:kMFnB0vCcX7IL/m9Y5LO+KQYv+t1CQOiFe6+SV2J7bE=
github.com/ProtonMail/go-crypto v0.0.0-20230923063757-afb1ddc0824c/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0=
github.com/actions-runner-controller/httpcache v0.2.0 h1:hCNvYuVPJ2xxYBymqBvH0hSiQpqz4PHF/LbU3XghGNI=
github.com/actions-runner-controller/httpcache v0.2.0/go.mod h1:JLu9/2M/btPz1Zu/vTZ71XzukQHn2YeISPmJoM5exBI=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
github.com/aws/aws-sdk-go v1.44.122 h1:p6mw01WBaNpbdP2xrisz5tIkcNwzj/HysobNoaAHjgo=
github.com/aws/aws-sdk-go v1.44.122/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
github.com/aws/aws-sdk-go v1.44.327 h1:ZS8oO4+7MOBLhkdwIhgtVeDzCeWOlTfKJS7EgggbIEY=
github.com/aws/aws-sdk-go v1.44.327/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
@@ -16,6 +35,7 @@ github.com/boombuler/barcode v1.0.1/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl
github.com/bradleyfalzon/ghinstallation/v2 v2.8.0 h1:yUmoVv70H3J4UOqxqsee39+KlXxNEDfTbAp8c/qULKk=
github.com/bradleyfalzon/ghinstallation/v2 v2.8.0/go.mod h1:fmPmvCiBWhJla3zDv9ZTQSZc8AbwyRnGW1yg5ep1Pcs=
github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA=
@@ -26,8 +46,19 @@ github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA=
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc=
github.com/eapache/queue/v2 v2.0.0-20230407133247-75960ed334e4 h1:8EXxF+tCLqaVk8AOC29zl2mnhQjwyLxxOTuhUazWRsg=
github.com/eapache/queue/v2 v2.0.0-20230407133247-75960ed334e4/go.mod h1:I5sHm0Y0T1u5YjlyqC5GVArM7aNZRUYtTjmJ8mPJFds=
github.com/ebitengine/purego v0.6.0-alpha.5 h1:EYID3JOAdmQ4SNZYJHu9V6IqOeRQDBYxqKAg9PyoHFY=
github.com/ebitengine/purego v0.6.0-alpha.5/go.mod h1:ah1In8AOtksoNK6yk5z1HTJeUkC1Ez4Wk2idgGslMwQ=
github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/evanphx/json-patch v5.9.0+incompatible h1:fBXyNpNMuTTDdquAq/uisOr2lShz4oaXpDTX2bLe7ls=
@@ -35,8 +66,10 @@ github.com/evanphx/json-patch v5.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLi
github.com/evanphx/json-patch/v5 v5.7.0 h1:nJqP7uwL84RJInrohHfW0Fx3awjbm8qZeFv0nW9SYGc=
github.com/evanphx/json-patch/v5 v5.7.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ=
github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs=
github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw=
github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk=
github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
@@ -46,9 +79,12 @@ github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeME
github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
github.com/go-errors/errors v1.0.2-0.20180813162953-d98b870cc4e0 h1:skJKxRtNmevLqnayafdLe2AsenqRupVmzZSqrvb5caU=
github.com/go-errors/errors v1.0.2-0.20180813162953-d98b870cc4e0/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ=
github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg=
github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
@@ -59,8 +95,8 @@ github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En
github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU=
github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
github.com/go-sql-driver/mysql v1.4.1 h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZpNwpA=
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE=
github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
@@ -70,6 +106,7 @@ github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOW
github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
@@ -78,7 +115,6 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 h1:0VpGH+cDhbDtdcweoyCVsF3fhN8kejK6rFe/2FFX2nU=
@@ -111,16 +147,24 @@ github.com/gruntwork-io/go-commons v0.8.0 h1:k/yypwrPqSeYHevLlEDmvmgQzcyTwrlZGRa
github.com/gruntwork-io/go-commons v0.8.0/go.mod h1:gtp0yTtIBExIZp7vyIV9I0XQkVwiQZze678hvDXof78=
github.com/gruntwork-io/terratest v0.46.7 h1:oqGPBBO87SEsvBYaA0R5xOq+Lm2Xc5dmFVfxEolfZeU=
github.com/gruntwork-io/terratest v0.46.7/go.mod h1:6gI5MlLeyF+SLwqocA5GBzcTix+XiuxCy1BPwKuT+WM=
github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k=
github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
github.com/hashicorp/go-multierror v1.1.0 h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g4TbElacI=
github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA=
github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU=
github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk=
github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c=
github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
github.com/hashicorp/go-retryablehttp v0.7.5 h1:bJj+Pj19UZMIweq/iie+1u5YCdGrnxCT9yvm0e+Nd5M=
github.com/hashicorp/go-retryablehttp v0.7.5/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8=
github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7 h1:UpiO20jno/eV1eVZcxqWnUohyKRe1g8FPV/xH1s/2qs=
github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8=
github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts=
github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4=
github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc=
github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
@@ -160,6 +204,8 @@ github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvls
github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8=
github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -181,10 +227,17 @@ github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7J
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.33.0 h1:snPCflnZrpMsy94p4lXVEkHo12lmPnc3vY5XBbreexE=
github.com/onsi/gomega v1.33.0/go.mod h1:+925n5YtiFsLzzafLUHzVMBpvvRAzrydIBiSIxjX3wY=
github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
github.com/outcaste-io/ristretto v0.2.3 h1:AK4zt/fJ76kjlYObOeNwh4T3asEuaCmp26pOvUOL9w0=
github.com/outcaste-io/ristretto v0.2.3/go.mod h1:W8HywhmtlopSB1jeMg3JtdIhf+DYkLAr0VN/s4+MHac=
github.com/philhofer/fwd v1.1.2 h1:bnDivRJ1EWPjUIRXV5KfORO897HTbpFAQddBdE8t7Gw=
github.com/philhofer/fwd v1.1.2/go.mod h1:qkPdfjR2SIEbspLqpe1tO4n5yICnr2DY7mqEx2tUTP0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pquerna/otp v1.2.0 h1:/A3+Jn+cagqayeR3iHs/L62m5ue7710D35zl1zJ1kok=
github.com/pquerna/otp v1.2.0/go.mod h1:dkJfzwRKNiegxyNb54X/3fLwhCynbMspSyWKnvi1AEg=
github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q=
@@ -195,13 +248,22 @@ github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lne
github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY=
github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
github.com/richardartoul/molecule v1.0.1-0.20240531184615-7ca0df43c0b3 h1:4+LEVOB87y175cLJC/mbsgKmoDOjrBldtXvioEy96WY=
github.com/richardartoul/molecule v1.0.1-0.20240531184615-7ca0df43c0b3/go.mod h1:vl5+MqJ1nBINuSsUI2mGgH79UweUT/B5Fy8857PqyyI=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk=
github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc=
github.com/secure-systems-lab/go-securesystemslib v0.7.0 h1:OwvJ5jQf9LnIAS83waAjPbcMsODrTQUpJ02eNLUoxBg=
github.com/secure-systems-lab/go-securesystemslib v0.7.0/go.mod h1:/2gYnlnHVQ6xeGtfIqFy7Do03K4cdCY0A/GlJLDKLHI=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
@@ -215,6 +277,7 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
@@ -222,11 +285,33 @@ github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsT
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/teambition/rrule-go v1.8.2 h1:lIjpjvWTj9fFUZCmuoVDrKVOtdiyzbzc93qTmRVe/J8=
github.com/teambition/rrule-go v1.8.2/go.mod h1:Ieq5AbrKGciP1V//Wq8ktsTXwSwJHDD5mD/wLBGl3p4=
github.com/tinylib/msgp v1.1.8 h1:FCXC1xanKO4I8plpHGH2P7koL/RzZs12l/+r7vakfm0=
github.com/tinylib/msgp v1.1.8/go.mod h1:qkpG+2ldGg4xRFmx+jfTvZPxfGFhi64BcnL9vkCm/Tw=
github.com/urfave/cli v1.22.2 h1:gsqYFH8bb9ekPA12kRo0hfjngWQjkJPlN9R0N78BoUo=
github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0 h1:KfYpVmrjI7JuToy5k8XV3nkapjWx48k4E4JOtVstzQI=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0/go.mod h1:SeQhzAEccGVZVEy7aH87Nh0km+utSpo1pTv6eMMop48=
go.opentelemetry.io/otel v1.27.0 h1:9BZoF3yMK/O1AafMiQTVu0YDj5Ea4hPhxCs7sGva+cg=
go.opentelemetry.io/otel v1.27.0/go.mod h1:DMpAK8fzYRzs+bi3rS5REupisuqTheUlSZJ1WnZaPAQ=
go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.3.0 h1:6aGq6rMOdOx9B385JpF1OpeL18+6Ho8bTFdxy10oEGY=
go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.3.0/go.mod h1:fdZI+pB2Y6Dpl3Uf+1ZPrkX6cnwsUAhjK1f9yCAlJIM=
go.opentelemetry.io/otel/log v0.3.0 h1:kJRFkpUFYtny37NQzL386WbznUByZx186DpEMKhEGZs=
go.opentelemetry.io/otel/log v0.3.0/go.mod h1:ziCwqZr9soYDwGNbIL+6kAvQC+ANvjgG367HVcyR/ys=
go.opentelemetry.io/otel/metric v1.27.0 h1:hvj3vdEKyeCi4YaYfNjv2NUje8FqKqUY8IlF0FxV/ik=
go.opentelemetry.io/otel/metric v1.27.0/go.mod h1:mVFgmRlhljgBiuk/MP/oKylr4hs85GZAylncepAX/ak=
go.opentelemetry.io/otel/sdk v1.27.0 h1:mlk+/Y1gLPLn84U4tI8d3GNJmGT/eXe3ZuOXN9kTWmI=
go.opentelemetry.io/otel/sdk v1.27.0/go.mod h1:Ha9vbLwJE6W86YstIywK2xFfPjbWlCuwPtMkKdz/Y4A=
go.opentelemetry.io/otel/sdk/log v0.3.0 h1:GEjJ8iftz2l+XO1GF2856r7yYVh74URiF9JMcAacr5U=
go.opentelemetry.io/otel/sdk/log v0.3.0/go.mod h1:BwCxtmux6ACLuys1wlbc0+vGBd+xytjmjajwqqIul2g=
go.opentelemetry.io/otel/trace v1.27.0 h1:IqYb813p7cmbHk0a5y6pD5JPakbVfftRXABGt5/Rscw=
go.opentelemetry.io/otel/trace v1.27.0/go.mod h1:6RiD1hkAprV4/q+yd2ln1HG9GoPx39SuvvstaLBl+l4=
go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
@@ -245,8 +330,12 @@ golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa h1:FRnLl4eNAQl8hwxVVC17teOw8
golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa/go.mod h1:zk2irFbV9DP96SEBUUAy67IdHUaZuSnrz1n472HUCLE=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0=
golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
@@ -254,9 +343,11 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w=
@@ -267,6 +358,7 @@ golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
@@ -284,10 +376,14 @@ golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220627191245-f75cf1eec38b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -296,7 +392,9 @@ golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y=
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q=
@@ -304,8 +402,8 @@ golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
@@ -317,7 +415,9 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc=
golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps=
@@ -325,10 +425,10 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk=
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw=
gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY=
google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM=
google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -339,6 +439,8 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/DataDog/dd-trace-go.v1 v1.65.1 h1:Ne7kzWr/br/jwhUJR7CnqPl/mUpNxa6LfgZs0S4htZM=
gopkg.in/DataDog/dd-trace-go.v1 v1.65.1/go.mod h1:beNFIWd/H04d0k96cfltgiDH2+t0T5sDbyYLF3VTXqk=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
@@ -355,6 +457,8 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/gotraceui v0.2.0 h1:dmNsfQ9Vl3GwbiVD7Z8d/osC6WtGGrasyrC2suc4ZIQ=
honnef.co/go/gotraceui v0.2.0/go.mod h1:qHo4/W75cA3bX0QQoSvDjbJa4R8mAyyFjbWAj63XElc=
k8s.io/api v0.28.4 h1:8ZBrLjwosLl/NYgv1P7EQLqoO8MGQApnbgH8tu3BMzY=
k8s.io/api v0.28.4/go.mod h1:axWTGrY88s/5YE+JSt4uUi6NMM+gur1en2REMR7IRj0=
k8s.io/apiextensions-apiserver v0.28.3 h1:Od7DEnhXHnHPZG+W9I97/fSQkVpVPQx2diy+2EtmY08=

17
instrument.sh Executable file
View File

@@ -0,0 +1,17 @@
#!/usr/bin/env bash
set -e
git reset --hard origin/master
go get go.opentelemetry.io/otel
go get gopkg.in/DataDog/dd-trace-go.v1
go get github.com/davecgh/go-spew
go get github.com/pmezard/go-difflib
find . -name "*.go" | grep -E '(cmd/ghalistener|cmd/githubrunnerscalesetlistener|controllers/actions.github.com)' | xargs -I{} go-instrument -app arc -w -filename {}
git add -u
git commit -m 'chore: goinstrument'
git cherry-pick a6d6f3e
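The script's core step pipes every Go file under the listener and controller packages through go-instrument, which injects OpenTelemetry spans into functions that accept a context.Context. The exact code the tool generates depends on its version; roughly, an instrumented function gains a prologue like the sketch below (the "arc" tracer name matches the -app flag above; recording errors on the span is an assumption about the tool's output, not something shown in this diff):

package sketch

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/codes"
)

// Reconcile shows the approximate shape of a function after
// `go-instrument -app arc -w`: a span is started from the incoming
// context, ended on return, and any returned error is recorded on it.
func Reconcile(ctx context.Context) (err error) {
	ctx, span := otel.Tracer("arc").Start(ctx, "Reconcile")
	defer span.End()
	defer func() {
		if err != nil {
			span.RecordError(err)
			span.SetStatus(codes.Error, err.Error())
		}
	}()
	_ = ctx // the original function body would continue here, using the span-carrying ctx
	return nil
}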

36
main.go
View File

@@ -94,11 +94,10 @@ func main() {
runnerImagePullSecrets stringSlice
runnerPodDefaults actionssummerwindnet.RunnerPodDefaults
namespace string
logLevel string
logFormat string
watchSingleNamespace string
excludeLabelPropagationPrefixes stringSlice
namespace string
logLevel string
logFormat string
watchSingleNamespace string
autoScalerImagePullSecrets stringSlice
@@ -139,7 +138,6 @@ func main() {
flag.Var(&commonRunnerLabels, "common-runner-labels", "Runner labels in the K1=V1,K2=V2,... format that are inherited all the runners created by the controller. See https://github.com/actions/actions-runner-controller/issues/321 for more information")
flag.StringVar(&namespace, "watch-namespace", "", "The namespace to watch for custom resources. Set to empty for letting it watch for all namespaces.")
flag.StringVar(&watchSingleNamespace, "watch-single-namespace", "", "Restrict to watch for custom resources in a single namespace.")
flag.Var(&excludeLabelPropagationPrefixes, "exclude-label-propagation-prefix", "The list of prefixes that should be excluded from label propagation")
flag.StringVar(&logLevel, "log-level", logging.LogLevelDebug, `The verbosity of the logging. Valid values are "debug", "info", "warn", "error". Defaults to "debug".`)
flag.StringVar(&logFormat, "log-format", "text", `The log format. Valid options are "text" and "json". Defaults to "text"`)
flag.BoolVar(&autoScalingRunnerSetOnly, "auto-scaling-runner-set-only", false, "Make controller only reconcile AutoRunnerScaleSet object.")
@@ -260,10 +258,6 @@ func main() {
log.WithName("actions-clients"),
)
rb := actionsgithubcom.ResourceBuilder{
ExcludeLabelPropagationPrefixes: excludeLabelPropagationPrefixes,
}
if err = (&actionsgithubcom.AutoscalingRunnerSetReconciler{
Client: mgr.GetClient(),
Log: log.WithName("AutoscalingRunnerSet").WithValues("version", build.Version),
@@ -273,30 +267,27 @@ func main() {
ActionsClient: actionsMultiClient,
UpdateStrategy: actionsgithubcom.UpdateStrategy(updateStrategy),
DefaultRunnerScaleSetListenerImagePullSecrets: autoScalerImagePullSecrets,
ResourceBuilder: rb,
}).SetupWithManager(mgr); err != nil {
log.Error(err, "unable to create controller", "controller", "AutoscalingRunnerSet")
os.Exit(1)
}
if err = (&actionsgithubcom.EphemeralRunnerReconciler{
Client: mgr.GetClient(),
Log: log.WithName("EphemeralRunner").WithValues("version", build.Version),
Scheme: mgr.GetScheme(),
ActionsClient: actionsMultiClient,
ResourceBuilder: rb,
Client: mgr.GetClient(),
Log: log.WithName("EphemeralRunner").WithValues("version", build.Version),
Scheme: mgr.GetScheme(),
ActionsClient: actionsMultiClient,
}).SetupWithManager(mgr); err != nil {
log.Error(err, "unable to create controller", "controller", "EphemeralRunner")
os.Exit(1)
}
if err = (&actionsgithubcom.EphemeralRunnerSetReconciler{
Client: mgr.GetClient(),
Log: log.WithName("EphemeralRunnerSet").WithValues("version", build.Version),
Scheme: mgr.GetScheme(),
ActionsClient: actionsMultiClient,
PublishMetrics: metricsAddr != "0",
ResourceBuilder: rb,
Client: mgr.GetClient(),
Log: log.WithName("EphemeralRunnerSet").WithValues("version", build.Version),
Scheme: mgr.GetScheme(),
ActionsClient: actionsMultiClient,
PublishMetrics: metricsAddr != "0",
}).SetupWithManager(mgr); err != nil {
log.Error(err, "unable to create controller", "controller", "EphemeralRunnerSet")
os.Exit(1)
@@ -308,7 +299,6 @@ func main() {
Scheme: mgr.GetScheme(),
ListenerMetricsAddr: listenerMetricsAddr,
ListenerMetricsEndpoint: listenerMetricsEndpoint,
ResourceBuilder: rb,
}).SetupWithManager(mgr); err != nil {
log.Error(err, "unable to create controller", "controller", "AutoscalingListener")
os.Exit(1)
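This hunk removes upstream's -exclude-label-propagation-prefix flag and the ResourceBuilder wiring that consumes it (#3607 in the changelog above). The ResourceBuilder internals are not part of this diff, but the flag description implies prefix-filtering of labels before they are propagated to created resources, roughly as in this hypothetical helper:

package sketch

import "strings"

// filterPropagatedLabels drops every label whose key starts with one of the
// excluded prefixes. A guess at the behavior behind the removed flag, not
// the repository's actual ResourceBuilder code.
func filterPropagatedLabels(labels map[string]string, excludePrefixes []string) map[string]string {
	out := make(map[string]string, len(labels))
	for key, value := range labels {
		excluded := false
		for _, prefix := range excludePrefixes {
			if strings.HasPrefix(key, prefix) {
				excluded = true
				break
			}
		}
		if !excluded {
			out[key] = value
		}
	}
	return out
}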

15
prep_fork.sh Executable file
View File

@@ -0,0 +1,15 @@
#!/usr/bin/env bash
set -e
for f in arc-publish.yaml arc-{publish,validate}-chart.yaml \
arc-{release-runners,validate-runners,update-runners-scheduled}.yaml gha-{publish,validate}-chart.yaml \
global-run-{first-interaction,stale}.yaml; do
echo "Processing $f"
git rm .github/workflows/$f
done
git commit -m "Remove workflows unused in a forked repo"
# cherry-pick: Remove legacy-canary-build job from global-publish-canary.yaml as unused in a forked repo
git cherry-pick 842b16d71498a5f2a1fc17c0917372435464d49c

View File

@@ -6,7 +6,7 @@ DIND_ROOTLESS_RUNNER_NAME ?= ${DOCKER_USER}/actions-runner-dind-rootless
OS_IMAGE ?= ubuntu-22.04
TARGETPLATFORM ?= $(shell arch)
RUNNER_VERSION ?= 2.317.0
RUNNER_VERSION ?= 2.316.1
RUNNER_CONTAINER_HOOKS_VERSION ?= 0.6.0
DOCKER_VERSION ?= 24.0.7

View File

@@ -1,2 +1,2 @@
RUNNER_VERSION=2.317.0
RUNNER_VERSION=2.316.1
RUNNER_CONTAINER_HOOKS_VERSION=0.6.0

View File

@@ -36,7 +36,7 @@ var (
testResultCMNamePrefix = "test-result-"
RunnerVersion = "2.317.0"
RunnerVersion = "2.316.1"
RunnerContainerHooksVersion = "0.6.0"
)
@@ -1106,7 +1106,7 @@ func installActionsWorkflow(t *testing.T, testName, runnerLabel, testResultCMNam
testing.Step{
Uses: "actions/setup-go@v3",
With: &testing.With{
GoVersion: "1.22.4",
GoVersion: "1.22.1",
},
},
)
@@ -1236,7 +1236,7 @@ func installActionsWorkflow(t *testing.T, testName, runnerLabel, testResultCMNam
testing.Step{
Uses: "azure/setup-kubectl@v1",
With: &testing.With{
Version: "v1.22.4",
Version: "v1.22.1",
},
},
testing.Step{

View File

@@ -355,7 +355,7 @@ nodes:
image: %s
`, k.Name, image, image))
if err := os.WriteFile(f.Name(), kindConfig, 0o644); err != nil {
if err := os.WriteFile(f.Name(), kindConfig, 0644); err != nil {
return err
}
@@ -385,7 +385,7 @@ func (k *Kind) LoadImages(ctx context.Context, images []ContainerImage) error {
}
tmpDir := filepath.Join(wd, ".testing", k.Name)
if err := os.MkdirAll(tmpDir, 0o755); err != nil {
if err := os.MkdirAll(tmpDir, 0755); err != nil {
return err
}
defer func() {
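Both hunks in this file are stylistic only: 0o644 and 0644 denote the same octal value, 0o being the explicit octal-literal prefix introduced in Go 1.13.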

View File

@@ -1,7 +1,7 @@
package testing
const (
ActionsCheckout = "actions/checkout@v4"
ActionsCheckout = "actions/checkout@v3"
)
type Workflow struct {