Mirror of https://github.com/actions/actions-runner-controller.git (synced 2025-12-10 19:50:30 +00:00)

Compare commits: ddtrace-wi...responsive

20 Commits
| Author | SHA1 | Date |
|---|---|---|
|  | a4876c5d03 |  |
|  | f58dd76763 |  |
|  | 90b68fec1a |  |
|  | 1be410ba80 |  |
|  | 930c9db6e7 |  |
|  | a152741a1a |  |
|  | 80d848339e |  |
|  | 8535a24135 |  |
|  | b349ded2be |  |
|  | 6276c84493 |  |
|  | 4a8420ce96 |  |
|  | a62ca3d853 |  |
|  | 4eb038eaa1 |  |
|  | b2c6992e84 |  |
|  | 0a6208e38d |  |
|  | 2cc793a835 |  |
|  | 894732732a |  |
|  | e45ac190e2 |  |
|  | d0fb7206a4 |  |
|  | 9afd93065f |  |
@@ -47,7 +47,7 @@ runs:
           -d '{"ref": "main", "inputs": { "arc_name": "${{inputs.arc-name}}" } }'

     - name: Fetch workflow run & job ids
-      uses: actions/github-script@v6
+      uses: actions/github-script@v7
       id: query_workflow
       with:
         script: |
@@ -128,7 +128,7 @@ runs:

     - name: Wait for workflow to start running
       if: inputs.wait-to-running == 'true' && inputs.wait-to-finish == 'false'
-      uses: actions/github-script@v6
+      uses: actions/github-script@v7
       with:
         script: |
           function sleep(ms) {
@@ -156,7 +156,7 @@ runs:

     - name: Wait for workflow to finish successfully
       if: inputs.wait-to-finish == 'true'
-      uses: actions/github-script@v6
+      uses: actions/github-script@v7
       with:
         script: |
           // Wait 5 minutes and make sure the workflow run we triggered completed with result 'success'
.github/actions/setup-arc-e2e/action.yaml (vendored) — 6 changed lines

@@ -27,7 +27,7 @@ runs:
   using: "composite"
   steps:
     - name: Set up Docker Buildx
-      uses: docker/setup-buildx-action@v2
+      uses: docker/setup-buildx-action@v3
       with:
         # Pinning v0.9.1 for Buildx and BuildKit v0.10.6
         # BuildKit v0.11 which has a bug causing intermittent
@@ -36,7 +36,7 @@ runs:
         driver-opts: image=moby/buildkit:v0.10.6

     - name: Build controller image
-      uses: docker/build-push-action@v3
+      uses: docker/build-push-action@v5
       with:
         file: Dockerfile
         platforms: linux/amd64
@@ -56,7 +56,7 @@ runs:

     - name: Get configure token
       id: config-token
-      uses: peter-murray/workflow-application-token-action@8e1ba3bf1619726336414f1014e37f17fbadf1db
+      uses: peter-murray/workflow-application-token-action@dc0413987a085fa17d19df9e47d4677cf81ffef3
       with:
         application_id: ${{ inputs.app-id }}
         application_private_key: ${{ inputs.app-pk }}
@@ -24,23 +24,23 @@ runs:
       shell: bash

     - name: Set up QEMU
-      uses: docker/setup-qemu-action@v2
+      uses: docker/setup-qemu-action@v3

     - name: Set up Docker Buildx
-      uses: docker/setup-buildx-action@v2
+      uses: docker/setup-buildx-action@v3
       with:
         version: latest

     - name: Login to DockerHub
       if: ${{ github.event_name == 'release' || github.event_name == 'push' && github.ref == 'refs/heads/master' && inputs.password != '' }}
-      uses: docker/login-action@v2
+      uses: docker/login-action@v3
       with:
         username: ${{ inputs.username }}
         password: ${{ inputs.password }}

     - name: Login to GitHub Container Registry
       if: ${{ github.event_name == 'release' || github.event_name == 'push' && github.ref == 'refs/heads/master' && inputs.ghcr_password != '' }}
-      uses: docker/login-action@v2
+      uses: docker/login-action@v3
       with:
         registry: ghcr.io
         username: ${{ inputs.ghcr_username }}
.github/workflows/arc-publish-chart.yaml (vendored, new file) — 212 added lines

@@ -0,0 +1,212 @@
name: Publish ARC Helm Charts

# Revert to https://github.com/actions-runner-controller/releases#releases
# for details on why we use this approach
on:
  push:
    branches:
      - master
    paths:
      - 'charts/**'
      - '.github/workflows/arc-publish-chart.yaml'
      - '!charts/actions-runner-controller/docs/**'
      - '!charts/gha-runner-scale-set-controller/**'
      - '!charts/gha-runner-scale-set/**'
      - '!**.md'
  workflow_dispatch:
    inputs:
      force:
        description: 'Force publish even if the chart version is not bumped'
        type: boolean
        required: true
        default: false

env:
  KUBE_SCORE_VERSION: 1.10.0
  HELM_VERSION: v3.8.0

permissions:
  contents: write

concurrency:
  group: ${{ github.workflow }}
  cancel-in-progress: true

jobs:
  lint-chart:
    name: Lint Chart
    runs-on: ubuntu-latest
    outputs:
      publish-chart: ${{ steps.publish-chart-step.outputs.publish }}
    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Set up Helm
        uses: azure/setup-helm@fe7b79cd5ee1e45176fcad797de68ecaf3ca4814
        with:
          version: ${{ env.HELM_VERSION }}

      - name: Set up kube-score
        run: |
          wget https://github.com/zegl/kube-score/releases/download/v${{ env.KUBE_SCORE_VERSION }}/kube-score_${{ env.KUBE_SCORE_VERSION }}_linux_amd64 -O kube-score
          chmod 755 kube-score

      - name: Kube-score generated manifests
        run: helm template --values charts/.ci/values-kube-score.yaml charts/* | ./kube-score score - --ignore-test pod-networkpolicy --ignore-test deployment-has-poddisruptionbudget --ignore-test deployment-has-host-podantiaffinity --ignore-test container-security-context --ignore-test pod-probes --ignore-test container-image-tag --enable-optional-test container-security-context-privileged --enable-optional-test container-security-context-readonlyrootfilesystem

      # python is a requirement for the chart-testing action below (supports yamllint among other tests)
      - uses: actions/setup-python@v5
        with:
          python-version: '3.11'

      - name: Set up chart-testing
        uses: helm/chart-testing-action@v2.6.0

      - name: Run chart-testing (list-changed)
        id: list-changed
        run: |
          changed=$(ct list-changed --config charts/.ci/ct-config.yaml)
          if [[ -n "$changed" ]]; then
            echo "changed=true" >> $GITHUB_OUTPUT
          fi

      - name: Run chart-testing (lint)
        run: |
          ct lint --config charts/.ci/ct-config.yaml

      - name: Create kind cluster
        if: steps.list-changed.outputs.changed == 'true'
        uses: helm/kind-action@v1.4.0

      # We need cert-manager already installed in the cluster because we assume the CRDs exist
      - name: Install cert-manager
        if: steps.list-changed.outputs.changed == 'true'
        run: |
          helm repo add jetstack https://charts.jetstack.io --force-update
          helm install cert-manager jetstack/cert-manager --set installCRDs=true --wait

      - name: Run chart-testing (install)
        if: steps.list-changed.outputs.changed == 'true'
        run: ct install --config charts/.ci/ct-config.yaml

      # WARNING: This relies on the latest release being at the top of the JSON from GitHub and a clean chart.yaml
      - name: Check if Chart Publish is Needed
        id: publish-chart-step
        run: |
          CHART_TEXT=$(curl -fs https://raw.githubusercontent.com/${{ github.repository }}/master/charts/actions-runner-controller/Chart.yaml)
          NEW_CHART_VERSION=$(echo "$CHART_TEXT" | grep version: | cut -d ' ' -f 2)
          RELEASE_LIST=$(curl -fs https://api.github.com/repos/${{ github.repository }}/releases | jq .[].tag_name | grep actions-runner-controller | cut -d '"' -f 2 | cut -d '-' -f 4)
          LATEST_RELEASED_CHART_VERSION=$(echo $RELEASE_LIST | cut -d ' ' -f 1)

          echo "CHART_VERSION_IN_MASTER=$NEW_CHART_VERSION" >> $GITHUB_ENV
          echo "LATEST_CHART_VERSION=$LATEST_RELEASED_CHART_VERSION" >> $GITHUB_ENV

          # Always publish if force is true
          if [[ $NEW_CHART_VERSION != $LATEST_RELEASED_CHART_VERSION || "${{ inputs.force }}" == "true" ]]; then
            echo "publish=true" >> $GITHUB_OUTPUT
          else
            echo "publish=false" >> $GITHUB_OUTPUT
          fi

      - name: Job summary
        run: |
          echo "Chart linting has been completed." >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "**Status:**" >> $GITHUB_STEP_SUMMARY
          echo "- chart version in master: ${{ env.CHART_VERSION_IN_MASTER }}" >> $GITHUB_STEP_SUMMARY
          echo "- latest chart version: ${{ env.LATEST_CHART_VERSION }}" >> $GITHUB_STEP_SUMMARY
          echo "- publish new chart: ${{ steps.publish-chart-step.outputs.publish }}" >> $GITHUB_STEP_SUMMARY

  publish-chart:
    if: needs.lint-chart.outputs.publish-chart == 'true'
    needs: lint-chart
    name: Publish Chart
    runs-on: ubuntu-latest
    permissions:
      contents: write # for helm/chart-releaser-action to push chart release and create a release
    env:
      CHART_TARGET_ORG: actions-runner-controller
      CHART_TARGET_REPO: actions-runner-controller.github.io
      CHART_TARGET_BRANCH: master

    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Configure Git
        run: |
          git config user.name "$GITHUB_ACTOR"
          git config user.email "$GITHUB_ACTOR@users.noreply.github.com"

      - name: Get Token
        id: get_workflow_token
        uses: peter-murray/workflow-application-token-action@dc0413987a085fa17d19df9e47d4677cf81ffef3
        with:
          application_id: ${{ secrets.ACTIONS_ACCESS_APP_ID }}
          application_private_key: ${{ secrets.ACTIONS_ACCESS_PK }}
          organization: ${{ env.CHART_TARGET_ORG }}

      - name: Install chart-releaser
        uses: helm/chart-releaser-action@v1.4.1
        with:
          install_only: true
          install_dir: ${{ github.workspace }}/bin

      - name: Package and upload release assets
        run: |
          cr package \
            ${{ github.workspace }}/charts/actions-runner-controller/ \
            --package-path .cr-release-packages

          cr upload \
            --owner "$(echo ${{ github.repository }} | cut -d '/' -f 1)" \
            --git-repo "$(echo ${{ github.repository }} | cut -d '/' -f 2)" \
            --package-path .cr-release-packages \
            --token ${{ secrets.GITHUB_TOKEN }}

      - name: Generate updated index.yaml
        run: |
          cr index \
            --owner "$(echo ${{ github.repository }} | cut -d '/' -f 1)" \
            --git-repo "$(echo ${{ github.repository }} | cut -d '/' -f 2)" \
            --index-path ${{ github.workspace }}/index.yaml \
            --token ${{ secrets.GITHUB_TOKEN }} \
            --push \
            --pages-branch 'gh-pages' \
            --pages-index-path 'index.yaml'

      # Chart Release was never intended to publish to a different repo
      # this workaround is intended to move the index.yaml to the target repo
      # where the github pages are hosted
      - name: Checkout target repository
        uses: actions/checkout@v4
        with:
          repository: ${{ env.CHART_TARGET_ORG }}/${{ env.CHART_TARGET_REPO }}
          path: ${{ env.CHART_TARGET_REPO }}
          ref: ${{ env.CHART_TARGET_BRANCH }}
          token: ${{ steps.get_workflow_token.outputs.token }}

      - name: Copy index.yaml
        run: |
          cp ${{ github.workspace }}/index.yaml ${{ env.CHART_TARGET_REPO }}/actions-runner-controller/index.yaml

      - name: Commit and push to target repository
        run: |
          git config user.name "$GITHUB_ACTOR"
          git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
          git add .
          git commit -m "Update index.yaml"
          git push
        working-directory: ${{ github.workspace }}/${{ env.CHART_TARGET_REPO }}

      - name: Job summary
        run: |
          echo "New helm chart has been published" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "**Status:**" >> $GITHUB_STEP_SUMMARY
          echo "- New [index.yaml](https://github.com/${{ env.CHART_TARGET_ORG }}/${{ env.CHART_TARGET_REPO }}/tree/master/actions-runner-controller) pushed" >> $GITHUB_STEP_SUMMARY
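The publish decision in the lint-chart job above comes down to comparing the chart version on master with the version embedded in the latest actions-runner-controller release tag. A minimal local sketch of the same comparison, assuming gh and jq are installed and the command is run from a checkout of the repository (gh api is used here in place of the workflow's raw curl calls; the tag format is inferred from the workflow's own grep/cut pipeline):

  # Sketch only: mirrors the "Check if Chart Publish is Needed" step locally.
  new_version=$(grep '^version:' charts/actions-runner-controller/Chart.yaml | cut -d ' ' -f 2)
  # Release tags look like actions-runner-controller-<chart version>; field 4 is the version.
  latest_version=$(gh api repos/actions/actions-runner-controller/releases --jq '.[].tag_name' \
    | grep actions-runner-controller | head -1 | cut -d '-' -f 4)
  if [ "$new_version" != "$latest_version" ]; then echo "publish=true"; else echo "publish=false"; fi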
.github/workflows/arc-publish.yaml (vendored, new file) — 109 added lines

@@ -0,0 +1,109 @@
name: Publish ARC Image

# Revert to https://github.com/actions-runner-controller/releases#releases
# for details on why we use this approach
on:
  release:
    types:
      - published
  workflow_dispatch:
    inputs:
      release_tag_name:
        description: 'Tag name of the release to publish'
        required: true
      push_to_registries:
        description: 'Push images to registries'
        required: true
        type: boolean
        default: false

permissions:
  contents: write
  packages: write

env:
  TARGET_ORG: actions-runner-controller
  TARGET_REPO: actions-runner-controller

concurrency:
  group: ${{ github.workflow }}
  cancel-in-progress: true

jobs:
  release-controller:
    name: Release
    runs-on: ubuntu-latest
    # gha-runner-scale-set has its own release workflow.
    # We don't want to publish a new actions-runner-controller image
    # when we release gha-runner-scale-set.
    if: ${{ !startsWith(github.event.inputs.release_tag_name, 'gha-runner-scale-set-') }}
    steps:
      - name: Checkout
        uses: actions/checkout@v4

      - uses: actions/setup-go@v5
        with:
          go-version-file: 'go.mod'

      - name: Install tools
        run: |
          curl -L -O https://github.com/kubernetes-sigs/kubebuilder/releases/download/v2.2.0/kubebuilder_2.2.0_linux_amd64.tar.gz
          tar zxvf kubebuilder_2.2.0_linux_amd64.tar.gz
          sudo mv kubebuilder_2.2.0_linux_amd64 /usr/local/kubebuilder
          curl -s https://raw.githubusercontent.com/kubernetes-sigs/kustomize/master/hack/install_kustomize.sh | bash
          sudo mv kustomize /usr/local/bin
          curl -L -O https://github.com/tcnksm/ghr/releases/download/v0.13.0/ghr_v0.13.0_linux_amd64.tar.gz
          tar zxvf ghr_v0.13.0_linux_amd64.tar.gz
          sudo mv ghr_v0.13.0_linux_amd64/ghr /usr/local/bin

      - name: Set version env variable
        run: |
          # Define the release tag name based on the event type
          if [[ "${{ github.event_name }}" == "release" ]]; then
            echo "VERSION=$(cat ${GITHUB_EVENT_PATH} | jq -r '.release.tag_name')" >> $GITHUB_ENV
          elif [[ "${{ github.event_name }}" == "workflow_dispatch" ]]; then
            echo "VERSION=${{ inputs.release_tag_name }}" >> $GITHUB_ENV
          fi

      - name: Upload artifacts
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: |
          make github-release

      - name: Get Token
        id: get_workflow_token
        uses: peter-murray/workflow-application-token-action@dc0413987a085fa17d19df9e47d4677cf81ffef3
        with:
          application_id: ${{ secrets.ACTIONS_ACCESS_APP_ID }}
          application_private_key: ${{ secrets.ACTIONS_ACCESS_PK }}
          organization: ${{ env.TARGET_ORG }}

      - name: Resolve push to registries
        run: |
          # Define the push to registries based on the event type
          if [[ "${{ github.event_name }}" == "release" ]]; then
            echo "PUSH_TO_REGISTRIES=true" >> $GITHUB_ENV
          elif [[ "${{ github.event_name }}" == "workflow_dispatch" ]]; then
            echo "PUSH_TO_REGISTRIES=${{ inputs.push_to_registries }}" >> $GITHUB_ENV
          fi

      - name: Trigger Build And Push Images To Registries
        run: |
          # Authenticate
          gh auth login --with-token <<< ${{ steps.get_workflow_token.outputs.token }}

          # Trigger the workflow run
          jq -n '{"event_type": "arc", "client_payload": {"release_tag_name": "${{ env.VERSION }}", "push_to_registries": "${{ env.PUSH_TO_REGISTRIES }}" }}' \
            | gh api -X POST /repos/actions-runner-controller/releases/dispatches --input -

      - name: Job summary
        run: |
          echo "The [publish-arc](https://github.com/actions-runner-controller/releases/blob/main/.github/workflows/publish-arc.yaml) workflow has been triggered!" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "**Parameters:**" >> $GITHUB_STEP_SUMMARY
          echo "- Release tag: ${{ env.VERSION }}" >> $GITHUB_STEP_SUMMARY
          echo "- Push to registries: ${{ env.PUSH_TO_REGISTRIES }}" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "**Status:**" >> $GITHUB_STEP_SUMMARY
          echo "[https://github.com/actions-runner-controller/releases/actions/workflows/publish-arc.yaml](https://github.com/actions-runner-controller/releases/actions/workflows/publish-arc.yaml)" >> $GITHUB_STEP_SUMMARY
.github/workflows/arc-release-runners.yaml (vendored, new file) — 79 added lines

@@ -0,0 +1,79 @@
name: Release ARC Runner Images

# Revert to https://github.com/actions-runner-controller/releases#releases
# for details on why we use this approach
on:
  # We must do a trigger on a push: instead of a types: closed so GitHub Secrets
  # are available to the workflow run
  push:
    branches:
      - 'master'
    paths:
      - 'runner/VERSION'
      - '.github/workflows/arc-release-runners.yaml'

env:
  # Safeguard to prevent pushing images to registries after build
  PUSH_TO_REGISTRIES: true
  TARGET_ORG: actions-runner-controller
  TARGET_WORKFLOW: release-runners.yaml
  DOCKER_VERSION: 24.0.7

concurrency:
  group: ${{ github.workflow }}
  cancel-in-progress: true

jobs:
  build-runners:
    name: Trigger Build and Push of Runner Images
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Get runner version
        id: versions
        run: |
          runner_current_version="$(echo -n $(cat runner/VERSION | grep 'RUNNER_VERSION=' | cut -d '=' -f2))"
          container_hooks_current_version="$(echo -n $(cat runner/VERSION | grep 'RUNNER_CONTAINER_HOOKS_VERSION=' | cut -d '=' -f2))"
          echo runner_version=$runner_current_version >> $GITHUB_OUTPUT
          echo container_hooks_version=$container_hooks_current_version >> $GITHUB_OUTPUT

      - name: Get Token
        id: get_workflow_token
        uses: peter-murray/workflow-application-token-action@dc0413987a085fa17d19df9e47d4677cf81ffef3
        with:
          application_id: ${{ secrets.ACTIONS_ACCESS_APP_ID }}
          application_private_key: ${{ secrets.ACTIONS_ACCESS_PK }}
          organization: ${{ env.TARGET_ORG }}

      - name: Trigger Build And Push Runner Images To Registries
        env:
          RUNNER_VERSION: ${{ steps.versions.outputs.runner_version }}
          CONTAINER_HOOKS_VERSION: ${{ steps.versions.outputs.container_hooks_version }}
        run: |
          # Authenticate
          gh auth login --with-token <<< ${{ steps.get_workflow_token.outputs.token }}

          # Trigger the workflow run
          gh workflow run ${{ env.TARGET_WORKFLOW }} -R ${{ env.TARGET_ORG }}/releases \
            -f runner_version=${{ env.RUNNER_VERSION }} \
            -f docker_version=${{ env.DOCKER_VERSION }} \
            -f runner_container_hooks_version=${{ env.CONTAINER_HOOKS_VERSION }} \
            -f sha='${{ github.sha }}' \
            -f push_to_registries=${{ env.PUSH_TO_REGISTRIES }}

      - name: Job summary
        env:
          RUNNER_VERSION: ${{ steps.versions.outputs.runner_version }}
          CONTAINER_HOOKS_VERSION: ${{ steps.versions.outputs.container_hooks_version }}
        run: |
          echo "The [release-runners.yaml](https://github.com/actions-runner-controller/releases/blob/main/.github/workflows/release-runners.yaml) workflow has been triggered!" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "**Parameters:**" >> $GITHUB_STEP_SUMMARY
          echo "- runner_version: ${{ env.RUNNER_VERSION }}" >> $GITHUB_STEP_SUMMARY
          echo "- docker_version: ${{ env.DOCKER_VERSION }}" >> $GITHUB_STEP_SUMMARY
          echo "- runner_container_hooks_version: ${{ env.CONTAINER_HOOKS_VERSION }}" >> $GITHUB_STEP_SUMMARY
          echo "- sha: ${{ github.sha }}" >> $GITHUB_STEP_SUMMARY
          echo "- push_to_registries: ${{ env.PUSH_TO_REGISTRIES }}" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "**Status:**" >> $GITHUB_STEP_SUMMARY
          echo "[https://github.com/actions-runner-controller/releases/actions/workflows/release-runners.yaml](https://github.com/actions-runner-controller/releases/actions/workflows/release-runners.yaml)" >> $GITHUB_STEP_SUMMARY
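Once the dispatch above succeeds, the triggered run lives in the actions-runner-controller/releases repository rather than in this one. One way to follow it from a terminal, assuming the authenticated token can read that repository (illustrative only, not part of the workflow):

  # Sketch only: list the most recent run of the target workflow, then follow one interactively.
  gh run list -R actions-runner-controller/releases --workflow release-runners.yaml --limit 1
  gh run watch -R actions-runner-controller/releases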
.github/workflows/arc-update-runners-scheduled.yaml (vendored, new file) — 153 added lines

@@ -0,0 +1,153 @@
# This workflow polls releases from actions/runner and in case of a new one it
# updates files containing runner version and opens a pull request.
name: Runner Updates Check (Scheduled Job)

on:
  schedule:
    # run daily
    - cron: "0 9 * * *"
  workflow_dispatch:

jobs:
  # check_versions compares our current version and the latest available runner
  # version and sets them as outputs.
  check_versions:
    runs-on: ubuntu-latest
    env:
      GH_TOKEN: ${{ github.token }}
    outputs:
      runner_current_version: ${{ steps.runner_versions.outputs.runner_current_version }}
      runner_latest_version: ${{ steps.runner_versions.outputs.runner_latest_version }}
      container_hooks_current_version: ${{ steps.container_hooks_versions.outputs.container_hooks_current_version }}
      container_hooks_latest_version: ${{ steps.container_hooks_versions.outputs.container_hooks_latest_version }}
    steps:
      - uses: actions/checkout@v4

      - name: Get runner current and latest versions
        id: runner_versions
        run: |
          CURRENT_VERSION="$(echo -n $(cat runner/VERSION | grep 'RUNNER_VERSION=' | cut -d '=' -f2))"
          echo "Current version: $CURRENT_VERSION"
          echo runner_current_version=$CURRENT_VERSION >> $GITHUB_OUTPUT

          LATEST_VERSION=$(gh release list --exclude-drafts --exclude-pre-releases --limit 1 -R actions/runner | grep -oP '(?<=v)[0-9.]+' | head -1)
          echo "Latest version: $LATEST_VERSION"
          echo runner_latest_version=$LATEST_VERSION >> $GITHUB_OUTPUT

      - name: Get container-hooks current and latest versions
        id: container_hooks_versions
        run: |
          CURRENT_VERSION="$(echo -n $(cat runner/VERSION | grep 'RUNNER_CONTAINER_HOOKS_VERSION=' | cut -d '=' -f2))"
          echo "Current version: $CURRENT_VERSION"
          echo container_hooks_current_version=$CURRENT_VERSION >> $GITHUB_OUTPUT

          LATEST_VERSION=$(gh release list --exclude-drafts --exclude-pre-releases --limit 1 -R actions/runner-container-hooks | grep -oP '(?<=v)[0-9.]+' | head -1)
          echo "Latest version: $LATEST_VERSION"
          echo container_hooks_latest_version=$LATEST_VERSION >> $GITHUB_OUTPUT

  # check_pr checks if a PR for the same update already exists. It only runs if
  # runner latest version != our current version. If no existing PR is found,
  # it sets a PR name as output.
  check_pr:
    runs-on: ubuntu-latest
    needs: check_versions
    if: needs.check_versions.outputs.runner_current_version != needs.check_versions.outputs.runner_latest_version || needs.check_versions.outputs.container_hooks_current_version != needs.check_versions.outputs.container_hooks_latest_version
    outputs:
      pr_name: ${{ steps.pr_name.outputs.pr_name }}
    env:
      GH_TOKEN: ${{ github.token }}
    steps:
      - name: debug
        run:
          echo "RUNNER_CURRENT_VERSION=${{ needs.check_versions.outputs.runner_current_version }}"
          echo "RUNNER_LATEST_VERSION=${{ needs.check_versions.outputs.runner_latest_version }}"
          echo "CONTAINER_HOOKS_CURRENT_VERSION=${{ needs.check_versions.outputs.container_hooks_current_version }}"
          echo "CONTAINER_HOOKS_LATEST_VERSION=${{ needs.check_versions.outputs.container_hooks_latest_version }}"

      - uses: actions/checkout@v4

      - name: PR Name
        id: pr_name
        env:
          RUNNER_CURRENT_VERSION: ${{ needs.check_versions.outputs.runner_current_version }}
          RUNNER_LATEST_VERSION: ${{ needs.check_versions.outputs.runner_latest_version }}
          CONTAINER_HOOKS_CURRENT_VERSION: ${{ needs.check_versions.outputs.container_hooks_current_version }}
          CONTAINER_HOOKS_LATEST_VERSION: ${{ needs.check_versions.outputs.container_hooks_latest_version }}
        # Generate a PR name with the following title:
        # Updates: runner to v2.304.0 and container-hooks to v0.3.1
        run: |
          RUNNER_MESSAGE="runner to v${RUNNER_LATEST_VERSION}"
          CONTAINER_HOOKS_MESSAGE="container-hooks to v${CONTAINER_HOOKS_LATEST_VERSION}"

          PR_NAME="Updates:"
          if [ "$RUNNER_CURRENT_VERSION" != "$RUNNER_LATEST_VERSION" ]
          then
            PR_NAME="$PR_NAME $RUNNER_MESSAGE"
          fi
          if [ "$CONTAINER_HOOKS_CURRENT_VERSION" != "$CONTAINER_HOOKS_LATEST_VERSION" ]
          then
            PR_NAME="$PR_NAME $CONTAINER_HOOKS_MESSAGE"
          fi

          result=$(gh pr list --search "$PR_NAME" --json number --jq ".[].number" --limit 1)
          if [ -z "$result" ]
          then
            echo "No existing PRs found, setting output with pr_name=$PR_NAME"
            echo pr_name=$PR_NAME >> $GITHUB_OUTPUT
          else
            echo "Found a PR with title '$PR_NAME' already existing: ${{ github.server_url }}/${{ github.repository }}/pull/$result"
          fi

  # update_version updates runner version in the files listed below, commits
  # the changes and opens a pull request as `github-actions` bot.
  update_version:
    runs-on: ubuntu-latest
    needs:
      - check_versions
      - check_pr
    if: needs.check_pr.outputs.pr_name
    permissions:
      pull-requests: write
      contents: write
      actions: write
    env:
      GH_TOKEN: ${{ github.token }}
      RUNNER_CURRENT_VERSION: ${{ needs.check_versions.outputs.runner_current_version }}
      RUNNER_LATEST_VERSION: ${{ needs.check_versions.outputs.runner_latest_version }}
      CONTAINER_HOOKS_CURRENT_VERSION: ${{ needs.check_versions.outputs.container_hooks_current_version }}
      CONTAINER_HOOKS_LATEST_VERSION: ${{ needs.check_versions.outputs.container_hooks_latest_version }}
      PR_NAME: ${{ needs.check_pr.outputs.pr_name }}

    steps:
      - uses: actions/checkout@v4

      - name: New branch
        run: git checkout -b update-runner-"$(date +%Y-%m-%d)"

      - name: Update files
        run: |
          CURRENT_VERSION="${RUNNER_CURRENT_VERSION//./\\.}"
          LATEST_VERSION="${RUNNER_LATEST_VERSION//./\\.}"
          sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" runner/VERSION
          sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" runner/Makefile
          sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" Makefile
          sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" test/e2e/e2e_test.go

          CURRENT_VERSION="${CONTAINER_HOOKS_CURRENT_VERSION//./\\.}"
          LATEST_VERSION="${CONTAINER_HOOKS_LATEST_VERSION//./\\.}"
          sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" runner/VERSION
          sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" runner/Makefile
          sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" Makefile
          sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" test/e2e/e2e_test.go

      - name: Commit changes
        run: |
          # from https://github.com/orgs/community/discussions/26560
          git config user.email "41898282+github-actions[bot]@users.noreply.github.com"
          git config user.name "github-actions[bot]"
          git add .
          git commit -m "$PR_NAME"
          git push -u origin HEAD

      - name: Create pull request
        run: gh pr create -f -l "runners update"
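The Update files step depends on escaping the dots in both version strings before handing them to sed, so that a pattern like 2.316.0 matches only that literal version rather than any character between the digits. A standalone sketch of that pattern, with made-up version numbers (the workflow derives the real ones from runner/VERSION):

  # Sketch only: hypothetical versions for illustration.
  RUNNER_CURRENT_VERSION=2.316.0
  RUNNER_LATEST_VERSION=2.317.0
  CURRENT_VERSION="${RUNNER_CURRENT_VERSION//./\\.}"   # -> 2\.316\.0, dots are now literal in the sed pattern
  LATEST_VERSION="${RUNNER_LATEST_VERSION//./\\.}"     # escaping the replacement is harmless; \. prints as .
  sed -i "s/$CURRENT_VERSION/$LATEST_VERSION/g" runner/VERSION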
.github/workflows/arc-validate-chart.yaml (vendored, new file) — 103 added lines

@@ -0,0 +1,103 @@
name: Validate Helm Chart

on:
  pull_request:
    branches:
      - master
    paths:
      - 'charts/**'
      - '.github/workflows/arc-validate-chart.yaml'
      - '!charts/actions-runner-controller/docs/**'
      - '!**.md'
      - '!charts/gha-runner-scale-set-controller/**'
      - '!charts/gha-runner-scale-set/**'
  push:
    paths:
      - 'charts/**'
      - '.github/workflows/arc-validate-chart.yaml'
      - '!charts/actions-runner-controller/docs/**'
      - '!**.md'
      - '!charts/gha-runner-scale-set-controller/**'
      - '!charts/gha-runner-scale-set/**'
  workflow_dispatch:
env:
  KUBE_SCORE_VERSION: 1.10.0
  HELM_VERSION: v3.8.0

permissions:
  contents: read

concurrency:
  # This will make sure we only apply the concurrency limits on pull requests
  # but not pushes to master branch by making the concurrency group name unique
  # for pushes
  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

jobs:
  validate-chart:
    name: Lint Chart
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Set up Helm
        # Using https://github.com/Azure/setup-helm/releases/tag/v4.2
        uses: azure/setup-helm@fe7b79cd5ee1e45176fcad797de68ecaf3ca4814
        with:
          version: ${{ env.HELM_VERSION }}

      - name: Set up kube-score
        run: |
          wget https://github.com/zegl/kube-score/releases/download/v${{ env.KUBE_SCORE_VERSION }}/kube-score_${{ env.KUBE_SCORE_VERSION }}_linux_amd64 -O kube-score
          chmod 755 kube-score

      - name: Kube-score generated manifests
        run: helm template --values charts/.ci/values-kube-score.yaml charts/* | ./kube-score score -
          --ignore-test pod-networkpolicy
          --ignore-test deployment-has-poddisruptionbudget
          --ignore-test deployment-has-host-podantiaffinity
          --ignore-test container-security-context
          --ignore-test pod-probes
          --ignore-test container-image-tag
          --enable-optional-test container-security-context-privileged
          --enable-optional-test container-security-context-readonlyrootfilesystem

      # python is a requirement for the chart-testing action below (supports yamllint among other tests)
      - uses: actions/setup-python@v5
        with:
          python-version: '3.11'

      - name: Set up chart-testing
        uses: helm/chart-testing-action@v2.6.0

      - name: Run chart-testing (list-changed)
        id: list-changed
        run: |
          changed=$(ct list-changed --config charts/.ci/ct-config.yaml)
          if [[ -n "$changed" ]]; then
            echo "changed=true" >> $GITHUB_OUTPUT
          fi

      - name: Run chart-testing (lint)
        run: |
          ct lint --config charts/.ci/ct-config.yaml

      - name: Create kind cluster
        uses: helm/kind-action@v1.4.0
        if: steps.list-changed.outputs.changed == 'true'

      # We need cert-manager already installed in the cluster because we assume the CRDs exist
      - name: Install cert-manager
        if: steps.list-changed.outputs.changed == 'true'
        run: |
          helm repo add jetstack https://charts.jetstack.io --force-update
          helm install cert-manager jetstack/cert-manager --set installCRDs=true --wait

      - name: Run chart-testing (install)
        if: steps.list-changed.outputs.changed == 'true'
        run: |
          ct install --config charts/.ci/ct-config.yaml
.github/workflows/arc-validate-runners.yaml (vendored, new file) — 52 added lines

@@ -0,0 +1,52 @@
name: Validate ARC Runners

on:
  pull_request:
    branches:
      - '**'
    paths:
      - 'runner/**'
      - 'test/startup/**'
      - '!**.md'

permissions:
  contents: read

concurrency:
  # This will make sure we only apply the concurrency limits on pull requests
  # but not pushes to master branch by making the concurrency group name unique
  # for pushes
  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

jobs:
  shellcheck:
    name: runner / shellcheck
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: shellcheck
        uses: reviewdog/action-shellcheck@v1
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          path: "./runner"
          pattern: |
            *.sh
            *.bash
            update-status
          # Make this consistent with `make shellcheck`
          shellcheck_flags: "--shell bash --source-path runner"
          exclude: "./.git/*"
          check_all_files_with_shebangs: "false"
          # Set this to "true" once we addressed all the shellcheck findings
          fail_on_error: "false"
  test-runner-entrypoint:
    name: Test entrypoint
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4

      - name: Run tests
        run: |
          make acceptance/runner/startup
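The shellcheck job is meant to stay consistent with the repository's own make target; a rough local equivalent, assuming shellcheck is installed and the command is run from the repository root (the workflow additionally covers *.bash files and the update-status script), might look like:

  # Sketch only: same flags the workflow passes via shellcheck_flags.
  shellcheck --shell bash --source-path runner runner/*.sh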
.github/workflows/gha-e2e-tests.yaml (vendored) — 20 changed lines

@@ -16,7 +16,7 @@ env:
   TARGET_ORG: actions-runner-controller
   TARGET_REPO: arc_e2e_test_dummy
   IMAGE_NAME: "arc-test-image"
-  IMAGE_VERSION: "0.9.2"
+  IMAGE_VERSION: "0.9.3"

 concurrency:
   # This will make sure we only apply the concurrency limits on pull requests
@@ -33,7 +33,7 @@ jobs:
     env:
       WORKFLOW_FILE: "arc-test-workflow.yaml"
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
         with:
           ref: ${{github.head_ref}}

@@ -122,7 +122,7 @@ jobs:
     env:
       WORKFLOW_FILE: "arc-test-workflow.yaml"
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
         with:
           ref: ${{github.head_ref}}

@@ -213,7 +213,7 @@ jobs:
     env:
       WORKFLOW_FILE: arc-test-dind-workflow.yaml
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
         with:
           ref: ${{github.head_ref}}

@@ -303,7 +303,7 @@ jobs:
     env:
       WORKFLOW_FILE: "arc-test-kubernetes-workflow.yaml"
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
         with:
           ref: ${{github.head_ref}}

@@ -402,7 +402,7 @@ jobs:
     env:
       WORKFLOW_FILE: "arc-test-workflow.yaml"
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
         with:
           ref: ${{github.head_ref}}

@@ -503,7 +503,7 @@ jobs:
     env:
       WORKFLOW_FILE: "arc-test-workflow.yaml"
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
         with:
           ref: ${{github.head_ref}}

@@ -598,7 +598,7 @@ jobs:
     env:
       WORKFLOW_FILE: "arc-test-workflow.yaml"
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
         with:
           ref: ${{github.head_ref}}

@@ -718,7 +718,7 @@ jobs:
     env:
       WORKFLOW_FILE: "arc-test-sleepy-matrix.yaml"
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
         with:
           ref: ${{github.head_ref}}

@@ -888,7 +888,7 @@ jobs:
     env:
       WORKFLOW_FILE: arc-test-workflow.yaml
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
         with:
           ref: ${{ github.head_ref }}

.github/workflows/gha-publish-chart.yaml (vendored, new file) — 212 added lines

@@ -0,0 +1,212 @@
name: (gha) Publish Helm Charts

on:
  workflow_dispatch:
    inputs:
      ref:
        description: 'The branch, tag or SHA to cut a release from'
        required: false
        type: string
        default: ''
      release_tag_name:
        description: 'The name to tag the controller image with'
        required: true
        type: string
        default: 'canary'
      push_to_registries:
        description: 'Push images to registries'
        required: true
        type: boolean
        default: false
      publish_gha_runner_scale_set_controller_chart:
        description: 'Publish new helm chart for gha-runner-scale-set-controller'
        required: true
        type: boolean
        default: false
      publish_gha_runner_scale_set_chart:
        description: 'Publish new helm chart for gha-runner-scale-set'
        required: true
        type: boolean
        default: false

env:
  HELM_VERSION: v3.8.0

permissions:
  packages: write

concurrency:
  group: ${{ github.workflow }}
  cancel-in-progress: true

jobs:
  build-push-image:
    name: Build and push controller image
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          # If inputs.ref is empty, it'll resolve to the default branch
          ref: ${{ inputs.ref }}

      - name: Check chart versions
        # Binary version and chart versions need to match.
        # In case of an upgrade, the controller will try to clean up
        # resources with older versions that should have been cleaned up
        # during the upgrade process
        run: ./hack/check-gh-chart-versions.sh ${{ inputs.release_tag_name }}

      - name: Resolve parameters
        id: resolve_parameters
        run: |
          resolvedRef="${{ inputs.ref }}"
          if [ -z "$resolvedRef" ]
          then
            resolvedRef="${{ github.ref }}"
          fi
          echo "resolved_ref=$resolvedRef" >> $GITHUB_OUTPUT
          echo "INFO: Resolving short SHA for $resolvedRef"
          echo "short_sha=$(git rev-parse --short $resolvedRef)" >> $GITHUB_OUTPUT
          echo "INFO: Normalizing repository name (lowercase)"
          echo "repository_owner=$(echo ${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT

      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
        with:
          # Pinning v0.9.1 for Buildx and BuildKit v0.10.6
          # BuildKit v0.11 which has a bug causing intermittent
          # failures pushing images to GHCR
          version: v0.9.1
          driver-opts: image=moby/buildkit:v0.10.6

      - name: Login to GitHub Container Registry
        uses: docker/login-action@v3
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Build & push controller image
        uses: docker/build-push-action@v5
        with:
          file: Dockerfile
          platforms: linux/amd64,linux/arm64
          build-args: VERSION=${{ inputs.release_tag_name }}
          push: ${{ inputs.push_to_registries }}
          tags: |
            ghcr.io/${{ steps.resolve_parameters.outputs.repository_owner }}/gha-runner-scale-set-controller:${{ inputs.release_tag_name }}
            ghcr.io/${{ steps.resolve_parameters.outputs.repository_owner }}/gha-runner-scale-set-controller:${{ inputs.release_tag_name }}-${{ steps.resolve_parameters.outputs.short_sha }}
          cache-from: type=gha
          cache-to: type=gha,mode=max

      - name: Job summary
        run: |
          echo "The [gha-publish-chart.yaml](https://github.com/actions/actions-runner-controller/blob/main/.github/workflows/gha-publish-chart.yaml) workflow run was completed successfully!" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "**Parameters:**" >> $GITHUB_STEP_SUMMARY
          echo "- Ref: ${{ steps.resolve_parameters.outputs.resolvedRef }}" >> $GITHUB_STEP_SUMMARY
          echo "- Short SHA: ${{ steps.resolve_parameters.outputs.short_sha }}" >> $GITHUB_STEP_SUMMARY
          echo "- Release tag: ${{ inputs.release_tag_name }}" >> $GITHUB_STEP_SUMMARY
          echo "- Push to registries: ${{ inputs.push_to_registries }}" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY

  publish-helm-chart-gha-runner-scale-set-controller:
    if: ${{ inputs.publish_gha_runner_scale_set_controller_chart == true }}
    needs: build-push-image
    name: Publish Helm chart for gha-runner-scale-set-controller
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          # If inputs.ref is empty, it'll resolve to the default branch
          ref: ${{ inputs.ref }}

      - name: Resolve parameters
        id: resolve_parameters
        run: |
          resolvedRef="${{ inputs.ref }}"
          if [ -z "$resolvedRef" ]
          then
            resolvedRef="${{ github.ref }}"
          fi
          echo "INFO: Resolving short SHA for $resolvedRef"
          echo "short_sha=$(git rev-parse --short $resolvedRef)" >> $GITHUB_OUTPUT
          echo "INFO: Normalizing repository name (lowercase)"
          echo "repository_owner=$(echo ${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT

      - name: Set up Helm
        # Using https://github.com/Azure/setup-helm/releases/tag/v4.2
        uses: azure/setup-helm@fe7b79cd5ee1e45176fcad797de68ecaf3ca4814
        with:
          version: ${{ env.HELM_VERSION }}

      - name: Publish new helm chart for gha-runner-scale-set-controller
        run: |
          echo ${{ secrets.GITHUB_TOKEN }} | helm registry login ghcr.io --username ${{ github.actor }} --password-stdin
          GHA_RUNNER_SCALE_SET_CONTROLLER_CHART_VERSION_TAG=$(cat charts/gha-runner-scale-set-controller/Chart.yaml | grep version: | cut -d " " -f 2)
          echo "GHA_RUNNER_SCALE_SET_CONTROLLER_CHART_VERSION_TAG=${GHA_RUNNER_SCALE_SET_CONTROLLER_CHART_VERSION_TAG}" >> $GITHUB_ENV
          helm package charts/gha-runner-scale-set-controller/ --version="${GHA_RUNNER_SCALE_SET_CONTROLLER_CHART_VERSION_TAG}"
          helm push gha-runner-scale-set-controller-"${GHA_RUNNER_SCALE_SET_CONTROLLER_CHART_VERSION_TAG}".tgz oci://ghcr.io/${{ steps.resolve_parameters.outputs.repository_owner }}/actions-runner-controller-charts

      - name: Job summary
        run: |
          echo "New helm chart for gha-runner-scale-set-controller published successfully!" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "**Parameters:**" >> $GITHUB_STEP_SUMMARY
          echo "- Ref: ${{ steps.resolve_parameters.outputs.resolvedRef }}" >> $GITHUB_STEP_SUMMARY
          echo "- Short SHA: ${{ steps.resolve_parameters.outputs.short_sha }}" >> $GITHUB_STEP_SUMMARY
          echo "- gha-runner-scale-set-controller Chart version: ${{ env.GHA_RUNNER_SCALE_SET_CONTROLLER_CHART_VERSION_TAG }}" >> $GITHUB_STEP_SUMMARY

  publish-helm-chart-gha-runner-scale-set:
    if: ${{ inputs.publish_gha_runner_scale_set_chart == true }}
    needs: build-push-image
    name: Publish Helm chart for gha-runner-scale-set
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          # If inputs.ref is empty, it'll resolve to the default branch
          ref: ${{ inputs.ref }}

      - name: Resolve parameters
        id: resolve_parameters
        run: |
          resolvedRef="${{ inputs.ref }}"
          if [ -z "$resolvedRef" ]
          then
            resolvedRef="${{ github.ref }}"
          fi
          echo "INFO: Resolving short SHA for $resolvedRef"
          echo "short_sha=$(git rev-parse --short $resolvedRef)" >> $GITHUB_OUTPUT
          echo "INFO: Normalizing repository name (lowercase)"
          echo "repository_owner=$(echo ${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT

      - name: Set up Helm
        # Using https://github.com/Azure/setup-helm/releases/tag/v4.2
        uses: azure/setup-helm@fe7b79cd5ee1e45176fcad797de68ecaf3ca4814
        with:
          version: ${{ env.HELM_VERSION }}

      - name: Publish new helm chart for gha-runner-scale-set
        run: |
          echo ${{ secrets.GITHUB_TOKEN }} | helm registry login ghcr.io --username ${{ github.actor }} --password-stdin

          GHA_RUNNER_SCALE_SET_CHART_VERSION_TAG=$(cat charts/gha-runner-scale-set/Chart.yaml | grep version: | cut -d " " -f 2)
          echo "GHA_RUNNER_SCALE_SET_CHART_VERSION_TAG=${GHA_RUNNER_SCALE_SET_CHART_VERSION_TAG}" >> $GITHUB_ENV
          helm package charts/gha-runner-scale-set/ --version="${GHA_RUNNER_SCALE_SET_CHART_VERSION_TAG}"
          helm push gha-runner-scale-set-"${GHA_RUNNER_SCALE_SET_CHART_VERSION_TAG}".tgz oci://ghcr.io/${{ steps.resolve_parameters.outputs.repository_owner }}/actions-runner-controller-charts

      - name: Job summary
        run: |
          echo "New helm chart for gha-runner-scale-set published successfully!" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "**Parameters:**" >> $GITHUB_STEP_SUMMARY
          echo "- Ref: ${{ steps.resolve_parameters.outputs.resolvedRef }}" >> $GITHUB_STEP_SUMMARY
          echo "- Short SHA: ${{ steps.resolve_parameters.outputs.short_sha }}" >> $GITHUB_STEP_SUMMARY
          echo "- gha-runner-scale-set Chart version: ${{ env.GHA_RUNNER_SCALE_SET_CHART_VERSION_TAG }}" >> $GITHUB_STEP_SUMMARY
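After a chart has been pushed, it can be sanity-checked straight from the OCI registry. A possible spot-check, assuming the chart was published under the actions organization as the workflow's repository_owner output implies (the version below is a placeholder):

  # Sketch only: confirm the pushed chart metadata is readable from GHCR.
  helm show chart oci://ghcr.io/actions/actions-runner-controller-charts/gha-runner-scale-set-controller --version 0.9.3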
.github/workflows/gha-validate-chart.yaml (vendored, new file) — 125 added lines

@@ -0,0 +1,125 @@
name: (gha) Validate Helm Charts

on:
  pull_request:
    branches:
      - master
    paths:
      - 'charts/**'
      - '.github/workflows/gha-validate-chart.yaml'
      - '!charts/actions-runner-controller/**'
      - '!**.md'
  push:
    paths:
      - 'charts/**'
      - '.github/workflows/gha-validate-chart.yaml'
      - '!charts/actions-runner-controller/**'
      - '!**.md'
  workflow_dispatch:
env:
  KUBE_SCORE_VERSION: 1.16.1
  HELM_VERSION: v3.8.0

permissions:
  contents: read

concurrency:
  # This will make sure we only apply the concurrency limits on pull requests
  # but not pushes to master branch by making the concurrency group name unique
  # for pushes
  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

jobs:
  validate-chart:
    name: Lint Chart
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Set up Helm
        # Using https://github.com/Azure/setup-helm/releases/tag/v4.2
        uses: azure/setup-helm@fe7b79cd5ee1e45176fcad797de68ecaf3ca4814
        with:
          version: ${{ env.HELM_VERSION }}

      - name: Set up kube-score
        run: |
          wget https://github.com/zegl/kube-score/releases/download/v${{ env.KUBE_SCORE_VERSION }}/kube-score_${{ env.KUBE_SCORE_VERSION }}_linux_amd64 -O kube-score
          chmod 755 kube-score

      - name: Kube-score generated manifests
        run: helm template --values charts/.ci/values-kube-score.yaml charts/* | ./kube-score score -
          --ignore-test pod-networkpolicy
          --ignore-test deployment-has-poddisruptionbudget
          --ignore-test deployment-has-host-podantiaffinity
          --ignore-test container-security-context
          --ignore-test pod-probes
          --ignore-test container-image-tag
          --enable-optional-test container-security-context-privileged
          --enable-optional-test container-security-context-readonlyrootfilesystem

      # python is a requirement for the chart-testing action below (supports yamllint among other tests)
      - uses: actions/setup-python@v5
        with:
          python-version: '3.11'

      - name: Set up chart-testing
        uses: helm/chart-testing-action@v2.6.0

      - name: Run chart-testing (list-changed)
        id: list-changed
        run: |
          ct version
          changed=$(ct list-changed --config charts/.ci/ct-config-gha.yaml)
          if [[ -n "$changed" ]]; then
            echo "changed=true" >> $GITHUB_OUTPUT
          fi

      - name: Run chart-testing (lint)
        run: |
          ct lint --config charts/.ci/ct-config-gha.yaml

      - name: Set up docker buildx
        uses: docker/setup-buildx-action@v3
        if: steps.list-changed.outputs.changed == 'true'
        with:
          version: latest

      - name: Build controller image
        uses: docker/build-push-action@v5
        if: steps.list-changed.outputs.changed == 'true'
        with:
          file: Dockerfile
          platforms: linux/amd64
          load: true
          build-args: |
            DOCKER_IMAGE_NAME=test-arc
            VERSION=dev
          tags: |
            test-arc:dev
          cache-from: type=gha
          cache-to: type=gha,mode=max

      - name: Create kind cluster
        uses: helm/kind-action@v1.4.0
        if: steps.list-changed.outputs.changed == 'true'
        with:
          cluster_name: chart-testing

      - name: Load image into cluster
        if: steps.list-changed.outputs.changed == 'true'
        run: |
          export DOCKER_IMAGE_NAME=test-arc
          export VERSION=dev
          export IMG_RESULT=load
          make docker-buildx
          kind load docker-image test-arc:dev --name chart-testing

      - name: Run chart-testing (install)
        if: steps.list-changed.outputs.changed == 'true'
        run: |
          ct install --config charts/.ci/ct-config-gha.yaml
.github/workflows/global-publish-canary.yaml (49 changed lines)
@@ -46,15 +46,54 @@ env:
   PUSH_TO_REGISTRIES: true

 jobs:
+  legacy-canary-build:
+    name: Build and Publish Legacy Canary Image
+    runs-on: ubuntu-latest
+    env:
+      DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }}
+      TARGET_ORG: actions-runner-controller
+      TARGET_REPO: actions-runner-controller
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+
+      - name: Get Token
+        id: get_workflow_token
+        uses: peter-murray/workflow-application-token-action@dc0413987a085fa17d19df9e47d4677cf81ffef3
+        with:
+          application_id: ${{ secrets.ACTIONS_ACCESS_APP_ID }}
+          application_private_key: ${{ secrets.ACTIONS_ACCESS_PK }}
+          organization: ${{ env.TARGET_ORG }}
+
+      - name: Trigger Build And Push Images To Registries
+        run: |
+          # Authenticate
+          gh auth login --with-token <<< ${{ steps.get_workflow_token.outputs.token }}
+
+          # Trigger the workflow run
+          jq -n '{"event_type": "canary", "client_payload": {"sha": "${{ github.sha }}", "push_to_registries": ${{ env.PUSH_TO_REGISTRIES }}}}' \
+            | gh api -X POST /repos/actions-runner-controller/releases/dispatches --input -
+
+      - name: Job summary
+        run: |
+          echo "The [publish-canary](https://github.com/actions-runner-controller/releases/blob/main/.github/workflows/publish-canary.yaml) workflow has been triggered!" >> $GITHUB_STEP_SUMMARY
+          echo "" >> $GITHUB_STEP_SUMMARY
+          echo "**Parameters:**" >> $GITHUB_STEP_SUMMARY
+          echo "- sha: ${{ github.sha }}" >> $GITHUB_STEP_SUMMARY
+          echo "- Push to registries: ${{ env.PUSH_TO_REGISTRIES }}" >> $GITHUB_STEP_SUMMARY
+          echo "" >> $GITHUB_STEP_SUMMARY
+          echo "**Status:**" >> $GITHUB_STEP_SUMMARY
+          echo "[https://github.com/actions-runner-controller/releases/actions/workflows/publish-canary.yaml](https://github.com/actions-runner-controller/releases/actions/workflows/publish-canary.yaml)" >> $GITHUB_STEP_SUMMARY
+
   canary-build:
     name: Build and Publish gha-runner-scale-set-controller Canary Image
     runs-on: ubuntu-latest
     steps:
       - name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4

       - name: Login to GitHub Container Registry
-        uses: docker/login-action@v2
+        uses: docker/login-action@v3
         with:
           registry: ghcr.io
           username: ${{ github.actor }}
@@ -71,16 +110,16 @@ jobs:
           echo "repository_owner=$(echo ${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT

       - name: Set up QEMU
-        uses: docker/setup-qemu-action@v2
+        uses: docker/setup-qemu-action@v3

       - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v2
+        uses: docker/setup-buildx-action@v3
         with:
           version: latest

       # Unstable builds - run at your own risk
       - name: Build and Push
-        uses: docker/build-push-action@v3
+        uses: docker/build-push-action@v5
         with:
           context: .
           file: ./Dockerfile
.github/workflows/global-run-codeql.yaml (4 changed lines)
@@ -25,10 +25,10 @@ jobs:
       security-events: write
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4

       - name: Install Go
-        uses: actions/setup-go@v4
+        uses: actions/setup-go@v5
         with:
           go-version-file: go.mod

.github/workflows/global-run-first-interaction.yaml (new file, 29 lines)
@@ -0,0 +1,29 @@
name: First Interaction

on:
  issues:
    types: [opened]
  pull_request:
    branches: [master]
    types: [opened]

jobs:
  check_for_first_interaction:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: actions/first-interaction@main
        with:
          repo-token: ${{ secrets.GITHUB_TOKEN }}
          issue-message: |
            Hello! Thank you for filing an issue.

            The maintainers will triage your issue shortly.

            In the meantime, please take a look at the [troubleshooting guide](https://github.com/actions/actions-runner-controller/blob/master/TROUBLESHOOTING.md) for bug reports.

            If this is a feature request, please review our [contribution guidelines](https://github.com/actions/actions-runner-controller/blob/master/CONTRIBUTING.md).
          pr-message: |
            Hello! Thank you for your contribution.

            Please review our [contribution guidelines](https://github.com/actions/actions-runner-controller/blob/master/CONTRIBUTING.md) to understand the project's testing and code conventions.
.github/workflows/global-run-stale.yaml (new file, 25 lines)
@@ -0,0 +1,25 @@
name: Run Stale Bot
on:
  schedule:
    - cron: '30 1 * * *'

permissions:
  contents: read

jobs:
  stale:
    name: Run Stale
    runs-on: ubuntu-latest
    permissions:
      issues: write # for actions/stale to close stale issues
      pull-requests: write # for actions/stale to close stale PRs
    steps:
      - uses: actions/stale@v6
        with:
          stale-issue-message: 'This issue is stale because it has been open 30 days with no activity. Remove stale label or comment or this will be closed in 5 days.'
          # turn off stale for both issues and PRs
          days-before-stale: -1
          # turn stale back on for issues only
          days-before-issue-stale: 30
          days-before-issue-close: 14
          exempt-issue-labels: 'pinned,security,enhancement,refactor,documentation,chore,bug,dependencies,needs-investigation'
.github/workflows/go.yaml (18 changed lines)
@@ -29,8 +29,8 @@ jobs:
   fmt:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
-      - uses: actions/setup-go@v4
+      - uses: actions/setup-go@v5
         with:
           go-version-file: 'go.mod'
           cache: false
@@ -42,13 +42,13 @@ jobs:
   lint:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
-      - uses: actions/setup-go@v4
+      - uses: actions/setup-go@v5
         with:
           go-version-file: 'go.mod'
           cache: false
       - name: golangci-lint
-        uses: golangci/golangci-lint-action@v3
+        uses: golangci/golangci-lint-action@v6
         with:
           only-new-issues: true
           version: v1.55.2
@@ -56,8 +56,8 @@ jobs:
   generate:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
-      - uses: actions/setup-go@v4
+      - uses: actions/setup-go@v5
         with:
           go-version-file: 'go.mod'
           cache: false
@@ -69,8 +69,8 @@ jobs:
   test:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
-      - uses: actions/setup-go@v4
+      - uses: actions/setup-go@v5
         with:
           go-version-file: 'go.mod'
       - run: make manifests
@@ -1,5 +1,5 @@
 # Build the manager binary
-FROM --platform=$BUILDPLATFORM golang:1.22.1 as builder
+FROM --platform=$BUILDPLATFORM golang:1.22.4 as builder

 WORKDIR /workspace

Makefile (4 changed lines)
@@ -6,7 +6,7 @@ endif
 DOCKER_USER ?= $(shell echo ${DOCKER_IMAGE_NAME} | cut -d / -f1)
 VERSION ?= dev
 COMMIT_SHA = $(shell git rev-parse HEAD)
-RUNNER_VERSION ?= 2.316.1
+RUNNER_VERSION ?= 2.319.1
 TARGETPLATFORM ?= $(shell arch)
 RUNNER_NAME ?= ${DOCKER_USER}/actions-runner
 RUNNER_TAG ?= ${VERSION}
@@ -310,7 +310,7 @@ github-release: release
 # Otherwise we get errors like the below:
 # Error: failed to install CRD crds/actions.summerwind.dev_runnersets.yaml: CustomResourceDefinition.apiextensions.k8s.io "runnersets.actions.summerwind.dev" is invalid: [spec.validation.openAPIV3Schema.properties[spec].properties[template].properties[spec].properties[containers].items.properties[ports].items.properties[protocol].default: Required value: this property is in x-kubernetes-list-map-keys, so it must have a default or be a required property, spec.validation.openAPIV3Schema.properties[spec].properties[template].properties[spec].properties[initContainers].items.properties[ports].items.properties[protocol].default: Required value: this property is in x-kubernetes-list-map-keys, so it must have a default or be a required property]
 #
-# Note that controller-gen newer than 0.6.0 is needed due to https://github.com/kubernetes-sigs/controller-tools/issues/448
+# Note that controller-gen newer than 0.6.1 is needed due to https://github.com/kubernetes-sigs/controller-tools/issues/448
 # Otherwise ObjectMeta embedded in Spec results in empty on the storage.
 controller-gen:
 ifeq (, $(shell which controller-gen))
@@ -15,13 +15,13 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.9.2
+version: 0.9.3

 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to
 # follow Semantic Versioning. They should reflect the version the application is using.
 # It is recommended to use it with quotes.
-appVersion: "0.9.2"
+appVersion: "0.9.3"

 home: https://github.com/actions/actions-runner-controller

@@ -79,6 +79,9 @@ spec:
           - "--listener-metrics-endpoint="
           - "--metrics-addr=0"
           {{- end }}
+          {{- range .Values.flags.excludeLabelPropagationPrefixes }}
+          - "--exclude-label-propagation-prefix={{ . }}"
+          {{- end }}
         command:
           - "/manager"
         {{- with .Values.metrics }}
@@ -1035,3 +1035,41 @@ func TestControllerDeployment_MetricsPorts(t *testing.T) {
         assert.Equal(t, value.frequency, 1, fmt.Sprintf("frequency of %q is not 1", key))
     }
 }
+
+func TestDeployment_excludeLabelPropagationPrefixes(t *testing.T) {
+    t.Parallel()
+
+    // Path to the helm chart we will test
+    helmChartPath, err := filepath.Abs("../../gha-runner-scale-set-controller")
+    require.NoError(t, err)
+
+    chartContent, err := os.ReadFile(filepath.Join(helmChartPath, "Chart.yaml"))
+    require.NoError(t, err)
+
+    chart := new(Chart)
+    err = yaml.Unmarshal(chartContent, chart)
+    require.NoError(t, err)
+
+    releaseName := "test-arc"
+    namespaceName := "test-" + strings.ToLower(random.UniqueId())
+
+    options := &helm.Options{
+        Logger: logger.Discard,
+        SetValues: map[string]string{
+            "flags.excludeLabelPropagationPrefixes[0]": "prefix.com/",
+            "flags.excludeLabelPropagationPrefixes[1]": "complete.io/label",
+        },
+        KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
+    }
+
+    output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/deployment.yaml"})
+
+    var deployment appsv1.Deployment
+    helm.UnmarshalK8SYaml(t, output, &deployment)
+
+    require.Len(t, deployment.Spec.Template.Spec.Containers, 1, "Expected one container")
+    container := deployment.Spec.Template.Spec.Containers[0]
+
+    assert.Contains(t, container.Args, "--exclude-label-propagation-prefix=prefix.com/")
+    assert.Contains(t, container.Args, "--exclude-label-propagation-prefix=complete.io/label")
+}
@@ -121,3 +121,12 @@ flags:
   ## This can lead to a longer time to apply the change but it will ensure
   ## that you don't have any overprovisioning of runners.
   updateStrategy: "immediate"
+
+  ## Defines a list of prefixes that should not be propagated to internal resources.
+  ## This is useful when you have labels that are used for internal purposes and should not be propagated to internal resources.
+  ## See https://github.com/actions/actions-runner-controller/issues/3533 for more information.
+  ##
+  ## By default, all labels are propagated to internal resources
+  ## Labels that match prefix specified in the list are excluded from propagation.
+  # excludeLabelPropagationPrefixes:
+  #   - "argocd.argoproj.io/instance"
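The values comments above describe prefix matching on label keys: any label whose key starts with a configured prefix is left off the resources the controller creates, while the rest are propagated as before. As a minimal Go sketch of that matching rule only — the filterLabels helper below is hypothetical and is not the controller's actual code; the real behaviour is wired through the --exclude-label-propagation-prefix flag added to the deployment template earlier in this diff:

package main

import (
	"fmt"
	"strings"
)

// filterLabels returns a copy of labels without any key that matches one of
// the configured prefixes. Hypothetical helper, for illustration only.
func filterLabels(labels map[string]string, excludePrefixes []string) map[string]string {
	out := make(map[string]string, len(labels))
	for k, v := range labels {
		excluded := false
		for _, p := range excludePrefixes {
			if strings.HasPrefix(k, p) {
				excluded = true
				break
			}
		}
		if !excluded {
			out[k] = v
		}
	}
	return out
}

func main() {
	labels := map[string]string{
		"app.kubernetes.io/name":      "gha-rs-controller",
		"argocd.argoproj.io/instance": "arc",
	}
	// With the prefix from the commented example above, the ArgoCD label is dropped.
	fmt.Println(filterLabels(labels, []string{"argocd.argoproj.io/instance"}))
	// Output: map[app.kubernetes.io/name:gha-rs-controller]
}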
@@ -15,13 +15,13 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.9.2
+version: 0.9.3

 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to
 # follow Semantic Versioning. They should reflect the version the application is using.
 # It is recommended to use it with quotes.
-appVersion: "0.9.2"
+appVersion: "0.9.3"

 home: https://github.com/actions/actions-runner-controller

@@ -11,7 +11,6 @@ import (
     "github.com/actions/actions-runner-controller/cmd/ghalistener/worker"
     "github.com/actions/actions-runner-controller/github/actions"
     "github.com/go-logr/logr"
-    "go.opentelemetry.io/otel"
     "golang.org/x/sync/errgroup"
 )

@@ -106,9 +105,6 @@ func New(config config.Config) (*App, error) {
 }

 func (app *App) Run(ctx context.Context) error {
-    ctx, span := otel.Tracer("arc").Start(ctx, "App.Run")
-    defer span.End()
-
     var errs []error
     if app.worker == nil {
         errs = append(errs, fmt.Errorf("worker not initialized"))
@@ -7,7 +7,6 @@ import (

     listener "github.com/actions/actions-runner-controller/cmd/ghalistener/listener"
     mock "github.com/stretchr/testify/mock"
-    "go.opentelemetry.io/otel"
 )

 // Listener is an autogenerated mock type for the Listener type
@@ -17,9 +16,6 @@ type Listener struct {

 // Listen provides a mock function with given fields: ctx, handler
 func (_m *Listener) Listen(ctx context.Context, handler listener.Handler) error {
-    ctx, span := otel.Tracer("arc").Start(ctx, "Listener.Listen")
-    defer span.End()
-
     ret := _m.Called(ctx, handler)

     var r0 error
|||||||
@@ -4,7 +4,6 @@ package mocks
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
actions "github.com/actions/actions-runner-controller/github/actions"
|
actions "github.com/actions/actions-runner-controller/github/actions"
|
||||||
"go.opentelemetry.io/otel"
|
|
||||||
|
|
||||||
context "context"
|
context "context"
|
||||||
|
|
||||||
@@ -18,9 +17,6 @@ type Worker struct {
|
|||||||
|
|
||||||
// HandleDesiredRunnerCount provides a mock function with given fields: ctx, count, acquireCount
|
// HandleDesiredRunnerCount provides a mock function with given fields: ctx, count, acquireCount
|
||||||
func (_m *Worker) HandleDesiredRunnerCount(ctx context.Context, count int, acquireCount int) (int, error) {
|
func (_m *Worker) HandleDesiredRunnerCount(ctx context.Context, count int, acquireCount int) (int, error) {
|
||||||
ctx, span := otel.Tracer("arc").Start(ctx, "Worker.HandleDesiredRunnerCount")
|
|
||||||
defer span.End()
|
|
||||||
|
|
||||||
ret := _m.Called(ctx, count, acquireCount)
|
ret := _m.Called(ctx, count, acquireCount)
|
||||||
|
|
||||||
var r0 int
|
var r0 int
|
||||||
@@ -45,9 +41,6 @@ func (_m *Worker) HandleDesiredRunnerCount(ctx context.Context, count int, acqui
|
|||||||
|
|
||||||
// HandleJobStarted provides a mock function with given fields: ctx, jobInfo
|
// HandleJobStarted provides a mock function with given fields: ctx, jobInfo
|
||||||
func (_m *Worker) HandleJobStarted(ctx context.Context, jobInfo *actions.JobStarted) error {
|
func (_m *Worker) HandleJobStarted(ctx context.Context, jobInfo *actions.JobStarted) error {
|
||||||
ctx, span := otel.Tracer("arc").Start(ctx, "Worker.HandleJobStarted")
|
|
||||||
defer span.End()
|
|
||||||
|
|
||||||
ret := _m.Called(ctx, jobInfo)
|
ret := _m.Called(ctx, jobInfo)
|
||||||
|
|
||||||
var r0 error
|
var r0 error
|
||||||
|
|||||||
@@ -7,16 +7,12 @@ import (
     "fmt"
     "net/http"
     "os"
-    "sync"
     "time"

     "github.com/actions/actions-runner-controller/cmd/ghalistener/metrics"
     "github.com/actions/actions-runner-controller/github/actions"
     "github.com/go-logr/logr"
     "github.com/google/uuid"
-    "go.opentelemetry.io/otel"
-    "go.opentelemetry.io/otel/trace"
-    "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
 )

 const (
@@ -129,9 +125,6 @@ type Handler interface {
 // The handler is responsible for handling the initial message and subsequent messages.
 // If an error occurs during any step, Listen returns an error.
 func (l *Listener) Listen(ctx context.Context, handler Handler) error {
-    ctx, span := otel.Tracer("arc").Start(ctx, "Listener.Listen")
-    defer span.End()
-
     if err := l.createSession(ctx); err != nil {
         return fmt.Errorf("createSession failed: %w", err)
     }
@@ -167,149 +160,34 @@ func (l *Listener) Listen(ctx context.Context, handler Handler) error {
         default:
         }

-        ctx, span := otel.Tracer("arc").Start(ctx, "Listener.Listen.loop", trace.WithNewRoot())
-
         msg, err := l.getMessage(ctx)
         if err != nil {
-            span.End()
             return fmt.Errorf("failed to get message: %w", err)
         }

         if msg == nil {
             _, err := handler.HandleDesiredRunnerCount(ctx, 0, 0)
             if err != nil {
-                span.End()
                 return fmt.Errorf("handling nil message failed: %w", err)
             }

-            span.End()
             continue
         }

         // Remove cancellation from the context to avoid cancelling the message handling.
         if err := l.handleMessage(context.WithoutCancel(ctx), handler, msg); err != nil {
-            span.End()
             return fmt.Errorf("failed to handle message: %w", err)
         }
-
-        span.End()
-    }
-}
-
-type tracedJob struct {
-    jobSpan             tracer.Span
-    runnerSetAssignSpan tracer.Span
-    runnerAssignSpan    tracer.Span
-    runnerRunJobSpan    tracer.Span
-}
-
-var mu sync.Mutex
-var tracedJobs map[string]*tracedJob
-
-func (l *Listener) progressTraces(parsedMsg *parsedMessage) {
-    mu.Lock()
-    defer mu.Unlock()
-
-    if tracedJobs == nil {
-        tracedJobs = make(map[string]*tracedJob)
-    }
-
-    for _, j := range parsedMsg.jobsAvailable {
-        jobSpan := tracer.StartSpan(
-            "GitHub Actions Workflow Run",
-            tracer.StartTime(j.QueueTime),
-            tracer.Tag("runner_request_id", fmt.Sprintf("%d", j.RunnerRequestId)),
-            tracer.Tag("repository_name", j.RepositoryName),
-            tracer.Tag("owner_name", j.OwnerName),
-            tracer.Tag("workflow_ref", fmt.Sprintf("%s", j.JobWorkflowRef)),
-            tracer.Tag("workflow_run_id", fmt.Sprintf("%d", j.WorkflowRunId)),
-        )
-
-        runnerSetAssignSpan := tracer.StartSpan(
-            "runnerSetAssign",
-            tracer.ChildOf(jobSpan.Context()),
-            tracer.StartTime(j.QueueTime),
-            tracer.Tag("runner_request_id", fmt.Sprintf("%d", j.RunnerRequestId)),
-            tracer.Tag("repository_name", j.RepositoryName),
-            tracer.Tag("owner_name", j.OwnerName),
-        )
-
-        reqID := fmt.Sprintf("%d", j.RunnerRequestId)
-        tracedJobs[reqID] = &tracedJob{
-            jobSpan:             jobSpan,
-            runnerSetAssignSpan: runnerSetAssignSpan,
-        }
-
-        l.logger.Info("Listener.progressTraces: Job available", "queueTime", j.QueueTime, "runnerAssignTime", j.ScaleSetAssignTime, "requestLabels", j.RequestLabels, "now", time.Now())
-    }
-
-    for _, j := range parsedMsg.jobsStarted {
-        reqID := fmt.Sprintf("%d", j.RunnerRequestId)
-        t := tracedJobs[reqID]
-        if t == nil {
-            s := tracer.StartSpan(fmt.Sprintf("%s", j.JobWorkflowRef), tracer.StartTime(j.QueueTime))
-            tracedJobs[reqID] = &tracedJob{jobSpan: s}
-
-            l.logger.Error(errors.New("job and runnerSetAssign spans have not started yet"), "runnerRequestId", j.RunnerRequestId)
-        } else {
-            if t.runnerSetAssignSpan == nil {
-                l.logger.Error(errors.New("runnerSetAssignSpan has not started yet"), "runnerRequestId", j.RunnerRequestId)
-            } else {
-                t.runnerSetAssignSpan.Finish(tracer.FinishTime(j.RunnerAssignTime))
-            }
-
-            t.runnerAssignSpan = tracer.StartSpan(
-                "runnerAssign",
-                tracer.ChildOf(t.jobSpan.Context()),
-                tracer.StartTime(j.RunnerAssignTime),
-            )
-            now := time.Now()
-            t.runnerAssignSpan.Finish(tracer.FinishTime(now))
-
-            t.runnerRunJobSpan = tracer.StartSpan(
-                "runnerRunJob",
-                tracer.ChildOf(t.jobSpan.Context()),
-                tracer.StartTime(now),
-            )
-
-            l.logger.Info("Listener.progressTraces: Job started", "queueTime", j.QueueTime, "runnerAssignTime", j.RunnerAssignTime, "requestLabels", j.RequestLabels, "now", now)
-        }
-    }
-
-    for _, j := range parsedMsg.jobsCompleted {
-        reqID := fmt.Sprintf("%d", j.RunnerRequestId)
-        t := tracedJobs[reqID]
-        if t == nil {
-            s := tracer.StartSpan(fmt.Sprintf("%s", j.JobWorkflowRef), tracer.StartTime(j.QueueTime))
-            t = &tracedJob{jobSpan: s}
-            tracedJobs[reqID] = t
-
-            l.logger.Error(errors.New("job, runnerSetAssign and runnerAssign spans have not started yet"), "runnerRequestId", j.RunnerRequestId)
-        } else {
-            if t.runnerRunJobSpan == nil {
-                l.logger.Error(errors.New("runnerRunJobSPan has not started yet"), "runnerRequestId", j.RunnerRequestId)
-            } else {
-                t.runnerRunJobSpan.Finish(tracer.FinishTime(j.FinishTime))
-            }
-        }
-        s := t.jobSpan
-        s.Finish(tracer.FinishTime(j.FinishTime))
-        delete(tracedJobs, reqID)
     }
 }

 func (l *Listener) handleMessage(ctx context.Context, handler Handler, msg *actions.RunnerScaleSetMessage) error {
-    ctx, span := otel.Tracer("arc").Start(ctx, "Listener.handleMessage")
-    defer span.End()
-
     parsedMsg, err := l.parseMessage(ctx, msg)
     if err != nil {
         return fmt.Errorf("failed to parse message: %w", err)
     }
     l.metrics.PublishStatistics(parsedMsg.statistics)

-    l.progressTraces(parsedMsg)
-
     if len(parsedMsg.jobsAvailable) > 0 {
         acquiredJobIDs, err := l.acquireAvailableJobs(ctx, parsedMsg.jobsAvailable)
         if err != nil {
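The hunk above keeps the `// Remove cancellation from the context` comment and the context.WithoutCancel(ctx) call. WithoutCancel is a standard-library helper (Go 1.21+) that returns a context carrying the parent's values but never cancelled when the parent is, so an in-flight handleMessage call is not aborted by listener shutdown. A minimal standalone sketch of that behaviour (variable names here are illustrative, not taken from the listener):

package main

import (
	"context"
	"fmt"
)

func main() {
	parent, cancel := context.WithCancel(context.Background())

	// detached ignores parent's cancellation but still carries its values.
	detached := context.WithoutCancel(parent)

	cancel() // cancel the parent, as the listener's loop context would be on shutdown

	fmt.Println(parent.Err())   // prints "context canceled"
	fmt.Println(detached.Err()) // prints "<nil>" - work using detached can finish
}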
@@ -345,9 +223,6 @@ func (l *Listener) handleMessage(ctx context.Context, handler Handler, msg *acti
 }

 func (l *Listener) createSession(ctx context.Context) error {
-    ctx, span := otel.Tracer("arc").Start(ctx, "Listener.createSession")
-    defer span.End()
-
     var session *actions.RunnerScaleSetSession
     var retries int

@@ -393,9 +268,6 @@ func (l *Listener) createSession(ctx context.Context) error {
 }

 func (l *Listener) getMessage(ctx context.Context) (*actions.RunnerScaleSetMessage, error) {
-    ctx, span := otel.Tracer("arc").Start(ctx, "Listener.getMessage")
-    defer span.End()
-
     l.logger.Info("Getting next message", "lastMessageID", l.lastMessageID)
     msg, err := l.client.GetMessage(ctx, l.session.MessageQueueUrl, l.session.MessageQueueAccessToken, l.lastMessageID, l.maxCapacity)
     if err == nil { // if NO error
@@ -422,9 +294,6 @@ func (l *Listener) getMessage(ctx context.Context) (*actions.RunnerScaleSetMessa
 }

 func (l *Listener) deleteLastMessage(ctx context.Context) error {
-    ctx, span := otel.Tracer("arc").Start(ctx, "Listener.deleteLastMessage")
-    defer span.End()
-
     l.logger.Info("Deleting last message", "lastMessageID", l.lastMessageID)
     err := l.client.DeleteMessage(ctx, l.session.MessageQueueUrl, l.session.MessageQueueAccessToken, l.lastMessageID)
     if err == nil { // if NO error
@@ -456,9 +325,6 @@ type parsedMessage struct {
 }

 func (l *Listener) parseMessage(ctx context.Context, msg *actions.RunnerScaleSetMessage) (*parsedMessage, error) {
-    ctx, span := otel.Tracer("arc").Start(ctx, "Listener.parseMessage")
-    defer span.End()
-
     if msg.MessageType != "RunnerScaleSetJobMessages" {
         l.logger.Info("Skipping message", "messageType", msg.MessageType)
         return nil, fmt.Errorf("invalid message type: %s", msg.MessageType)
@@ -532,9 +398,6 @@ func (l *Listener) parseMessage(ctx context.Context, msg *actions.RunnerScaleSet
 }

 func (l *Listener) acquireAvailableJobs(ctx context.Context, jobsAvailable []*actions.JobAvailable) ([]int64, error) {
-    ctx, span := otel.Tracer("arc").Start(ctx, "Listener.acquireAvailableJobs", trace.WithLinks())
-    defer span.End()
-
     ids := make([]int64, 0, len(jobsAvailable))
     for _, job := range jobsAvailable {
         ids = append(ids, job.RunnerRequestId)
@@ -565,9 +428,6 @@ func (l *Listener) acquireAvailableJobs(ctx context.Context, jobsAvailable []*ac
 }

 func (l *Listener) refreshSession(ctx context.Context) error {
-    ctx, span := otel.Tracer("arc").Start(ctx, "Listener.refreshSession")
-    defer span.End()
-
     l.logger.Info("Message queue token is expired during GetNextMessage, refreshing...")
     session, err := l.client.RefreshMessageSession(ctx, l.session.RunnerScaleSet.Id, l.session.SessionId)
     if err != nil {
@@ -6,7 +6,6 @@ import (
     context "context"

     actions "github.com/actions/actions-runner-controller/github/actions"
-    "go.opentelemetry.io/otel"

     mock "github.com/stretchr/testify/mock"

@@ -20,9 +19,6 @@ type Client struct {

 // AcquireJobs provides a mock function with given fields: ctx, runnerScaleSetId, messageQueueAccessToken, requestIds
 func (_m *Client) AcquireJobs(ctx context.Context, runnerScaleSetId int, messageQueueAccessToken string, requestIds []int64) ([]int64, error) {
-    ctx, span := otel.Tracer("arc").Start(ctx, "Client.AcquireJobs")
-    defer span.End()
-
     ret := _m.Called(ctx, runnerScaleSetId, messageQueueAccessToken, requestIds)

     var r0 []int64
@@ -49,9 +45,6 @@ func (_m *Client) AcquireJobs(ctx context.Context, runnerScaleSetId int, message

 // CreateMessageSession provides a mock function with given fields: ctx, runnerScaleSetId, owner
 func (_m *Client) CreateMessageSession(ctx context.Context, runnerScaleSetId int, owner string) (*actions.RunnerScaleSetSession, error) {
-    ctx, span := otel.Tracer("arc").Start(ctx, "Client.CreateMessageSession")
-    defer span.End()
-
     ret := _m.Called(ctx, runnerScaleSetId, owner)

     var r0 *actions.RunnerScaleSetSession
@@ -78,9 +71,6 @@ func (_m *Client) CreateMessageSession(ctx context.Context, runnerScaleSetId int

 // DeleteMessage provides a mock function with given fields: ctx, messageQueueUrl, messageQueueAccessToken, messageId
 func (_m *Client) DeleteMessage(ctx context.Context, messageQueueUrl string, messageQueueAccessToken string, messageId int64) error {
-    ctx, span := otel.Tracer("arc").Start(ctx, "Client.DeleteMessage")
-    defer span.End()
-
     ret := _m.Called(ctx, messageQueueUrl, messageQueueAccessToken, messageId)

     var r0 error
@@ -95,9 +85,6 @@ func (_m *Client) DeleteMessage(ctx context.Context, messageQueueUrl string, mes

 // DeleteMessageSession provides a mock function with given fields: ctx, runnerScaleSetId, sessionId
 func (_m *Client) DeleteMessageSession(ctx context.Context, runnerScaleSetId int, sessionId *uuid.UUID) error {
-    ctx, span := otel.Tracer("arc").Start(ctx, "Client.DeleteMessageSession")
-    defer span.End()
-
     ret := _m.Called(ctx, runnerScaleSetId, sessionId)

     var r0 error
@@ -112,9 +99,6 @@ func (_m *Client) DeleteMessageSession(ctx context.Context, runnerScaleSetId int

 // GetAcquirableJobs provides a mock function with given fields: ctx, runnerScaleSetId
 func (_m *Client) GetAcquirableJobs(ctx context.Context, runnerScaleSetId int) (*actions.AcquirableJobList, error) {
-    ctx, span := otel.Tracer("arc").Start(ctx, "Client.GetAcquirableJobs")
-    defer span.End()
-
     ret := _m.Called(ctx, runnerScaleSetId)

     var r0 *actions.AcquirableJobList
@@ -141,9 +125,6 @@ func (_m *Client) GetAcquirableJobs(ctx context.Context, runnerScaleSetId int) (

 // GetMessage provides a mock function with given fields: ctx, messageQueueUrl, messageQueueAccessToken, lastMessageId, maxCapacity
 func (_m *Client) GetMessage(ctx context.Context, messageQueueUrl string, messageQueueAccessToken string, lastMessageId int64, maxCapacity int) (*actions.RunnerScaleSetMessage, error) {
-    ctx, span := otel.Tracer("arc").Start(ctx, "Client.GetMessage")
-    defer span.End()
-
     ret := _m.Called(ctx, messageQueueUrl, messageQueueAccessToken, lastMessageId, maxCapacity)

     var r0 *actions.RunnerScaleSetMessage
@@ -170,9 +151,6 @@ func (_m *Client) GetMessage(ctx context.Context, messageQueueUrl string, messag

 // RefreshMessageSession provides a mock function with given fields: ctx, runnerScaleSetId, sessionId
 func (_m *Client) RefreshMessageSession(ctx context.Context, runnerScaleSetId int, sessionId *uuid.UUID) (*actions.RunnerScaleSetSession, error) {
-    ctx, span := otel.Tracer("arc").Start(ctx, "Client.RefreshMessageSession")
-    defer span.End()
-
     ret := _m.Called(ctx, runnerScaleSetId, sessionId)

     var r0 *actions.RunnerScaleSetSession
@@ -6,7 +6,6 @@ import (
     context "context"

     actions "github.com/actions/actions-runner-controller/github/actions"
-    "go.opentelemetry.io/otel"

     mock "github.com/stretchr/testify/mock"
 )
@@ -18,9 +17,6 @@ type Handler struct {

 // HandleDesiredRunnerCount provides a mock function with given fields: ctx, count, jobsCompleted
 func (_m *Handler) HandleDesiredRunnerCount(ctx context.Context, count int, jobsCompleted int) (int, error) {
-    ctx, span := otel.Tracer("arc").Start(ctx, "Handler.HandleDesiredRunnerCount")
-    defer span.End()
-
     ret := _m.Called(ctx, count, jobsCompleted)

     var r0 int
@@ -45,9 +41,6 @@ func (_m *Handler) HandleDesiredRunnerCount(ctx context.Context, count int, jobs

 // HandleJobStarted provides a mock function with given fields: ctx, jobInfo
 func (_m *Handler) HandleJobStarted(ctx context.Context, jobInfo *actions.JobStarted) error {
-    ctx, span := otel.Tracer("arc").Start(ctx, "Handler.HandleJobStarted")
-    defer span.End()
-
     ret := _m.Called(ctx, jobInfo)

     var r0 error
@@ -10,30 +10,9 @@ import (

     "github.com/actions/actions-runner-controller/cmd/ghalistener/app"
     "github.com/actions/actions-runner-controller/cmd/ghalistener/config"
-
-    "go.opentelemetry.io/otel"
-    ddotel "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/opentelemetry"
-
-    "go.opentelemetry.io/otel/exporters/stdout/stdoutlog"
-    "go.opentelemetry.io/otel/log/global"
-    otellog "go.opentelemetry.io/otel/sdk/log"
 )

 func main() {
-    provider := ddotel.NewTracerProvider()
-    defer provider.Shutdown()
-
-    otel.SetTracerProvider(provider)
-
-    loggerProvider, err := newLoggerProvider()
-    if err != nil {
-        return
-    }
-
-    global.SetLoggerProvider(loggerProvider)
-
-    log.Printf("Enabled OpenTelemetry Tracing")
-
     configPath, ok := os.LookupEnv("LISTENER_CONFIG_PATH")
     if !ok {
         fmt.Fprintf(os.Stderr, "Error: LISTENER_CONFIG_PATH environment variable is not set\n")
@@ -59,15 +38,3 @@ func main() {
         os.Exit(1)
     }
 }
-
-func newLoggerProvider() (*otellog.LoggerProvider, error) {
-    logExporter, err := stdoutlog.New()
-    if err != nil {
-        return nil, err
-    }
-
-    loggerProvider := otellog.NewLoggerProvider(
-        otellog.WithProcessor(otellog.NewBatchProcessor(logExporter)),
-    )
-    return loggerProvider, nil
-}
@@ -3,7 +3,6 @@ package metrics
 import (
     "context"
     "net/http"
-    "os"
     "strconv"
     "time"

@@ -11,7 +10,6 @@ import (
     "github.com/go-logr/logr"
     "github.com/prometheus/client_golang/prometheus"
     "github.com/prometheus/client_golang/prometheus/promhttp"
-    "go.opentelemetry.io/otel"
 )

 const (
@@ -47,25 +45,14 @@ var (
         labelKeyJobName,
         labelKeyJobWorkflowRef,
         labelKeyEventName,
-        labelKeyRunnerID,
-        labelKeyRunnerName,
     }

-    completedJobLabels []string
-    includeRunnerScaleSetNameInJobLabels = false
+    completedJobsTotalLabels   = append(jobLabels, labelKeyJobResult, labelKeyRunnerID, labelKeyRunnerName)
+    jobExecutionDurationLabels = append(jobLabels, labelKeyJobResult, labelKeyRunnerID, labelKeyRunnerName)
+    startedJobsTotalLabels     = append(jobLabels, labelKeyRunnerID, labelKeyRunnerName)
+    jobStartupDurationLabels   = append(jobLabels, labelKeyRunnerID, labelKeyRunnerName)
 )

-func init() {
-    if os.Getenv("INCLUDE_RUNNER_SCALE_SET_NAME_IN_JOB_LABELS") == "true" {
-        includeRunnerScaleSetNameInJobLabels = true
-
-        jobLabels = append(jobLabels, labelKeyRunnerScaleSetName)
-    }
-
-    completedJobLabels = append([]string{}, append(jobLabels, labelKeyJobResult)...)
-}
-
 var (
     assignedJobs = prometheus.NewGaugeVec(
         prometheus.GaugeOpts{
@@ -145,7 +132,7 @@ var (
             Name: "started_jobs_total",
             Help: "Total number of jobs started.",
         },
-        jobLabels,
+        startedJobsTotalLabels,
     )

     completedJobsTotal = prometheus.NewCounterVec(
@@ -154,7 +141,7 @@ var (
             Help:      "Total number of jobs completed.",
             Subsystem: githubScaleSetSubsystem,
         },
-        completedJobLabels,
+        completedJobsTotalLabels,
     )

     jobStartupDurationSeconds = prometheus.NewHistogramVec(
@@ -164,7 +151,7 @@ var (
             Help:    "Time spent waiting for workflow job to get started on the runner owned by the scale set (in seconds).",
             Buckets: runtimeBuckets,
         },
-        jobLabels,
+        jobStartupDurationLabels,
     )

     jobExecutionDurationSeconds = prometheus.NewHistogramVec(
@@ -174,7 +161,7 @@ var (
             Help:    "Time spent executing workflow jobs by the scale set (in seconds).",
             Buckets: runtimeBuckets,
         },
-        completedJobLabels,
+        jobExecutionDurationLabels,
     )
 )

@@ -235,7 +222,7 @@ type baseLabels struct {
 }

 func (b *baseLabels) jobLabels(jobBase *actions.JobMessageBase) prometheus.Labels {
-    l := prometheus.Labels{
+    return prometheus.Labels{
         labelKeyEnterprise:   b.enterprise,
         labelKeyOrganization: jobBase.OwnerName,
         labelKeyRepository:   jobBase.RepositoryName,
@@ -243,10 +230,6 @@ func (b *baseLabels) jobLabels(jobBase *actions.JobMessageBase) prometheus.Label
         labelKeyJobWorkflowRef: jobBase.JobWorkflowRef,
         labelKeyEventName:      jobBase.EventName,
     }
-
-    if includeRunnerScaleSetNameInJobLabels {
-        l[labelKeyRunnerScaleSetName] = b.scaleSetName
-    }
 }

 func (b *baseLabels) scaleSetLabels() prometheus.Labels {
@@ -353,9 +336,6 @@ func NewExporter(config ExporterConfig) ServerPublisher {
 }

 func (e *exporter) ListenAndServe(ctx context.Context) error {
-    ctx, span := otel.Tracer("arc").Start(ctx, "exporter.ListenAndServe")
-    defer span.End()
-
     e.logger.Info("starting metrics server", "addr", e.srv.Addr)
     go func() {
         <-ctx.Done()
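The label changes above follow one pattern: each metric vector is registered with its own label-name slice built on top of the shared base set, and every observation must then supply exactly those labels. A small self-contained sketch of that pattern with the Prometheus client library (the metric and label names below are invented for illustration and are not the exporter's real ones):

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

var (
	// Shared base labels, mirroring the idea behind jobLabels above.
	baseLabels = []string{"organization", "repository", "job_name"}

	// Per-metric label set: copy the base set, then add metric-specific keys.
	completedLabels = append(append([]string{}, baseLabels...), "job_result", "runner_name")

	completedTotal = prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "example_completed_jobs_total",
			Help: "Illustrative counter; not one of the exporter's real metrics.",
		},
		completedLabels,
	)
)

func main() {
	prometheus.MustRegister(completedTotal)

	// Every label declared on the vector must be supplied at observation time.
	completedTotal.With(prometheus.Labels{
		"organization": "acme",
		"repository":   "widgets",
		"job_name":     "build",
		"job_result":   "succeeded",
		"runner_name":  "runner-abc",
	}).Inc()

	fmt.Println("recorded one completed job")
}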
@@ -6,7 +6,6 @@ import (
     context "context"

     actions "github.com/actions/actions-runner-controller/github/actions"
-    "go.opentelemetry.io/otel"

     mock "github.com/stretchr/testify/mock"
 )
@@ -18,9 +17,6 @@ type ServerPublisher struct {

 // ListenAndServe provides a mock function with given fields: ctx
 func (_m *ServerPublisher) ListenAndServe(ctx context.Context) error {
-    ctx, span := otel.Tracer("arc").Start(ctx, "ServerPublisher.ListenAndServe")
-    defer span.End()
-
     ret := _m.Called(ctx)

     var r0 error
@@ -11,8 +11,6 @@ import (
     "github.com/actions/actions-runner-controller/logging"
     jsonpatch "github.com/evanphx/json-patch"
     "github.com/go-logr/logr"
-    "go.opentelemetry.io/otel"
-    "go.opentelemetry.io/otel/attribute"
     kerrors "k8s.io/apimachinery/pkg/api/errors"
     "k8s.io/apimachinery/pkg/types"
     "k8s.io/client-go/kubernetes"
@@ -98,16 +96,6 @@ func (w *Worker) applyDefaults() error {
 // about the ephemeral runner that should not be deleted when scaling down.
 // It returns an error if there is any issue with updating the job information.
 func (w *Worker) HandleJobStarted(ctx context.Context, jobInfo *actions.JobStarted) error {
-    ctx, span := otel.Tracer("arc").Start(ctx, "Worker.HandleJobStarted")
-    defer span.End()
-
-    span.SetAttributes(
-        attribute.String("runner.name", jobInfo.RunnerName),
-        attribute.String("runner.repo.name", jobInfo.RepositoryName),
-        attribute.String("workflow.ref", jobInfo.JobWorkflowRef),
-        attribute.Int64("workflow.run.id", jobInfo.WorkflowRunId),
-    )
-
     w.logger.Info("Updating job info for the runner",
         "runnerName", jobInfo.RunnerName,
         "ownerName", jobInfo.OwnerName,
@@ -176,9 +164,6 @@ func (w *Worker) HandleJobStarted(ctx context.Context, jobInfo *actions.JobStart
 // Finally, it logs the scaled ephemeral runner set details and returns nil if successful.
 // If any error occurs during the process, it returns an error with a descriptive message.
 func (w *Worker) HandleDesiredRunnerCount(ctx context.Context, count, jobsCompleted int) (int, error) {
-    ctx, span := otel.Tracer("arc").Start(ctx, "Worker.HandleDesiredRunnerCount")
-    defer span.End()
-
     patchID := w.setDesiredWorkerState(count, jobsCompleted)

     original, err := json.Marshal(
@@ -8,7 +8,6 @@ import (
|
|||||||
"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
|
"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
|
||||||
jsonpatch "github.com/evanphx/json-patch"
|
jsonpatch "github.com/evanphx/json-patch"
|
||||||
"github.com/go-logr/logr"
|
"github.com/go-logr/logr"
|
||||||
"go.opentelemetry.io/otel"
|
|
||||||
"k8s.io/apimachinery/pkg/types"
|
"k8s.io/apimachinery/pkg/types"
|
||||||
"k8s.io/client-go/kubernetes"
|
"k8s.io/client-go/kubernetes"
|
||||||
"k8s.io/client-go/rest"
|
"k8s.io/client-go/rest"
|
||||||
@@ -39,9 +38,6 @@ func NewKubernetesManager(logger *logr.Logger) (*AutoScalerKubernetesManager, er
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (k *AutoScalerKubernetesManager) ScaleEphemeralRunnerSet(ctx context.Context, namespace, resourceName string, runnerCount int) error {
|
func (k *AutoScalerKubernetesManager) ScaleEphemeralRunnerSet(ctx context.Context, namespace, resourceName string, runnerCount int) error {
|
||||||
ctx, span := otel.Tracer("arc").Start(ctx, "AutoScalerKubernetesManager.ScaleEphemeralRunnerSet")
|
|
||||||
defer span.End()
|
|
||||||
|
|
||||||
original := &v1alpha1.EphemeralRunnerSet{
|
original := &v1alpha1.EphemeralRunnerSet{
|
||||||
Spec: v1alpha1.EphemeralRunnerSetSpec{
|
Spec: v1alpha1.EphemeralRunnerSetSpec{
|
||||||
Replicas: -1,
|
Replicas: -1,
|
||||||
@@ -87,9 +83,6 @@ func (k *AutoScalerKubernetesManager) ScaleEphemeralRunnerSet(ctx context.Contex
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (k *AutoScalerKubernetesManager) UpdateEphemeralRunnerWithJobInfo(ctx context.Context, namespace, resourceName, ownerName, repositoryName, jobWorkflowRef, jobDisplayName string, workflowRunId, jobRequestId int64) error {
|
func (k *AutoScalerKubernetesManager) UpdateEphemeralRunnerWithJobInfo(ctx context.Context, namespace, resourceName, ownerName, repositoryName, jobWorkflowRef, jobDisplayName string, workflowRunId, jobRequestId int64) error {
|
||||||
ctx, span := otel.Tracer("arc").Start(ctx, "AutoScalerKubernetesManager.UpdateEphemeralRunnerWithJobInfo")
|
|
||||||
defer span.End()
|
|
||||||
|
|
||||||
original := &v1alpha1.EphemeralRunner{}
|
original := &v1alpha1.EphemeralRunner{}
|
||||||
originalJson, err := json.Marshal(original)
|
originalJson, err := json.Marshal(original)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|||||||
@@ -13,7 +13,6 @@ import (
|
|||||||
"github.com/go-logr/logr"
|
"github.com/go-logr/logr"
|
||||||
"github.com/google/uuid"
|
"github.com/google/uuid"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
"go.opentelemetry.io/otel"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
@@ -39,9 +38,6 @@ func NewAutoScalerClient(
|
|||||||
runnerScaleSetId int,
|
runnerScaleSetId int,
|
||||||
options ...func(*AutoScalerClient),
|
options ...func(*AutoScalerClient),
|
||||||
) (*AutoScalerClient, error) {
|
) (*AutoScalerClient, error) {
|
||||||
ctx, span := otel.Tracer("arc").Start(ctx, "NewAutoScalerClient")
|
|
||||||
defer span.End()
|
|
||||||
|
|
||||||
listener := AutoScalerClient{
|
listener := AutoScalerClient{
|
||||||
logger: logger.WithName("auto_scaler"),
|
logger: logger.WithName("auto_scaler"),
|
||||||
}
|
}
|
||||||
@@ -63,9 +59,6 @@ func NewAutoScalerClient(
|
|||||||
}
|
}
|
||||||
|
|
||||||
func createSession(ctx context.Context, logger *logr.Logger, client actions.ActionsService, runnerScaleSetId int) (*actions.RunnerScaleSetSession, *actions.RunnerScaleSetMessage, error) {
|
func createSession(ctx context.Context, logger *logr.Logger, client actions.ActionsService, runnerScaleSetId int) (*actions.RunnerScaleSetSession, *actions.RunnerScaleSetMessage, error) {
|
||||||
ctx, span := otel.Tracer("arc").Start(ctx, "createSession")
|
|
||||||
defer span.End()
|
|
||||||
|
|
||||||
hostName, err := os.Hostname()
|
hostName, err := os.Hostname()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
hostName = uuid.New().String()
|
hostName = uuid.New().String()
|
||||||
@@ -137,9 +130,6 @@ func (m *AutoScalerClient) Close() error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (m *AutoScalerClient) GetRunnerScaleSetMessage(ctx context.Context, handler func(msg *actions.RunnerScaleSetMessage) error, maxCapacity int) error {
|
func (m *AutoScalerClient) GetRunnerScaleSetMessage(ctx context.Context, handler func(msg *actions.RunnerScaleSetMessage) error, maxCapacity int) error {
|
||||||
ctx, span := otel.Tracer("arc").Start(ctx, "AutoScalerClient.GetRunnerScaleSetMessage")
|
|
||||||
defer span.End()
|
|
||||||
|
|
||||||
if m.initialMessage != nil {
|
if m.initialMessage != nil {
|
||||||
err := handler(m.initialMessage)
|
err := handler(m.initialMessage)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -172,9 +162,6 @@ func (m *AutoScalerClient) GetRunnerScaleSetMessage(ctx context.Context, handler
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (m *AutoScalerClient) deleteMessage(ctx context.Context, messageId int64) error {
|
func (m *AutoScalerClient) deleteMessage(ctx context.Context, messageId int64) error {
|
||||||
ctx, span := otel.Tracer("arc").Start(ctx, "AutoScalerClient.deleteMessage")
|
|
||||||
defer span.End()
|
|
||||||
|
|
||||||
err := m.client.DeleteMessage(ctx, messageId)
|
err := m.client.DeleteMessage(ctx, messageId)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("delete message failed from refreshing client. %w", err)
|
return fmt.Errorf("delete message failed from refreshing client. %w", err)
|
||||||
@@ -185,9 +172,6 @@ func (m *AutoScalerClient) deleteMessage(ctx context.Context, messageId int64) e
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (m *AutoScalerClient) AcquireJobsForRunnerScaleSet(ctx context.Context, requestIds []int64) error {
|
func (m *AutoScalerClient) AcquireJobsForRunnerScaleSet(ctx context.Context, requestIds []int64) error {
|
||||||
ctx, span := otel.Tracer("arc").Start(ctx, "AutoScalerClient.AcquireJobsForRunnerScaleSet")
|
|
||||||
defer span.End()
|
|
||||||
|
|
||||||
m.logger.Info("acquiring jobs.", "request count", len(requestIds), "requestIds", fmt.Sprint(requestIds))
|
m.logger.Info("acquiring jobs.", "request count", len(requestIds), "requestIds", fmt.Sprint(requestIds))
|
||||||
if len(requestIds) == 0 {
|
if len(requestIds) == 0 {
|
||||||
return nil
|
return nil
|
||||||
|
|||||||
@@ -10,7 +10,6 @@ import (
|
|||||||
"github.com/actions/actions-runner-controller/cmd/githubrunnerscalesetlistener/config"
|
"github.com/actions/actions-runner-controller/cmd/githubrunnerscalesetlistener/config"
|
||||||
"github.com/actions/actions-runner-controller/github/actions"
|
"github.com/actions/actions-runner-controller/github/actions"
|
||||||
"github.com/go-logr/logr"
|
"github.com/go-logr/logr"
|
||||||
"go.opentelemetry.io/otel"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
type ScaleSettings struct {
|
type ScaleSettings struct {
|
||||||
@@ -61,9 +60,6 @@ func NewService(
|
|||||||
settings *ScaleSettings,
|
settings *ScaleSettings,
|
||||||
options ...func(*Service),
|
options ...func(*Service),
|
||||||
) (*Service, error) {
|
) (*Service, error) {
|
||||||
ctx, span := otel.Tracer("arc").Start(ctx, "NewService")
|
|
||||||
defer span.End()
|
|
||||||
|
|
||||||
s := &Service{
|
s := &Service{
|
||||||
ctx: ctx,
|
ctx: ctx,
|
||||||
rsClient: rsClient,
|
rsClient: rsClient,
|
||||||
|
|||||||
@@ -34,7 +34,6 @@ import (
|
|||||||
"github.com/go-logr/logr"
|
"github.com/go-logr/logr"
|
||||||
"github.com/prometheus/client_golang/prometheus"
|
"github.com/prometheus/client_golang/prometheus"
|
||||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||||
"go.opentelemetry.io/otel"
|
|
||||||
"golang.org/x/net/http/httpproxy"
|
"golang.org/x/net/http/httpproxy"
|
||||||
"golang.org/x/sync/errgroup"
|
"golang.org/x/sync/errgroup"
|
||||||
)
|
)
|
||||||
@@ -156,9 +155,6 @@ type runOptions struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func run(ctx context.Context, rc config.Config, logger logr.Logger, opts runOptions) error {
|
func run(ctx context.Context, rc config.Config, logger logr.Logger, opts runOptions) error {
|
||||||
ctx, span := otel.Tracer("arc").Start(ctx, "run")
|
|
||||||
defer span.End()
|
|
||||||
|
|
||||||
// Create root context and hook with sigint and sigterm
|
// Create root context and hook with sigint and sigterm
|
||||||
creds := &actions.ActionsAuth{}
|
creds := &actions.ActionsAuth{}
|
||||||
if rc.Token != "" {
|
if rc.Token != "" {
|
||||||
|
|||||||
@@ -6,7 +6,6 @@ import (
|
|||||||
context "context"
|
context "context"
|
||||||
|
|
||||||
mock "github.com/stretchr/testify/mock"
|
mock "github.com/stretchr/testify/mock"
|
||||||
"go.opentelemetry.io/otel"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// MockKubernetesManager is an autogenerated mock type for the KubernetesManager type
|
// MockKubernetesManager is an autogenerated mock type for the KubernetesManager type
|
||||||
@@ -16,9 +15,6 @@ type MockKubernetesManager struct {
|
|||||||
|
|
||||||
// ScaleEphemeralRunnerSet provides a mock function with given fields: ctx, namespace, resourceName, runnerCount
|
// ScaleEphemeralRunnerSet provides a mock function with given fields: ctx, namespace, resourceName, runnerCount
|
||||||
func (_m *MockKubernetesManager) ScaleEphemeralRunnerSet(ctx context.Context, namespace string, resourceName string, runnerCount int) error {
|
func (_m *MockKubernetesManager) ScaleEphemeralRunnerSet(ctx context.Context, namespace string, resourceName string, runnerCount int) error {
|
||||||
ctx, span := otel.Tracer("arc").Start(ctx, "MockKubernetesManager.ScaleEphemeralRunnerSet")
|
|
||||||
defer span.End()
|
|
||||||
|
|
||||||
ret := _m.Called(ctx, namespace, resourceName, runnerCount)
|
ret := _m.Called(ctx, namespace, resourceName, runnerCount)
|
||||||
|
|
||||||
var r0 error
|
var r0 error
|
||||||
@@ -33,9 +29,6 @@ func (_m *MockKubernetesManager) ScaleEphemeralRunnerSet(ctx context.Context, na
|
|||||||
|
|
||||||
// UpdateEphemeralRunnerWithJobInfo provides a mock function with given fields: ctx, namespace, resourceName, ownerName, repositoryName, jobWorkflowRef, jobDisplayName, jobRequestId, workflowRunId
|
// UpdateEphemeralRunnerWithJobInfo provides a mock function with given fields: ctx, namespace, resourceName, ownerName, repositoryName, jobWorkflowRef, jobDisplayName, jobRequestId, workflowRunId
|
||||||
func (_m *MockKubernetesManager) UpdateEphemeralRunnerWithJobInfo(ctx context.Context, namespace string, resourceName string, ownerName string, repositoryName string, jobWorkflowRef string, jobDisplayName string, jobRequestId int64, workflowRunId int64) error {
|
func (_m *MockKubernetesManager) UpdateEphemeralRunnerWithJobInfo(ctx context.Context, namespace string, resourceName string, ownerName string, repositoryName string, jobWorkflowRef string, jobDisplayName string, jobRequestId int64, workflowRunId int64) error {
|
||||||
ctx, span := otel.Tracer("arc").Start(ctx, "MockKubernetesManager.UpdateEphemeralRunnerWithJobInfo")
|
|
||||||
defer span.End()
|
|
||||||
|
|
||||||
ret := _m.Called(ctx, namespace, resourceName, ownerName, repositoryName, jobWorkflowRef, jobDisplayName, jobRequestId, workflowRunId)
|
ret := _m.Called(ctx, namespace, resourceName, ownerName, repositoryName, jobWorkflowRef, jobDisplayName, jobRequestId, workflowRunId)
|
||||||
|
|
||||||
var r0 error
|
var r0 error
|
||||||
|
|||||||
@@ -6,7 +6,6 @@ import (
|
|||||||
context "context"
|
context "context"
|
||||||
|
|
||||||
actions "github.com/actions/actions-runner-controller/github/actions"
|
actions "github.com/actions/actions-runner-controller/github/actions"
|
||||||
"go.opentelemetry.io/otel"
|
|
||||||
|
|
||||||
mock "github.com/stretchr/testify/mock"
|
mock "github.com/stretchr/testify/mock"
|
||||||
)
|
)
|
||||||
@@ -18,9 +17,6 @@ type MockRunnerScaleSetClient struct {
|
|||||||
|
|
||||||
// AcquireJobsForRunnerScaleSet provides a mock function with given fields: ctx, requestIds
|
// AcquireJobsForRunnerScaleSet provides a mock function with given fields: ctx, requestIds
|
||||||
func (_m *MockRunnerScaleSetClient) AcquireJobsForRunnerScaleSet(ctx context.Context, requestIds []int64) error {
|
func (_m *MockRunnerScaleSetClient) AcquireJobsForRunnerScaleSet(ctx context.Context, requestIds []int64) error {
|
||||||
ctx, span := otel.Tracer("arc").Start(ctx, "MockRunnerScaleSetClient.AcquireJobsForRunnerScaleSet")
|
|
||||||
defer span.End()
|
|
||||||
|
|
||||||
ret := _m.Called(ctx, requestIds)
|
ret := _m.Called(ctx, requestIds)
|
||||||
|
|
||||||
var r0 error
|
var r0 error
|
||||||
@@ -35,9 +31,6 @@ func (_m *MockRunnerScaleSetClient) AcquireJobsForRunnerScaleSet(ctx context.Con
|
|||||||
|
|
||||||
// GetRunnerScaleSetMessage provides a mock function with given fields: ctx, handler, maxCapacity
|
// GetRunnerScaleSetMessage provides a mock function with given fields: ctx, handler, maxCapacity
|
||||||
func (_m *MockRunnerScaleSetClient) GetRunnerScaleSetMessage(ctx context.Context, handler func(*actions.RunnerScaleSetMessage) error, maxCapacity int) error {
|
func (_m *MockRunnerScaleSetClient) GetRunnerScaleSetMessage(ctx context.Context, handler func(*actions.RunnerScaleSetMessage) error, maxCapacity int) error {
|
||||||
ctx, span := otel.Tracer("arc").Start(ctx, "MockRunnerScaleSetClient.GetRunnerScaleSetMessage")
|
|
||||||
defer span.End()
|
|
||||||
|
|
||||||
ret := _m.Called(ctx, handler, maxCapacity)
|
ret := _m.Called(ctx, handler, maxCapacity)
|
||||||
|
|
||||||
var r0 error
|
var r0 error
|
||||||
|
|||||||
@@ -8,7 +8,6 @@ import (
|
|||||||
"github.com/actions/actions-runner-controller/github/actions"
|
"github.com/actions/actions-runner-controller/github/actions"
|
||||||
"github.com/go-logr/logr"
|
"github.com/go-logr/logr"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
"go.opentelemetry.io/otel"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
type SessionRefreshingClient struct {
|
type SessionRefreshingClient struct {
|
||||||
@@ -26,9 +25,6 @@ func newSessionClient(client actions.ActionsService, logger *logr.Logger, sessio
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (m *SessionRefreshingClient) GetMessage(ctx context.Context, lastMessageId int64, maxCapacity int) (*actions.RunnerScaleSetMessage, error) {
|
func (m *SessionRefreshingClient) GetMessage(ctx context.Context, lastMessageId int64, maxCapacity int) (*actions.RunnerScaleSetMessage, error) {
|
||||||
ctx, span := otel.Tracer("arc").Start(ctx, "SessionRefreshingClient.GetMessage")
|
|
||||||
defer span.End()
|
|
||||||
|
|
||||||
if maxCapacity < 0 {
|
if maxCapacity < 0 {
|
||||||
return nil, fmt.Errorf("maxCapacity must be greater than or equal to 0")
|
return nil, fmt.Errorf("maxCapacity must be greater than or equal to 0")
|
||||||
}
|
}
|
||||||
@@ -59,9 +55,6 @@ func (m *SessionRefreshingClient) GetMessage(ctx context.Context, lastMessageId
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (m *SessionRefreshingClient) DeleteMessage(ctx context.Context, messageId int64) error {
|
func (m *SessionRefreshingClient) DeleteMessage(ctx context.Context, messageId int64) error {
|
||||||
ctx, span := otel.Tracer("arc").Start(ctx, "SessionRefreshingClient.DeleteMessage")
|
|
||||||
defer span.End()
|
|
||||||
|
|
||||||
err := m.client.DeleteMessage(ctx, m.session.MessageQueueUrl, m.session.MessageQueueAccessToken, messageId)
|
err := m.client.DeleteMessage(ctx, m.session.MessageQueueUrl, m.session.MessageQueueAccessToken, messageId)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
return nil
|
return nil
|
||||||
@@ -89,9 +82,6 @@ func (m *SessionRefreshingClient) DeleteMessage(ctx context.Context, messageId i
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (m *SessionRefreshingClient) AcquireJobs(ctx context.Context, requestIds []int64) ([]int64, error) {
|
func (m *SessionRefreshingClient) AcquireJobs(ctx context.Context, requestIds []int64) ([]int64, error) {
|
||||||
ctx, span := otel.Tracer("arc").Start(ctx, "SessionRefreshingClient.AcquireJobs")
|
|
||||||
defer span.End()
|
|
||||||
|
|
||||||
ids, err := m.client.AcquireJobs(ctx, m.session.RunnerScaleSet.Id, m.session.MessageQueueAccessToken, requestIds)
|
ids, err := m.client.AcquireJobs(ctx, m.session.RunnerScaleSet.Id, m.session.MessageQueueAccessToken, requestIds)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
return ids, nil
|
return ids, nil
|
||||||
|
|||||||
@@ -21,8 +21,6 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
|
|
||||||
"github.com/go-logr/logr"
|
"github.com/go-logr/logr"
|
||||||
"go.opentelemetry.io/otel"
|
|
||||||
otelCodes "go.opentelemetry.io/otel/codes"
|
|
||||||
kerrors "k8s.io/apimachinery/pkg/api/errors"
|
kerrors "k8s.io/apimachinery/pkg/api/errors"
|
||||||
"k8s.io/apimachinery/pkg/runtime"
|
"k8s.io/apimachinery/pkg/runtime"
|
||||||
"k8s.io/apimachinery/pkg/types"
|
"k8s.io/apimachinery/pkg/types"
|
||||||
@@ -57,7 +55,7 @@ type AutoscalingListenerReconciler struct {
|
|||||||
ListenerMetricsAddr string
|
ListenerMetricsAddr string
|
||||||
ListenerMetricsEndpoint string
|
ListenerMetricsEndpoint string
|
||||||
|
|
||||||
resourceBuilder resourceBuilder
|
ResourceBuilder
|
||||||
}
|
}
|
||||||
|
|
||||||
// +kubebuilder:rbac:groups=core,resources=pods,verbs=get;list;watch;create;update;patch;delete
|
// +kubebuilder:rbac:groups=core,resources=pods,verbs=get;list;watch;create;update;patch;delete
|
||||||
@@ -72,9 +70,6 @@ type AutoscalingListenerReconciler struct {
|
|||||||
|
|
||||||
// Reconcile a AutoscalingListener resource to meet its desired spec.
|
// Reconcile a AutoscalingListener resource to meet its desired spec.
|
||||||
func (r *AutoscalingListenerReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
|
func (r *AutoscalingListenerReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
|
||||||
ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingListenerReconciler.Reconcile")
|
|
||||||
defer span.End()
|
|
||||||
|
|
||||||
log := r.Log.WithValues("autoscalinglistener", req.NamespacedName)
|
log := r.Log.WithValues("autoscalinglistener", req.NamespacedName)
|
||||||
|
|
||||||
autoscalingListener := new(v1alpha1.AutoscalingListener)
|
autoscalingListener := new(v1alpha1.AutoscalingListener)
|
||||||
@@ -247,17 +242,27 @@ func (r *AutoscalingListenerReconciler) Reconcile(ctx context.Context, req ctrl.
|
|||||||
return r.createListenerPod(ctx, &autoscalingRunnerSet, autoscalingListener, serviceAccount, mirrorSecret, log)
|
return r.createListenerPod(ctx, &autoscalingRunnerSet, autoscalingListener, serviceAccount, mirrorSecret, log)
|
||||||
}
|
}
|
||||||
|
|
||||||
// The listener pod failed might mean the mirror secret is out of date
|
cs := listenerContainerStatus(listenerPod)
|
||||||
// Delete the listener pod and re-create it to make sure the mirror secret is up to date
|
switch {
|
||||||
if listenerPod.Status.Phase == corev1.PodFailed && listenerPod.DeletionTimestamp.IsZero() {
|
case cs == nil:
|
||||||
log.Info("Listener pod failed, deleting it and re-creating it", "namespace", listenerPod.Namespace, "name", listenerPod.Name, "reason", listenerPod.Status.Reason, "message", listenerPod.Status.Message)
|
log.Info("Listener pod is not ready", "namespace", listenerPod.Namespace, "name", listenerPod.Name)
|
||||||
if err := r.Delete(ctx, listenerPod); err != nil && !kerrors.IsNotFound(err) {
|
return ctrl.Result{}, nil
|
||||||
log.Error(err, "Unable to delete the listener pod", "namespace", listenerPod.Namespace, "name", listenerPod.Name)
|
case cs.State.Terminated != nil:
|
||||||
return ctrl.Result{}, err
|
log.Info("Listener pod is terminated", "namespace", listenerPod.Namespace, "name", listenerPod.Name, "reason", cs.State.Terminated.Reason, "message", cs.State.Terminated.Message)
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if listenerPod.Status.Phase == corev1.PodRunning {
|
if err := r.publishRunningListener(autoscalingListener, false); err != nil {
|
||||||
|
log.Error(err, "Unable to publish runner listener down metric", "namespace", listenerPod.Namespace, "name", listenerPod.Name)
|
||||||
|
}
|
||||||
|
|
||||||
|
if listenerPod.DeletionTimestamp.IsZero() {
|
||||||
|
log.Info("Deleting the listener pod", "namespace", listenerPod.Namespace, "name", listenerPod.Name)
|
||||||
|
if err := r.Delete(ctx, listenerPod); err != nil && !kerrors.IsNotFound(err) {
|
||||||
|
log.Error(err, "Unable to delete the listener pod", "namespace", listenerPod.Namespace, "name", listenerPod.Name)
|
||||||
|
return ctrl.Result{}, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return ctrl.Result{}, nil
|
||||||
|
case cs.State.Running != nil:
|
||||||
if err := r.publishRunningListener(autoscalingListener, true); err != nil {
|
if err := r.publishRunningListener(autoscalingListener, true); err != nil {
|
||||||
log.Error(err, "Unable to publish running listener", "namespace", listenerPod.Namespace, "name", listenerPod.Name)
|
log.Error(err, "Unable to publish running listener", "namespace", listenerPod.Namespace, "name", listenerPod.Name)
|
||||||
// stop reconciling. We should never get to this point but if we do,
|
// stop reconciling. We should never get to this point but if we do,
|
||||||
@@ -265,21 +270,12 @@ func (r *AutoscalingListenerReconciler) Reconcile(ctx context.Context, req ctrl.
|
|||||||
// notify the reconciler again.
|
// notify the reconciler again.
|
||||||
return ctrl.Result{}, nil
|
return ctrl.Result{}, nil
|
||||||
}
|
}
|
||||||
|
return ctrl.Result{}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
return ctrl.Result{}, nil
|
return ctrl.Result{}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *AutoscalingListenerReconciler) cleanupResources(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, logger logr.Logger) (done bool, err error) {
|
func (r *AutoscalingListenerReconciler) cleanupResources(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, logger logr.Logger) (done bool, err error) {
|
||||||
ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingListenerReconciler.cleanupResources")
|
|
||||||
defer span.End()
|
|
||||||
defer func() {
|
|
||||||
if err != nil {
|
|
||||||
span.SetStatus(otelCodes.Error, "error")
|
|
||||||
span.RecordError(err)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
logger.Info("Cleaning up the listener pod")
|
logger.Info("Cleaning up the listener pod")
|
||||||
listenerPod := new(corev1.Pod)
|
listenerPod := new(corev1.Pod)
|
||||||
err = r.Get(ctx, types.NamespacedName{Name: autoscalingListener.Name, Namespace: autoscalingListener.Namespace}, listenerPod)
|
err = r.Get(ctx, types.NamespacedName{Name: autoscalingListener.Name, Namespace: autoscalingListener.Namespace}, listenerPod)
|
||||||
@@ -387,10 +383,7 @@ func (r *AutoscalingListenerReconciler) cleanupResources(ctx context.Context, au
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (r *AutoscalingListenerReconciler) createServiceAccountForListener(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, logger logr.Logger) (ctrl.Result, error) {
|
func (r *AutoscalingListenerReconciler) createServiceAccountForListener(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, logger logr.Logger) (ctrl.Result, error) {
|
||||||
ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingListenerReconciler.createServiceAccountForListener")
|
newServiceAccount := r.ResourceBuilder.newScaleSetListenerServiceAccount(autoscalingListener)
|
||||||
defer span.End()
|
|
||||||
|
|
||||||
newServiceAccount := r.resourceBuilder.newScaleSetListenerServiceAccount(autoscalingListener)
|
|
||||||
|
|
||||||
if err := ctrl.SetControllerReference(autoscalingListener, newServiceAccount, r.Scheme); err != nil {
|
if err := ctrl.SetControllerReference(autoscalingListener, newServiceAccount, r.Scheme); err != nil {
|
||||||
return ctrl.Result{}, err
|
return ctrl.Result{}, err
|
||||||
@@ -407,9 +400,6 @@ func (r *AutoscalingListenerReconciler) createServiceAccountForListener(ctx cont
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (r *AutoscalingListenerReconciler) createListenerPod(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, autoscalingListener *v1alpha1.AutoscalingListener, serviceAccount *corev1.ServiceAccount, secret *corev1.Secret, logger logr.Logger) (ctrl.Result, error) {
|
func (r *AutoscalingListenerReconciler) createListenerPod(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, autoscalingListener *v1alpha1.AutoscalingListener, serviceAccount *corev1.ServiceAccount, secret *corev1.Secret, logger logr.Logger) (ctrl.Result, error) {
|
||||||
ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingListenerReconciler.createListenerPod")
|
|
||||||
defer span.End()
|
|
||||||
|
|
||||||
var envs []corev1.EnvVar
|
var envs []corev1.EnvVar
|
||||||
if autoscalingListener.Spec.Proxy != nil {
|
if autoscalingListener.Spec.Proxy != nil {
|
||||||
httpURL := corev1.EnvVar{
|
httpURL := corev1.EnvVar{
|
||||||
@@ -478,7 +468,7 @@ func (r *AutoscalingListenerReconciler) createListenerPod(ctx context.Context, a
|
|||||||
|
|
||||||
logger.Info("Creating listener config secret")
|
logger.Info("Creating listener config secret")
|
||||||
|
|
||||||
podConfig, err := r.resourceBuilder.newScaleSetListenerConfig(autoscalingListener, secret, metricsConfig, cert)
|
podConfig, err := r.ResourceBuilder.newScaleSetListenerConfig(autoscalingListener, secret, metricsConfig, cert)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logger.Error(err, "Failed to build listener config secret")
|
logger.Error(err, "Failed to build listener config secret")
|
||||||
return ctrl.Result{}, err
|
return ctrl.Result{}, err
|
||||||
@@ -497,7 +487,7 @@ func (r *AutoscalingListenerReconciler) createListenerPod(ctx context.Context, a
|
|||||||
return ctrl.Result{Requeue: true}, nil
|
return ctrl.Result{Requeue: true}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
newPod, err := r.resourceBuilder.newScaleSetListenerPod(autoscalingListener, &podConfig, serviceAccount, secret, metricsConfig, envs...)
|
newPod, err := r.ResourceBuilder.newScaleSetListenerPod(autoscalingListener, &podConfig, serviceAccount, secret, metricsConfig, envs...)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logger.Error(err, "Failed to build listener pod")
|
logger.Error(err, "Failed to build listener pod")
|
||||||
return ctrl.Result{}, err
|
return ctrl.Result{}, err
|
||||||
@@ -519,9 +509,6 @@ func (r *AutoscalingListenerReconciler) createListenerPod(ctx context.Context, a
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (r *AutoscalingListenerReconciler) certificate(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, autoscalingListener *v1alpha1.AutoscalingListener) (string, error) {
|
func (r *AutoscalingListenerReconciler) certificate(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, autoscalingListener *v1alpha1.AutoscalingListener) (string, error) {
|
||||||
ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingListenerReconciler.certificate")
|
|
||||||
defer span.End()
|
|
||||||
|
|
||||||
if autoscalingListener.Spec.GitHubServerTLS.CertificateFrom == nil {
|
if autoscalingListener.Spec.GitHubServerTLS.CertificateFrom == nil {
|
||||||
return "", fmt.Errorf("githubServerTLS.certificateFrom is not specified")
|
return "", fmt.Errorf("githubServerTLS.certificateFrom is not specified")
|
||||||
}
|
}
|
||||||
@@ -560,10 +547,7 @@ func (r *AutoscalingListenerReconciler) certificate(ctx context.Context, autosca
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (r *AutoscalingListenerReconciler) createSecretsForListener(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, secret *corev1.Secret, logger logr.Logger) (ctrl.Result, error) {
|
func (r *AutoscalingListenerReconciler) createSecretsForListener(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, secret *corev1.Secret, logger logr.Logger) (ctrl.Result, error) {
|
||||||
ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingListenerReconciler.createSecretsForListener")
|
newListenerSecret := r.ResourceBuilder.newScaleSetListenerSecretMirror(autoscalingListener, secret)
|
||||||
defer span.End()
|
|
||||||
|
|
||||||
newListenerSecret := r.resourceBuilder.newScaleSetListenerSecretMirror(autoscalingListener, secret)
|
|
||||||
|
|
||||||
if err := ctrl.SetControllerReference(autoscalingListener, newListenerSecret, r.Scheme); err != nil {
|
if err := ctrl.SetControllerReference(autoscalingListener, newListenerSecret, r.Scheme); err != nil {
|
||||||
return ctrl.Result{}, err
|
return ctrl.Result{}, err
|
||||||
@@ -580,9 +564,6 @@ func (r *AutoscalingListenerReconciler) createSecretsForListener(ctx context.Con
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (r *AutoscalingListenerReconciler) createProxySecret(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, logger logr.Logger) (ctrl.Result, error) {
|
func (r *AutoscalingListenerReconciler) createProxySecret(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, logger logr.Logger) (ctrl.Result, error) {
|
||||||
ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingListenerReconciler.createProxySecret")
|
|
||||||
defer span.End()
|
|
||||||
|
|
||||||
data, err := autoscalingListener.Spec.Proxy.ToSecretData(func(s string) (*corev1.Secret, error) {
|
data, err := autoscalingListener.Spec.Proxy.ToSecretData(func(s string) (*corev1.Secret, error) {
|
||||||
var secret corev1.Secret
|
var secret corev1.Secret
|
||||||
err := r.Get(ctx, types.NamespacedName{Name: s, Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace}, &secret)
|
err := r.Get(ctx, types.NamespacedName{Name: s, Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace}, &secret)
|
||||||
@@ -622,9 +603,6 @@ func (r *AutoscalingListenerReconciler) createProxySecret(ctx context.Context, a
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (r *AutoscalingListenerReconciler) updateSecretsForListener(ctx context.Context, secret *corev1.Secret, mirrorSecret *corev1.Secret, logger logr.Logger) (ctrl.Result, error) {
|
func (r *AutoscalingListenerReconciler) updateSecretsForListener(ctx context.Context, secret *corev1.Secret, mirrorSecret *corev1.Secret, logger logr.Logger) (ctrl.Result, error) {
|
||||||
ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingListenerReconciler.updateSecretsForListener")
|
|
||||||
defer span.End()
|
|
||||||
|
|
||||||
dataHash := hash.ComputeTemplateHash(secret.Data)
|
dataHash := hash.ComputeTemplateHash(secret.Data)
|
||||||
updatedMirrorSecret := mirrorSecret.DeepCopy()
|
updatedMirrorSecret := mirrorSecret.DeepCopy()
|
||||||
updatedMirrorSecret.Labels["secret-data-hash"] = dataHash
|
updatedMirrorSecret.Labels["secret-data-hash"] = dataHash
|
||||||
@@ -641,10 +619,7 @@ func (r *AutoscalingListenerReconciler) updateSecretsForListener(ctx context.Con
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (r *AutoscalingListenerReconciler) createRoleForListener(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, logger logr.Logger) (ctrl.Result, error) {
|
func (r *AutoscalingListenerReconciler) createRoleForListener(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, logger logr.Logger) (ctrl.Result, error) {
|
||||||
ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingListenerReconciler.createRoleForListener")
|
newRole := r.ResourceBuilder.newScaleSetListenerRole(autoscalingListener)
|
||||||
defer span.End()
|
|
||||||
|
|
||||||
newRole := r.resourceBuilder.newScaleSetListenerRole(autoscalingListener)
|
|
||||||
|
|
||||||
logger.Info("Creating listener role", "namespace", newRole.Namespace, "name", newRole.Name, "rules", newRole.Rules)
|
logger.Info("Creating listener role", "namespace", newRole.Namespace, "name", newRole.Name, "rules", newRole.Rules)
|
||||||
if err := r.Create(ctx, newRole); err != nil {
|
if err := r.Create(ctx, newRole); err != nil {
|
||||||
@@ -657,9 +632,6 @@ func (r *AutoscalingListenerReconciler) createRoleForListener(ctx context.Contex
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (r *AutoscalingListenerReconciler) updateRoleForListener(ctx context.Context, listenerRole *rbacv1.Role, desiredRules []rbacv1.PolicyRule, desiredRulesHash string, logger logr.Logger) (ctrl.Result, error) {
|
func (r *AutoscalingListenerReconciler) updateRoleForListener(ctx context.Context, listenerRole *rbacv1.Role, desiredRules []rbacv1.PolicyRule, desiredRulesHash string, logger logr.Logger) (ctrl.Result, error) {
|
||||||
ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingListenerReconciler.updateRoleForListener")
|
|
||||||
defer span.End()
|
|
||||||
|
|
||||||
updatedPatchRole := listenerRole.DeepCopy()
|
updatedPatchRole := listenerRole.DeepCopy()
|
||||||
updatedPatchRole.Labels["role-policy-rules-hash"] = desiredRulesHash
|
updatedPatchRole.Labels["role-policy-rules-hash"] = desiredRulesHash
|
||||||
updatedPatchRole.Rules = desiredRules
|
updatedPatchRole.Rules = desiredRules
|
||||||
@@ -675,10 +647,7 @@ func (r *AutoscalingListenerReconciler) updateRoleForListener(ctx context.Contex
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (r *AutoscalingListenerReconciler) createRoleBindingForListener(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, listenerRole *rbacv1.Role, serviceAccount *corev1.ServiceAccount, logger logr.Logger) (ctrl.Result, error) {
|
func (r *AutoscalingListenerReconciler) createRoleBindingForListener(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, listenerRole *rbacv1.Role, serviceAccount *corev1.ServiceAccount, logger logr.Logger) (ctrl.Result, error) {
|
||||||
ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingListenerReconciler.createRoleBindingForListener")
|
newRoleBinding := r.ResourceBuilder.newScaleSetListenerRoleBinding(autoscalingListener, listenerRole, serviceAccount)
|
||||||
defer span.End()
|
|
||||||
|
|
||||||
newRoleBinding := r.resourceBuilder.newScaleSetListenerRoleBinding(autoscalingListener, listenerRole, serviceAccount)
|
|
||||||
|
|
||||||
logger.Info("Creating listener role binding",
|
logger.Info("Creating listener role binding",
|
||||||
"namespace", newRoleBinding.Namespace,
|
"namespace", newRoleBinding.Namespace,
|
||||||
@@ -763,3 +732,13 @@ func (r *AutoscalingListenerReconciler) SetupWithManager(mgr ctrl.Manager) error
|
|||||||
WithEventFilter(predicate.ResourceVersionChangedPredicate{}).
|
WithEventFilter(predicate.ResourceVersionChangedPredicate{}).
|
||||||
Complete(r)
|
Complete(r)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func listenerContainerStatus(pod *corev1.Pod) *corev1.ContainerStatus {
|
||||||
|
for i := range pod.Status.ContainerStatuses {
|
||||||
|
cs := &pod.Status.ContainerStatuses[i]
|
||||||
|
if cs.Name == autoscalingListenerContainerName {
|
||||||
|
return cs
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|||||||
@@ -25,7 +25,7 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
autoscalingListenerTestTimeout = time.Second * 5
|
autoscalingListenerTestTimeout = time.Second * 20
|
||||||
autoscalingListenerTestInterval = time.Millisecond * 250
|
autoscalingListenerTestInterval = time.Millisecond * 250
|
||||||
autoscalingListenerTestGitHubToken = "gh_token"
|
autoscalingListenerTestGitHubToken = "gh_token"
|
||||||
)
|
)
|
||||||
@@ -351,6 +351,53 @@ var _ = Describe("Test AutoScalingListener controller", func() {
|
|||||||
autoscalingListenerTestInterval).Should(BeEquivalentTo(rulesForListenerRole([]string{updated.Spec.EphemeralRunnerSetName})), "Role should be updated")
|
autoscalingListenerTestInterval).Should(BeEquivalentTo(rulesForListenerRole([]string{updated.Spec.EphemeralRunnerSetName})), "Role should be updated")
|
||||||
})
|
})
|
||||||
|
|
||||||
|
It("It should re-create pod whenever listener container is terminated", func() {
|
||||||
|
// Waiting for the pod is created
|
||||||
|
pod := new(corev1.Pod)
|
||||||
|
Eventually(
|
||||||
|
func() (string, error) {
|
||||||
|
err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingListener.Name, Namespace: autoscalingListener.Namespace}, pod)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
return pod.Name, nil
|
||||||
|
},
|
||||||
|
autoscalingListenerTestTimeout,
|
||||||
|
autoscalingListenerTestInterval,
|
||||||
|
).Should(BeEquivalentTo(autoscalingListener.Name), "Pod should be created")
|
||||||
|
|
||||||
|
oldPodUID := string(pod.UID)
|
||||||
|
updated := pod.DeepCopy()
|
||||||
|
updated.Status.ContainerStatuses = []corev1.ContainerStatus{
|
||||||
|
{
|
||||||
|
Name: autoscalingListenerContainerName,
|
||||||
|
State: corev1.ContainerState{
|
||||||
|
Terminated: &corev1.ContainerStateTerminated{
|
||||||
|
ExitCode: 0,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
err := k8sClient.Status().Update(ctx, updated)
|
||||||
|
Expect(err).NotTo(HaveOccurred(), "failed to update test pod")
|
||||||
|
|
||||||
|
// Waiting for the new pod is created
|
||||||
|
Eventually(
|
||||||
|
func() (string, error) {
|
||||||
|
pod := new(corev1.Pod)
|
||||||
|
err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingListener.Name, Namespace: autoscalingListener.Namespace}, pod)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
return string(pod.UID), nil
|
||||||
|
},
|
||||||
|
autoscalingListenerTestTimeout,
|
||||||
|
autoscalingListenerTestInterval,
|
||||||
|
).ShouldNot(BeEquivalentTo(oldPodUID), "Pod should be re-created")
|
||||||
|
})
|
||||||
|
|
||||||
It("It should update mirror secrets to match secret used by AutoScalingRunnerSet", func() {
|
It("It should update mirror secrets to match secret used by AutoScalingRunnerSet", func() {
|
||||||
// Waiting for the pod is created
|
// Waiting for the pod is created
|
||||||
pod := new(corev1.Pod)
|
pod := new(corev1.Pod)
|
||||||
@@ -373,7 +420,18 @@ var _ = Describe("Test AutoScalingListener controller", func() {
|
|||||||
Expect(err).NotTo(HaveOccurred(), "failed to update test secret")
|
Expect(err).NotTo(HaveOccurred(), "failed to update test secret")
|
||||||
|
|
||||||
updatedPod := pod.DeepCopy()
|
updatedPod := pod.DeepCopy()
|
||||||
updatedPod.Status.Phase = corev1.PodFailed
|
// Ignore status running and consult the container state
|
||||||
|
updatedPod.Status.Phase = corev1.PodRunning
|
||||||
|
updatedPod.Status.ContainerStatuses = []corev1.ContainerStatus{
|
||||||
|
{
|
||||||
|
Name: autoscalingListenerContainerName,
|
||||||
|
State: corev1.ContainerState{
|
||||||
|
Terminated: &corev1.ContainerStateTerminated{
|
||||||
|
ExitCode: 1,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
err = k8sClient.Status().Update(ctx, updatedPod)
|
err = k8sClient.Status().Update(ctx, updatedPod)
|
||||||
Expect(err).NotTo(HaveOccurred(), "failed to update test pod to failed")
|
Expect(err).NotTo(HaveOccurred(), "failed to update test pod to failed")
|
||||||
|
|
||||||
@@ -420,6 +478,7 @@ var _ = Describe("Test AutoScalingListener customization", func() {
|
|||||||
var autoscalingListener *v1alpha1.AutoscalingListener
|
var autoscalingListener *v1alpha1.AutoscalingListener
|
||||||
|
|
||||||
var runAsUser int64 = 1001
|
var runAsUser int64 = 1001
|
||||||
|
const sidecarContainerName = "sidecar"
|
||||||
|
|
||||||
listenerPodTemplate := corev1.PodTemplateSpec{
|
listenerPodTemplate := corev1.PodTemplateSpec{
|
||||||
Spec: corev1.PodSpec{
|
Spec: corev1.PodSpec{
|
||||||
@@ -432,7 +491,7 @@ var _ = Describe("Test AutoScalingListener customization", func() {
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Name: "sidecar",
|
Name: sidecarContainerName,
|
||||||
ImagePullPolicy: corev1.PullIfNotPresent,
|
ImagePullPolicy: corev1.PullIfNotPresent,
|
||||||
Image: "busybox",
|
Image: "busybox",
|
||||||
},
|
},
|
||||||
@@ -525,7 +584,8 @@ var _ = Describe("Test AutoScalingListener customization", func() {
|
|||||||
return created.Finalizers[0], nil
|
return created.Finalizers[0], nil
|
||||||
},
|
},
|
||||||
autoscalingListenerTestTimeout,
|
autoscalingListenerTestTimeout,
|
||||||
autoscalingListenerTestInterval).Should(BeEquivalentTo(autoscalingListenerFinalizerName), "AutoScalingListener should have a finalizer")
|
autoscalingListenerTestInterval,
|
||||||
|
).Should(BeEquivalentTo(autoscalingListenerFinalizerName), "AutoScalingListener should have a finalizer")
|
||||||
|
|
||||||
// Check if config is created
|
// Check if config is created
|
||||||
config := new(corev1.Secret)
|
config := new(corev1.Secret)
|
||||||
@@ -559,11 +619,100 @@ var _ = Describe("Test AutoScalingListener customization", func() {
|
|||||||
Expect(pod.Spec.Containers[0].SecurityContext.RunAsUser).To(Equal(&runAsUser), "Pod should have the correct security context")
|
Expect(pod.Spec.Containers[0].SecurityContext.RunAsUser).To(Equal(&runAsUser), "Pod should have the correct security context")
|
||||||
Expect(pod.Spec.Containers[0].ImagePullPolicy).To(Equal(corev1.PullAlways), "Pod should have the correct image pull policy")
|
Expect(pod.Spec.Containers[0].ImagePullPolicy).To(Equal(corev1.PullAlways), "Pod should have the correct image pull policy")
|
||||||
|
|
||||||
Expect(pod.Spec.Containers[1].Name).To(Equal("sidecar"), "Pod should have the correct container name")
|
Expect(pod.Spec.Containers[1].Name).To(Equal(sidecarContainerName), "Pod should have the correct container name")
|
||||||
Expect(pod.Spec.Containers[1].Image).To(Equal("busybox"), "Pod should have the correct image")
|
Expect(pod.Spec.Containers[1].Image).To(Equal("busybox"), "Pod should have the correct image")
|
||||||
Expect(pod.Spec.Containers[1].ImagePullPolicy).To(Equal(corev1.PullIfNotPresent), "Pod should have the correct image pull policy")
|
Expect(pod.Spec.Containers[1].ImagePullPolicy).To(Equal(corev1.PullIfNotPresent), "Pod should have the correct image pull policy")
|
||||||
})
|
})
|
||||||
})
|
})
|
||||||
|
|
||||||
|
Context("When AutoscalingListener pod has interuptions", func() {
|
||||||
|
It("Should re-create pod when it is deleted", func() {
|
||||||
|
pod := new(corev1.Pod)
|
||||||
|
Eventually(
|
||||||
|
func() (string, error) {
|
||||||
|
err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingListener.Name, Namespace: autoscalingListener.Namespace}, pod)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
return pod.Name, nil
|
||||||
|
},
|
||||||
|
autoscalingListenerTestTimeout,
|
||||||
|
autoscalingListenerTestInterval,
|
||||||
|
).Should(BeEquivalentTo(autoscalingListener.Name), "Pod should be created")
|
||||||
|
|
||||||
|
Expect(len(pod.Spec.Containers)).To(Equal(2), "Pod should have 2 containers")
|
||||||
|
oldPodUID := string(pod.UID)
|
||||||
|
|
||||||
|
err := k8sClient.Delete(ctx, pod)
|
||||||
|
Expect(err).NotTo(HaveOccurred(), "failed to delete pod")
|
||||||
|
|
||||||
|
pod = new(corev1.Pod)
|
||||||
|
Eventually(
|
||||||
|
func() (string, error) {
|
||||||
|
err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingListener.Name, Namespace: autoscalingListener.Namespace}, pod)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
return string(pod.UID), nil
|
||||||
|
},
|
||||||
|
autoscalingListenerTestTimeout,
|
||||||
|
autoscalingListenerTestInterval,
|
||||||
|
).ShouldNot(BeEquivalentTo(oldPodUID), "Pod should be created")
|
||||||
|
})
|
||||||
|
|
||||||
|
It("Should re-create pod when the listener pod is terminated", func() {
|
||||||
|
pod := new(corev1.Pod)
|
||||||
|
Eventually(
|
||||||
|
func() (string, error) {
|
||||||
|
err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingListener.Name, Namespace: autoscalingListener.Namespace}, pod)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
return pod.Name, nil
|
||||||
|
},
|
||||||
|
autoscalingListenerTestTimeout,
|
||||||
|
autoscalingListenerTestInterval,
|
||||||
|
).Should(BeEquivalentTo(autoscalingListener.Name), "Pod should be created")
|
||||||
|
|
||||||
|
updated := pod.DeepCopy()
|
||||||
|
oldPodUID := string(pod.UID)
|
||||||
|
updated.Status.ContainerStatuses = []corev1.ContainerStatus{
|
||||||
|
{
|
||||||
|
Name: autoscalingListenerContainerName,
|
||||||
|
State: corev1.ContainerState{
|
||||||
|
Terminated: &corev1.ContainerStateTerminated{
|
||||||
|
ExitCode: 1,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: sidecarContainerName,
|
||||||
|
State: corev1.ContainerState{
|
||||||
|
Running: &corev1.ContainerStateRunning{},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
err := k8sClient.Status().Update(ctx, updated)
|
||||||
|
Expect(err).NotTo(HaveOccurred(), "failed to update pod status")
|
||||||
|
|
||||||
|
pod = new(corev1.Pod)
|
||||||
|
Eventually(
|
||||||
|
func() (string, error) {
|
||||||
|
err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingListener.Name, Namespace: autoscalingListener.Namespace}, pod)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
return string(pod.UID), nil
|
||||||
|
},
|
||||||
|
autoscalingListenerTestTimeout,
|
||||||
|
autoscalingListenerTestInterval,
|
||||||
|
).ShouldNot(BeEquivalentTo(oldPodUID), "Pod should be created")
|
||||||
|
})
|
||||||
|
})
|
||||||
})
|
})
|
||||||
|
|
||||||
var _ = Describe("Test AutoScalingListener controller with proxy", func() {
|
var _ = Describe("Test AutoScalingListener controller with proxy", func() {
|
||||||
@@ -887,7 +1036,6 @@ var _ = Describe("Test AutoScalingListener controller with template modification
|
|||||||
|
|
||||||
g.Expect(pod.ObjectMeta.Annotations).To(HaveKeyWithValue("test-annotation-key", "test-annotation-value"), "pod annotations should be copied from runner set template")
|
g.Expect(pod.ObjectMeta.Annotations).To(HaveKeyWithValue("test-annotation-key", "test-annotation-value"), "pod annotations should be copied from runner set template")
|
||||||
g.Expect(pod.ObjectMeta.Labels).To(HaveKeyWithValue("test-label-key", "test-label-value"), "pod labels should be copied from runner set template")
|
g.Expect(pod.ObjectMeta.Labels).To(HaveKeyWithValue("test-label-key", "test-label-value"), "pod labels should be copied from runner set template")
|
||||||
|
|
||||||
},
|
},
|
||||||
autoscalingListenerTestTimeout,
|
autoscalingListenerTestTimeout,
|
||||||
autoscalingListenerTestInterval).Should(Succeed(), "failed to create listener pod with proxy details")
|
autoscalingListenerTestInterval).Should(Succeed(), "failed to create listener pod with proxy details")
|
||||||
|
|||||||
@@ -27,8 +27,6 @@ import (
|
|||||||
"github.com/actions/actions-runner-controller/build"
|
"github.com/actions/actions-runner-controller/build"
|
||||||
"github.com/actions/actions-runner-controller/github/actions"
|
"github.com/actions/actions-runner-controller/github/actions"
|
||||||
"github.com/go-logr/logr"
|
"github.com/go-logr/logr"
|
||||||
"go.opentelemetry.io/otel"
|
|
||||||
otelCodes "go.opentelemetry.io/otel/codes"
|
|
||||||
corev1 "k8s.io/api/core/v1"
|
corev1 "k8s.io/api/core/v1"
|
||||||
rbacv1 "k8s.io/api/rbac/v1"
|
rbacv1 "k8s.io/api/rbac/v1"
|
||||||
kerrors "k8s.io/apimachinery/pkg/api/errors"
|
kerrors "k8s.io/apimachinery/pkg/api/errors"
|
||||||
@@ -81,8 +79,7 @@ type AutoscalingRunnerSetReconciler struct {
|
|||||||
DefaultRunnerScaleSetListenerImagePullSecrets []string
|
DefaultRunnerScaleSetListenerImagePullSecrets []string
|
||||||
UpdateStrategy UpdateStrategy
|
UpdateStrategy UpdateStrategy
|
||||||
ActionsClient actions.MultiClient
|
ActionsClient actions.MultiClient
|
||||||
|
ResourceBuilder
|
||||||
resourceBuilder resourceBuilder
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// +kubebuilder:rbac:groups=actions.github.com,resources=autoscalingrunnersets,verbs=get;list;watch;create;update;patch;delete
|
// +kubebuilder:rbac:groups=actions.github.com,resources=autoscalingrunnersets,verbs=get;list;watch;create;update;patch;delete
|
||||||
@@ -95,9 +92,6 @@ type AutoscalingRunnerSetReconciler struct {
|
|||||||
|
|
||||||
// Reconcile a AutoscalingRunnerSet resource to meet its desired spec.
|
// Reconcile a AutoscalingRunnerSet resource to meet its desired spec.
|
||||||
func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
|
func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
|
||||||
ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingRunnerSetReconciler.Reconcile")
|
|
||||||
defer span.End()
|
|
||||||
|
|
||||||
log := r.Log.WithValues("autoscalingrunnerset", req.NamespacedName)
|
log := r.Log.WithValues("autoscalingrunnerset", req.NamespacedName)
|
||||||
|
|
||||||
autoscalingRunnerSet := new(v1alpha1.AutoscalingRunnerSet)
|
autoscalingRunnerSet := new(v1alpha1.AutoscalingRunnerSet)
|
||||||
@@ -139,17 +133,11 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl
|
|||||||
return ctrl.Result{}, err
|
return ctrl.Result{}, err
|
||||||
}
|
}
|
||||||
|
|
||||||
requeue, err := r.removeFinalizersFromDependentResources(ctx, autoscalingRunnerSet, log)
|
if err := r.removeFinalizersFromDependentResources(ctx, autoscalingRunnerSet, log); err != nil {
|
||||||
if err != nil {
|
|
||||||
log.Error(err, "Failed to remove finalizers on dependent resources")
|
log.Error(err, "Failed to remove finalizers on dependent resources")
|
||||||
return ctrl.Result{}, err
|
return ctrl.Result{}, err
|
||||||
}
|
}
|
||||||
|
|
||||||
if requeue {
|
|
||||||
log.Info("Waiting for dependent resources to be deleted")
|
|
||||||
return ctrl.Result{Requeue: true}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
log.Info("Removing finalizer")
|
log.Info("Removing finalizer")
|
||||||
err = patch(ctx, r.Client, autoscalingRunnerSet, func(obj *v1alpha1.AutoscalingRunnerSet) {
|
err = patch(ctx, r.Client, autoscalingRunnerSet, func(obj *v1alpha1.AutoscalingRunnerSet) {
|
||||||
controllerutil.RemoveFinalizer(obj, autoscalingRunnerSetFinalizerName)
|
controllerutil.RemoveFinalizer(obj, autoscalingRunnerSetFinalizerName)
|
||||||
@@ -339,15 +327,6 @@ func (r *AutoscalingRunnerSetReconciler) drainingJobs(latestRunnerSetStatus *v1a
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (r *AutoscalingRunnerSetReconciler) cleanupListener(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) (done bool, err error) {
|
func (r *AutoscalingRunnerSetReconciler) cleanupListener(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) (done bool, err error) {
|
||||||
ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingRunnerSetReconciler.cleanupListener")
|
|
||||||
defer span.End()
|
|
||||||
defer func() {
|
|
||||||
if err != nil {
|
|
||||||
span.SetStatus(otelCodes.Error, "error")
|
|
||||||
span.RecordError(err)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
logger.Info("Cleaning up the listener")
|
logger.Info("Cleaning up the listener")
|
||||||
var listener v1alpha1.AutoscalingListener
|
var listener v1alpha1.AutoscalingListener
|
||||||
err = r.Get(ctx, client.ObjectKey{Namespace: r.ControllerNamespace, Name: scaleSetListenerName(autoscalingRunnerSet)}, &listener)
|
err = r.Get(ctx, client.ObjectKey{Namespace: r.ControllerNamespace, Name: scaleSetListenerName(autoscalingRunnerSet)}, &listener)
|
||||||
@@ -369,15 +348,6 @@ func (r *AutoscalingRunnerSetReconciler) cleanupListener(ctx context.Context, au
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (r *AutoscalingRunnerSetReconciler) cleanupEphemeralRunnerSets(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) (done bool, err error) {
|
func (r *AutoscalingRunnerSetReconciler) cleanupEphemeralRunnerSets(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) (done bool, err error) {
|
||||||
ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingRunnerSetReconciler.cleanupEphemeralRunnerSets")
|
|
||||||
defer span.End()
|
|
||||||
defer func() {
|
|
||||||
if err != nil {
|
|
||||||
span.SetStatus(otelCodes.Error, "error")
|
|
||||||
span.RecordError(err)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
logger.Info("Cleaning up ephemeral runner sets")
|
logger.Info("Cleaning up ephemeral runner sets")
|
||||||
runnerSets, err := r.listEphemeralRunnerSets(ctx, autoscalingRunnerSet)
|
runnerSets, err := r.listEphemeralRunnerSets(ctx, autoscalingRunnerSet)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -396,9 +366,6 @@ func (r *AutoscalingRunnerSetReconciler) cleanupEphemeralRunnerSets(ctx context.
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (r *AutoscalingRunnerSetReconciler) deleteEphemeralRunnerSets(ctx context.Context, oldRunnerSets []v1alpha1.EphemeralRunnerSet, logger logr.Logger) error {
-ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingRunnerSetReconciler.deleteEphemeralRunnerSets")
-defer span.End()
-
for i := range oldRunnerSets {
rs := &oldRunnerSets[i]
// already deleted but contains finalizer so it still exists
@@ -415,16 +382,7 @@ func (r *AutoscalingRunnerSetReconciler) deleteEphemeralRunnerSets(ctx context.C
return nil
}

-func (r *AutoscalingRunnerSetReconciler) removeFinalizersFromDependentResources(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) (requeue bool, err error) {
-ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingRunnerSetReconciler.removeFinalizersFromDependentResources")
-defer span.End()
-defer func() {
-if err != nil {
-span.SetStatus(otelCodes.Error, "error")
-span.RecordError(err)
-}
-}()
-
+func (r *AutoscalingRunnerSetReconciler) removeFinalizersFromDependentResources(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) error {
c := autoscalingRunnerSetFinalizerDependencyCleaner{
client: r.Client,
autoscalingRunnerSet: autoscalingRunnerSet,
@@ -439,18 +397,10 @@ func (r *AutoscalingRunnerSetReconciler) removeFinalizersFromDependentResources(
c.removeManagerRoleBindingFinalizer(ctx)
c.removeManagerRoleFinalizer(ctx)

-requeue, err = c.result()
-if err != nil {
-logger.Error(err, "Failed to cleanup finalizer from dependent resource")
-return true, err
-}
-return requeue, nil
+return c.Err()
}

func (r *AutoscalingRunnerSetReconciler) createRunnerScaleSet(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) (ctrl.Result, error) {
-ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingRunnerSetReconciler.createRunnerScaleSet")
-defer span.End()
-
logger.Info("Creating a new runner scale set")
actionsClient, err := r.actionsClientFor(ctx, autoscalingRunnerSet)
if len(autoscalingRunnerSet.Spec.RunnerScaleSetName) == 0 {
@@ -542,9 +492,6 @@ func (r *AutoscalingRunnerSetReconciler) createRunnerScaleSet(ctx context.Contex
}

func (r *AutoscalingRunnerSetReconciler) updateRunnerScaleSetRunnerGroup(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) (ctrl.Result, error) {
-ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingRunnerSetReconciler.updateRunnerScaleSetRunnerGroup")
-defer span.End()
-
runnerScaleSetId, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdAnnotationKey])
if err != nil {
logger.Error(err, "Failed to parse runner scale set ID")
@@ -588,9 +535,6 @@ func (r *AutoscalingRunnerSetReconciler) updateRunnerScaleSetRunnerGroup(ctx con
}

func (r *AutoscalingRunnerSetReconciler) updateRunnerScaleSetName(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) (ctrl.Result, error) {
-ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingRunnerSetReconciler.updateRunnerScaleSetName")
-defer span.End()
-
runnerScaleSetId, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdAnnotationKey])
if err != nil {
logger.Error(err, "Failed to parse runner scale set ID")
@@ -627,9 +571,6 @@ func (r *AutoscalingRunnerSetReconciler) updateRunnerScaleSetName(ctx context.Co
}

func (r *AutoscalingRunnerSetReconciler) deleteRunnerScaleSet(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) error {
-ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingRunnerSetReconciler.deleteRunnerScaleSet")
-defer span.End()
-
scaleSetId, ok := autoscalingRunnerSet.Annotations[runnerScaleSetIdAnnotationKey]
if !ok {
// Annotation not being present can occur in 3 scenarios
@@ -681,10 +622,7 @@ func (r *AutoscalingRunnerSetReconciler) deleteRunnerScaleSet(ctx context.Contex
}

func (r *AutoscalingRunnerSetReconciler) createEphemeralRunnerSet(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, log logr.Logger) (ctrl.Result, error) {
-ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingRunnerSetReconciler.createEphemeralRunnerSet")
-defer span.End()
-
-desiredRunnerSet, err := r.resourceBuilder.newEphemeralRunnerSet(autoscalingRunnerSet)
+desiredRunnerSet, err := r.ResourceBuilder.newEphemeralRunnerSet(autoscalingRunnerSet)
if err != nil {
log.Error(err, "Could not create EphemeralRunnerSet")
return ctrl.Result{}, err
@@ -706,9 +644,6 @@ func (r *AutoscalingRunnerSetReconciler) createEphemeralRunnerSet(ctx context.Co
}

func (r *AutoscalingRunnerSetReconciler) createAutoScalingListenerForRunnerSet(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet, log logr.Logger) (ctrl.Result, error) {
-ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingRunnerSetReconciler.createAutoScalingListenerForRunnerSet")
-defer span.End()
-
var imagePullSecrets []corev1.LocalObjectReference
for _, imagePullSecret := range r.DefaultRunnerScaleSetListenerImagePullSecrets {
imagePullSecrets = append(imagePullSecrets, corev1.LocalObjectReference{
@@ -716,7 +651,7 @@ func (r *AutoscalingRunnerSetReconciler) createAutoScalingListenerForRunnerSet(c
})
}

-autoscalingListener, err := r.resourceBuilder.newAutoScalingListener(autoscalingRunnerSet, ephemeralRunnerSet, r.ControllerNamespace, r.DefaultRunnerScaleSetListenerImage, imagePullSecrets)
+autoscalingListener, err := r.ResourceBuilder.newAutoScalingListener(autoscalingRunnerSet, ephemeralRunnerSet, r.ControllerNamespace, r.DefaultRunnerScaleSetListenerImage, imagePullSecrets)
if err != nil {
log.Error(err, "Could not create AutoscalingListener spec")
return ctrl.Result{}, err
@@ -733,9 +668,6 @@ func (r *AutoscalingRunnerSetReconciler) createAutoScalingListenerForRunnerSet(c
}

func (r *AutoscalingRunnerSetReconciler) listEphemeralRunnerSets(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet) (*EphemeralRunnerSets, error) {
-ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingRunnerSetReconciler.listEphemeralRunnerSets")
-defer span.End()
-
list := new(v1alpha1.EphemeralRunnerSetList)
if err := r.List(ctx, list, client.InNamespace(autoscalingRunnerSet.Namespace), client.MatchingFields{resourceOwnerKey: autoscalingRunnerSet.Name}); err != nil {
return nil, fmt.Errorf("failed to list ephemeral runner sets: %v", err)
@@ -745,9 +677,6 @@ func (r *AutoscalingRunnerSetReconciler) listEphemeralRunnerSets(ctx context.Con
}

func (r *AutoscalingRunnerSetReconciler) actionsClientFor(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet) (actions.ActionsService, error) {
-ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingRunnerSetReconciler.actionsClientFor")
-defer span.End()
-
var configSecret corev1.Secret
if err := r.Get(ctx, types.NamespacedName{Namespace: autoscalingRunnerSet.Namespace, Name: autoscalingRunnerSet.Spec.GitHubConfigSecret}, &configSecret); err != nil {
return nil, fmt.Errorf("failed to find GitHub config secret: %w", err)
@@ -768,9 +697,6 @@ func (r *AutoscalingRunnerSetReconciler) actionsClientFor(ctx context.Context, a
}

func (r *AutoscalingRunnerSetReconciler) actionsClientOptionsFor(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet) ([]actions.ClientOption, error) {
-ctx, span := otel.Tracer("arc").Start(ctx, "AutoscalingRunnerSetReconciler.actionsClientOptionsFor")
-defer span.End()
-
var options []actions.ClientOption

if autoscalingRunnerSet.Spec.Proxy != nil {
@@ -846,20 +772,16 @@ type autoscalingRunnerSetFinalizerDependencyCleaner struct {
autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet
logger logr.Logger

-// fields to operate on
-requeue bool
-err error
+err error
}

-func (c *autoscalingRunnerSetFinalizerDependencyCleaner) result() (requeue bool, err error) {
-return c.requeue, c.err
+func (c *autoscalingRunnerSetFinalizerDependencyCleaner) Err() error {
+return c.err
}

func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeKubernetesModeRoleBindingFinalizer(ctx context.Context) {
-ctx, span := otel.Tracer("arc").Start(ctx, "autoscalingRunnerSetFinalizerDependencyCleaner.removeKubernetesModeRoleBindingFinalizer")
-defer span.End()
-
-if c.requeue || c.err != nil {
+if c.err != nil {
+c.logger.Info("Skipping cleaning up kubernetes mode service account")
return
}

@@ -890,7 +812,6 @@ func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeKubernetesModeRol
c.err = fmt.Errorf("failed to patch kubernetes mode role binding without finalizer: %w", err)
return
}
-c.requeue = true
c.logger.Info("Removed finalizer from container mode kubernetes role binding", "name", roleBindingName)
return
case err != nil && !kerrors.IsNotFound(err):
@@ -903,10 +824,7 @@ func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeKubernetesModeRol
}

func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeKubernetesModeRoleFinalizer(ctx context.Context) {
-ctx, span := otel.Tracer("arc").Start(ctx, "autoscalingRunnerSetFinalizerDependencyCleaner.removeKubernetesModeRoleFinalizer")
-defer span.End()
-
-if c.requeue || c.err != nil {
+if c.err != nil {
return
}

@@ -936,7 +854,6 @@ func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeKubernetesModeRol
c.err = fmt.Errorf("failed to patch kubernetes mode role without finalizer: %w", err)
return
}
-c.requeue = true
c.logger.Info("Removed finalizer from container mode kubernetes role")
return
case err != nil && !kerrors.IsNotFound(err):
@@ -949,10 +866,7 @@ func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeKubernetesModeRol
}

func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeKubernetesModeServiceAccountFinalizer(ctx context.Context) {
-ctx, span := otel.Tracer("arc").Start(ctx, "autoscalingRunnerSetFinalizerDependencyCleaner.removeKubernetesModeServiceAccountFinalizer")
-defer span.End()
-
-if c.requeue || c.err != nil {
+if c.err != nil {
return
}

@@ -983,7 +897,6 @@ func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeKubernetesModeSer
c.err = fmt.Errorf("failed to patch kubernetes mode service account without finalizer: %w", err)
return
}
-c.requeue = true
c.logger.Info("Removed finalizer from container mode kubernetes service account")
return
case err != nil && !kerrors.IsNotFound(err):
@@ -996,10 +909,7 @@ func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeKubernetesModeSer
}

func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeNoPermissionServiceAccountFinalizer(ctx context.Context) {
-ctx, span := otel.Tracer("arc").Start(ctx, "autoscalingRunnerSetFinalizerDependencyCleaner.removeNoPermissionServiceAccountFinalizer")
-defer span.End()
-
-if c.requeue || c.err != nil {
+if c.err != nil {
return
}

@@ -1030,7 +940,6 @@ func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeNoPermissionServi
c.err = fmt.Errorf("failed to patch service account without finalizer: %w", err)
return
}
-c.requeue = true
c.logger.Info("Removed finalizer from no permission service account", "name", serviceAccountName)
return
case err != nil && !kerrors.IsNotFound(err):
@@ -1043,10 +952,7 @@ func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeNoPermissionServi
}

func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeGitHubSecretFinalizer(ctx context.Context) {
-ctx, span := otel.Tracer("arc").Start(ctx, "autoscalingRunnerSetFinalizerDependencyCleaner.removeGitHubSecretFinalizer")
-defer span.End()
-
-if c.requeue || c.err != nil {
+if c.err != nil {
return
}

@@ -1077,7 +983,6 @@ func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeGitHubSecretFinal
c.err = fmt.Errorf("failed to patch GitHub secret without finalizer: %w", err)
return
}
-c.requeue = true
c.logger.Info("Removed finalizer from GitHub secret", "name", githubSecretName)
return
case err != nil && !kerrors.IsNotFound(err) && !kerrors.IsForbidden(err):
@@ -1090,10 +995,7 @@ func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeGitHubSecretFinal
}

func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeManagerRoleBindingFinalizer(ctx context.Context) {
-ctx, span := otel.Tracer("arc").Start(ctx, "autoscalingRunnerSetFinalizerDependencyCleaner.removeManagerRoleBindingFinalizer")
-defer span.End()
-
-if c.requeue || c.err != nil {
+if c.err != nil {
return
}

@@ -1124,7 +1026,6 @@ func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeManagerRoleBindin
c.err = fmt.Errorf("failed to patch manager role binding without finalizer: %w", err)
return
}
-c.requeue = true
c.logger.Info("Removed finalizer from manager role binding", "name", managerRoleBindingName)
return
case err != nil && !kerrors.IsNotFound(err):
@@ -1137,10 +1038,7 @@ func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeManagerRoleBindin
}

func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeManagerRoleFinalizer(ctx context.Context) {
-ctx, span := otel.Tracer("arc").Start(ctx, "autoscalingRunnerSetFinalizerDependencyCleaner.removeManagerRoleFinalizer")
-defer span.End()
-
-if c.requeue || c.err != nil {
+if c.err != nil {
return
}

@@ -1171,7 +1069,6 @@ func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeManagerRoleFinali
c.err = fmt.Errorf("failed to patch manager role without finalizer: %w", err)
return
}
-c.requeue = true
c.logger.Info("Removed finalizer from manager role", "name", managerRoleName)
return
case err != nil && !kerrors.IsNotFound(err):
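The hunks above drop the requeue/result() bookkeeping from the finalizer dependency cleaner in favor of a single accumulated error that each remove*Finalizer step checks before doing any work. A minimal, illustrative sketch of that short-circuiting accumulator pattern follows; the cleaner type and step names are stand-ins for this note, not the ARC types, and only the standard library is assumed.

package main

import (
	"errors"
	"fmt"
)

// cleaner mirrors the shape of the pattern: each step runs only while no
// earlier step has failed, and the first failure is kept for the caller.
type cleaner struct {
	err error
}

// Err reports the first error recorded by any step, if any.
func (c *cleaner) Err() error { return c.err }

// step skips its work once an earlier step has failed; otherwise it runs fn
// and records a wrapped error on failure.
func (c *cleaner) step(name string, fn func() error) {
	if c.err != nil {
		return
	}
	if err := fn(); err != nil {
		c.err = fmt.Errorf("%s: %w", name, err)
	}
}

func main() {
	c := &cleaner{}
	c.step("remove role binding finalizer", func() error { return nil })
	c.step("remove role finalizer", func() error { return errors.New("boom") })
	c.step("remove service account finalizer", func() error {
		// never reached: the previous step already failed
		return nil
	})
	fmt.Println(c.Err()) // prints: remove role finalizer: boom
}

The caller then only has to return c.Err() once, which is why the reconciler's wrapper above no longer needs the requeue flag or the extra error logging branch.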
@@ -34,7 +34,7 @@ import (
)

const (
-autoscalingRunnerSetTestTimeout = time.Second * 5
+autoscalingRunnerSetTestTimeout = time.Second * 20
autoscalingRunnerSetTestInterval = time.Millisecond * 250
autoscalingRunnerSetTestGitHubToken = "gh_token"
)
@@ -3,7 +3,6 @@ package actionsgithubcom
import (
"context"

-"go.opentelemetry.io/otel"
kclient "sigs.k8s.io/controller-runtime/pkg/client"
)

@@ -17,9 +16,6 @@ type patcher interface {
}

func patch[T object[T]](ctx context.Context, client patcher, obj T, update func(obj T)) error {
-ctx, span := otel.Tracer("arc").Start(ctx, "patch")
-defer span.End()
-
original := obj.DeepCopy()
update(obj)
return client.Patch(ctx, obj, kclient.MergeFrom(original))
@@ -30,9 +26,6 @@ type subResourcePatcher interface {
}

func patchSubResource[T object[T]](ctx context.Context, client subResourcePatcher, obj T, update func(obj T)) error {
-ctx, span := otel.Tracer("arc").Start(ctx, "patchSubResource")
-defer span.End()
-
original := obj.DeepCopy()
update(obj)
return client.Patch(ctx, obj, kclient.MergeFrom(original))
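The two generic helpers above wrap the same controller-runtime idiom: deep-copy the object, apply the mutation, and send a merge patch computed against the copy. A hedged usage sketch of that idiom follows; the function name, client value, and label key are assumptions for illustration and are not taken from the ARC source.

package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// addLabel shows the DeepCopy + MergeFrom pattern the generic patch helpers
// wrap: mutate the live object, then send only the computed merge patch.
func addLabel(ctx context.Context, c client.Client, pod *corev1.Pod, key, value string) error {
	original := pod.DeepCopy()
	if pod.Labels == nil {
		pod.Labels = map[string]string{}
	}
	pod.Labels[key] = value
	return c.Patch(ctx, pod, client.MergeFrom(original))
}

Sending a merge patch rather than a full Update keeps the write limited to the fields the callback touched, which is why the helpers take an update closure instead of a pre-mutated object.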
@@ -26,8 +26,6 @@ import (
"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
"github.com/actions/actions-runner-controller/github/actions"
"github.com/go-logr/logr"
-"go.opentelemetry.io/otel"
-otelCodes "go.opentelemetry.io/otel/codes"
"go.uber.org/multierr"
corev1 "k8s.io/api/core/v1"
kerrors "k8s.io/apimachinery/pkg/api/errors"
@@ -51,10 +49,10 @@ const (
// EphemeralRunnerReconciler reconciles a EphemeralRunner object
type EphemeralRunnerReconciler struct {
client.Client
Log logr.Logger
Scheme *runtime.Scheme
ActionsClient actions.MultiClient
-resourceBuilder resourceBuilder
+ResourceBuilder
}

// +kubebuilder:rbac:groups=actions.github.com,resources=ephemeralrunners,verbs=get;list;watch;create;update;patch;delete
@@ -70,9 +68,6 @@ type EphemeralRunnerReconciler struct {
// For more details, check Reconcile and its Result here:
// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.6.4/pkg/reconcile
func (r *EphemeralRunnerReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
-ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerReconciler.Reconcile")
-defer span.End()
-
log := r.Log.WithValues("ephemeralrunner", req.NamespacedName)

ephemeralRunner := new(v1alpha1.EphemeralRunner)
@@ -154,6 +149,19 @@ func (r *EphemeralRunnerReconciler) Reconcile(ctx context.Context, req ctrl.Requ
return ctrl.Result{}, nil
}

+if !controllerutil.ContainsFinalizer(ephemeralRunner, ephemeralRunnerFinalizerName) {
+log.Info("Adding finalizer")
+if err := patch(ctx, r.Client, ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) {
+controllerutil.AddFinalizer(obj, ephemeralRunnerFinalizerName)
+}); err != nil {
+log.Error(err, "Failed to update with finalizer set")
+return ctrl.Result{}, err
+}
+
+log.Info("Successfully added finalizer")
+return ctrl.Result{}, nil
+}
+
if !controllerutil.ContainsFinalizer(ephemeralRunner, ephemeralRunnerActionsFinalizerName) {
log.Info("Adding runner registration finalizer")
err := patch(ctx, r.Client, ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) {
@@ -165,18 +173,6 @@ func (r *EphemeralRunnerReconciler) Reconcile(ctx context.Context, req ctrl.Requ
}

log.Info("Successfully added runner registration finalizer")
-}
-
-if !controllerutil.ContainsFinalizer(ephemeralRunner, ephemeralRunnerFinalizerName) {
-log.Info("Adding finalizer")
-if err := patch(ctx, r.Client, ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) {
-controllerutil.AddFinalizer(obj, ephemeralRunnerFinalizerName)
-}); err != nil {
-log.Error(err, "Failed to update with finalizer set")
-return ctrl.Result{}, err
-}
-
-log.Info("Successfully added finalizer")
return ctrl.Result{}, nil
}

@@ -311,9 +307,6 @@ func (r *EphemeralRunnerReconciler) Reconcile(ctx context.Context, req ctrl.Requ
}

func (r *EphemeralRunnerReconciler) cleanupRunnerFromService(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) (ctrl.Result, error) {
-ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerReconciler.cleanupRunnerFromService")
-defer span.End()
-
if err := r.deleteRunnerFromService(ctx, ephemeralRunner, log); err != nil {
actionsError := &actions.ActionsError{}
if !errors.As(err, &actionsError) {
@@ -343,15 +336,6 @@ func (r *EphemeralRunnerReconciler) cleanupRunnerFromService(ctx context.Context
}

func (r *EphemeralRunnerReconciler) cleanupResources(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) (deleted bool, err error) {
-ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerReconciler.cleanupResources")
-defer span.End()
-defer func() {
-if err != nil {
-span.SetStatus(otelCodes.Error, "error")
-span.RecordError(err)
-}
-}()
-
log.Info("Cleaning up the runner pod")
pod := new(corev1.Pod)
err = r.Get(ctx, types.NamespacedName{Namespace: ephemeralRunner.Namespace, Name: ephemeralRunner.Name}, pod)
@@ -390,15 +374,6 @@ func (r *EphemeralRunnerReconciler) cleanupResources(ctx context.Context, epheme
}

func (r *EphemeralRunnerReconciler) cleanupContainerHooksResources(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) (done bool, err error) {
-ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerReconciler.cleanupContainerHooksResources")
-defer span.End()
-defer func() {
-if err != nil {
-span.SetStatus(otelCodes.Error, "error")
-span.RecordError(err)
-}
-}()
-
log.Info("Cleaning up runner linked pods")
done, err = r.cleanupRunnerLinkedPods(ctx, ephemeralRunner, log)
if err != nil {
@@ -419,15 +394,6 @@ func (r *EphemeralRunnerReconciler) cleanupContainerHooksResources(ctx context.C
}

func (r *EphemeralRunnerReconciler) cleanupRunnerLinkedPods(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) (done bool, err error) {
-ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerReconciler.cleanupRunnerLinkedPods")
-defer span.End()
-defer func() {
-if err != nil {
-span.SetStatus(otelCodes.Error, "error")
-span.RecordError(err)
-}
-}()
-
runnerLinedLabels := client.MatchingLabels(
map[string]string{
"runner-pod": ephemeralRunner.Name,
@@ -463,15 +429,6 @@ func (r *EphemeralRunnerReconciler) cleanupRunnerLinkedPods(ctx context.Context,
}

func (r *EphemeralRunnerReconciler) cleanupRunnerLinkedSecrets(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) (done bool, err error) {
-ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerReconciler.cleanupRunnerLinkedSecrets")
-defer span.End()
-defer func() {
-if err != nil {
-span.SetStatus(otelCodes.Error, "error")
-span.RecordError(err)
-}
-}()
-
runnerLinkedLabels := client.MatchingLabels(
map[string]string{
"runner-pod": ephemeralRunner.ObjectMeta.Name,
@@ -507,9 +464,6 @@ func (r *EphemeralRunnerReconciler) cleanupRunnerLinkedSecrets(ctx context.Conte
}

func (r *EphemeralRunnerReconciler) markAsFailed(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, errMessage string, reason string, log logr.Logger) error {
-ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerReconciler.markAsFailed")
-defer span.End()
-
log.Info("Updating ephemeral runner status to Failed")
if err := patchSubResource(ctx, r.Status(), ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) {
obj.Status.Phase = corev1.PodFailed
@@ -529,9 +483,6 @@ func (r *EphemeralRunnerReconciler) markAsFailed(ctx context.Context, ephemeralR
}

func (r *EphemeralRunnerReconciler) markAsFinished(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) error {
-ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerReconciler.markAsFinished")
-defer span.End()
-
log.Info("Updating ephemeral runner status to Finished")
if err := patchSubResource(ctx, r.Status(), ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) {
obj.Status.Phase = corev1.PodSucceeded
@@ -546,9 +497,6 @@ func (r *EphemeralRunnerReconciler) markAsFinished(ctx context.Context, ephemera
// deletePodAsFailed is responsible for deleting the pod and updating the .Status.Failures for tracking failure count.
// It should not be responsible for setting the status to Failed.
func (r *EphemeralRunnerReconciler) deletePodAsFailed(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, pod *corev1.Pod, log logr.Logger) error {
-ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerReconciler.deletePodAsFailed")
-defer span.End()
-
if pod.ObjectMeta.DeletionTimestamp.IsZero() {
log.Info("Deleting the ephemeral runner pod", "podId", pod.UID)
if err := r.Delete(ctx, pod); err != nil && !kerrors.IsNotFound(err) {
@@ -576,9 +524,6 @@ func (r *EphemeralRunnerReconciler) deletePodAsFailed(ctx context.Context, ephem
// updateStatusWithRunnerConfig fetches runtime configuration needed by the runner
// This method should always set .status.runnerId and .status.runnerJITConfig
func (r *EphemeralRunnerReconciler) updateStatusWithRunnerConfig(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) (*ctrl.Result, error) {
-ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerReconciler.updateStatusWithRunnerConfig")
-defer span.End()
-
// Runner is not registered with the service. We need to register it first
log.Info("Creating ephemeral runner JIT config")
actionsClient, err := r.actionsClientFor(ctx, ephemeralRunner)
@@ -589,6 +534,14 @@ func (r *EphemeralRunnerReconciler) updateStatusWithRunnerConfig(ctx context.Con
jitSettings := &actions.RunnerScaleSetJitRunnerSetting{
Name: ephemeralRunner.Name,
}
+
+for i := range ephemeralRunner.Spec.Spec.Containers {
+if ephemeralRunner.Spec.Spec.Containers[i].Name == EphemeralRunnerContainerName &&
+ephemeralRunner.Spec.Spec.Containers[i].WorkingDir != "" {
+jitSettings.WorkFolder = ephemeralRunner.Spec.Spec.Containers[i].WorkingDir
+}
+}
+
jitConfig, err := actionsClient.GenerateJitRunnerConfig(ctx, jitSettings, ephemeralRunner.Spec.RunnerScaleSetId)
if err != nil {
actionsError := &actions.ActionsError{}
@@ -661,9 +614,6 @@ func (r *EphemeralRunnerReconciler) updateStatusWithRunnerConfig(ctx context.Con
}

func (r *EphemeralRunnerReconciler) createPod(ctx context.Context, runner *v1alpha1.EphemeralRunner, secret *corev1.Secret, log logr.Logger) (ctrl.Result, error) {
-ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerReconciler.createPod")
-defer span.End()
-
var envs []corev1.EnvVar
if runner.Spec.ProxySecretRef != "" {
http := corev1.EnvVar{
@@ -713,7 +663,7 @@ func (r *EphemeralRunnerReconciler) createPod(ctx context.Context, runner *v1alp
}

log.Info("Creating new pod for ephemeral runner")
-newPod := r.resourceBuilder.newEphemeralRunnerPod(ctx, runner, secret, envs...)
+newPod := r.ResourceBuilder.newEphemeralRunnerPod(ctx, runner, secret, envs...)

if err := ctrl.SetControllerReference(runner, newPod, r.Scheme); err != nil {
log.Error(err, "Failed to set controller reference to a new pod")
@@ -737,11 +687,8 @@ func (r *EphemeralRunnerReconciler) createPod(ctx context.Context, runner *v1alp
}

func (r *EphemeralRunnerReconciler) createSecret(ctx context.Context, runner *v1alpha1.EphemeralRunner, log logr.Logger) (*ctrl.Result, error) {
-ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerReconciler.createSecret")
-defer span.End()
-
log.Info("Creating new secret for ephemeral runner")
-jitSecret := r.resourceBuilder.newEphemeralRunnerJitSecret(runner)
+jitSecret := r.ResourceBuilder.newEphemeralRunnerJitSecret(runner)

if err := ctrl.SetControllerReference(runner, jitSecret, r.Scheme); err != nil {
return &ctrl.Result{}, fmt.Errorf("failed to set controller reference: %v", err)
@@ -762,9 +709,6 @@ func (r *EphemeralRunnerReconciler) createSecret(ctx context.Context, runner *v1
// The event should not be re-queued since the termination status should be set
// before proceeding with reconciliation logic
func (r *EphemeralRunnerReconciler) updateRunStatusFromPod(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, pod *corev1.Pod, log logr.Logger) error {
-ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerReconciler.updateRunStatusFromPod")
-defer span.End()
-
if pod.Status.Phase == corev1.PodSucceeded || pod.Status.Phase == corev1.PodFailed {
return nil
}
@@ -788,9 +732,6 @@ func (r *EphemeralRunnerReconciler) updateRunStatusFromPod(ctx context.Context,
}

func (r *EphemeralRunnerReconciler) actionsClientFor(ctx context.Context, runner *v1alpha1.EphemeralRunner) (actions.ActionsService, error) {
-ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerReconciler.actionsClientFor")
-defer span.End()
-
secret := new(corev1.Secret)
if err := r.Get(ctx, types.NamespacedName{Namespace: runner.Namespace, Name: runner.Spec.GitHubConfigSecret}, secret); err != nil {
return nil, fmt.Errorf("failed to get secret: %w", err)
@@ -811,9 +752,6 @@ func (r *EphemeralRunnerReconciler) actionsClientFor(ctx context.Context, runner
}

func (r *EphemeralRunnerReconciler) actionsClientOptionsFor(ctx context.Context, runner *v1alpha1.EphemeralRunner) ([]actions.ClientOption, error) {
-ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerReconciler.actionsClientOptionsFor")
-defer span.End()
-
var opts []actions.ClientOption
if runner.Spec.Proxy != nil {
proxyFunc, err := runner.Spec.Proxy.ProxyFunc(func(s string) (*corev1.Secret, error) {
@@ -863,15 +801,6 @@ func (r *EphemeralRunnerReconciler) actionsClientOptionsFor(ctx context.Context,
// runnerRegisteredWithService checks if the runner is still registered with the service
// Returns found=false and err=nil if ephemeral runner does not exist in GitHub service and should be deleted
func (r EphemeralRunnerReconciler) runnerRegisteredWithService(ctx context.Context, runner *v1alpha1.EphemeralRunner, log logr.Logger) (found bool, err error) {
-ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerReconciler.runnerRegisteredWithService")
-defer span.End()
-defer func() {
-if err != nil {
-span.SetStatus(otelCodes.Error, "error")
-span.RecordError(err)
-}
-}()
-
actionsClient, err := r.actionsClientFor(ctx, runner)
if err != nil {
return false, fmt.Errorf("failed to get Actions client for ScaleSet: %w", err)
@@ -899,9 +828,6 @@ func (r EphemeralRunnerReconciler) runnerRegisteredWithService(ctx context.Conte
}

func (r *EphemeralRunnerReconciler) deleteRunnerFromService(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) error {
-ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerReconciler.deleteRunnerFromService")
-defer span.End()
-
client, err := r.actionsClientFor(ctx, ephemeralRunner)
if err != nil {
return fmt.Errorf("failed to get actions client for runner: %v", err)
@@ -918,13 +844,14 @@ func (r *EphemeralRunnerReconciler) deleteRunnerFromService(ctx context.Context,
}

// SetupWithManager sets up the controller with the Manager.
-func (r *EphemeralRunnerReconciler) SetupWithManager(mgr ctrl.Manager) error {
-return ctrl.NewControllerManagedBy(mgr).
-For(&v1alpha1.EphemeralRunner{}).
-Owns(&corev1.Pod{}).
-WithEventFilter(predicate.ResourceVersionChangedPredicate{}).
-Named("ephemeral-runner-controller").
-Complete(r)
+func (r *EphemeralRunnerReconciler) SetupWithManager(mgr ctrl.Manager, opts ...Option) error {
+return builderWithOptions(
+ctrl.NewControllerManagedBy(mgr).
+For(&v1alpha1.EphemeralRunner{}).
+Owns(&corev1.Pod{}).
+WithEventFilter(predicate.ResourceVersionChangedPredicate{}),
+opts,
+).Complete(r)
}

func runnerContainerStatus(pod *corev1.Pod) *corev1.ContainerStatus {
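The final hunk above threads variadic options through SetupWithManager via a builderWithOptions helper. The following sketch shows one way such a functional-options pattern can be written against controller-runtime; Option, WithMaxConcurrentReconciles, and builderWithOptions here are illustrative stand-ins under that assumption, not the definitions used by ARC.

package example

import (
	"sigs.k8s.io/controller-runtime/pkg/builder"
	"sigs.k8s.io/controller-runtime/pkg/controller"
)

// Option mutates a controller builder before it is completed.
type Option func(*builder.Builder)

// WithMaxConcurrentReconciles caps how many reconciles may run in parallel.
func WithMaxConcurrentReconciles(n int) Option {
	return func(b *builder.Builder) {
		b.WithOptions(controller.Options{MaxConcurrentReconciles: n})
	}
}

// builderWithOptions applies each option in order and returns the builder,
// so callers can chain .Complete(r) on the result.
func builderWithOptions(b *builder.Builder, opts []Option) *builder.Builder {
	for _, opt := range opts {
		opt(b)
	}
	return b
}

This keeps the reconciler's SetupWithManager signature stable while letting callers tune builder behavior per controller.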
@@ -29,9 +29,9 @@ import (
)

const (
-timeout = time.Second * 10
-interval = time.Millisecond * 250
+ephemeralRunnerTimeout = time.Second * 20
+ephemeralRunnerInterval = time.Millisecond * 250
runnerImage = "ghcr.io/actions/actions-runner:latest"
)

func newExampleRunner(name, namespace, configSecretName string) *v1alpha1.EphemeralRunner {
@@ -133,9 +133,9 @@ var _ = Describe("EphemeralRunner", func() {
n := len(created.Finalizers) // avoid capacity mismatch
return created.Finalizers[:n:n], nil
},
-timeout,
-interval,
-).Should(BeEquivalentTo([]string{ephemeralRunnerActionsFinalizerName, ephemeralRunnerFinalizerName}))
+ephemeralRunnerTimeout,
+ephemeralRunnerInterval,
+).Should(BeEquivalentTo([]string{ephemeralRunnerFinalizerName, ephemeralRunnerActionsFinalizerName}))

Eventually(
func() (bool, error) {
@@ -147,8 +147,8 @@ var _ = Describe("EphemeralRunner", func() {
_, ok := secret.Data[jitTokenKey]
return ok, nil
},
-timeout,
-interval,
+ephemeralRunnerTimeout,
+ephemeralRunnerInterval,
).Should(BeEquivalentTo(true))

Eventually(
@@ -160,8 +160,8 @@ var _ = Describe("EphemeralRunner", func() {

return pod.Name, nil
},
-timeout,
-interval,
+ephemeralRunnerTimeout,
+ephemeralRunnerInterval,
).Should(BeEquivalentTo(ephemeralRunner.Name))
})

@@ -184,8 +184,8 @@ var _ = Describe("EphemeralRunner", func() {
}
return true, nil
},
-timeout,
-interval,
+ephemeralRunnerTimeout,
+ephemeralRunnerInterval,
).Should(BeEquivalentTo(true))
})

@@ -203,7 +203,7 @@ var _ = Describe("EphemeralRunner", func() {
return "", nil
}
return updated.Status.Phase, nil
-}, timeout, interval).Should(BeEquivalentTo(corev1.PodFailed))
+}, ephemeralRunnerTimeout, ephemeralRunnerInterval).Should(BeEquivalentTo(corev1.PodFailed))
Expect(updated.Status.Reason).Should(Equal("InvalidPod"))
Expect(updated.Status.Message).Should(Equal("Failed to create the pod: pods \"invalid-ephemeral-runner\" is forbidden: no PriorityClass with name notexist was found"))
})
@@ -247,8 +247,8 @@ var _ = Describe("EphemeralRunner", func() {
}
return true, nil
},
-timeout,
-interval,
+ephemeralRunnerTimeout,
+ephemeralRunnerInterval,
).Should(BeEquivalentTo(true))

// create runner linked secret
@@ -273,8 +273,8 @@ var _ = Describe("EphemeralRunner", func() {
}
return true, nil
},
-timeout,
-interval,
+ephemeralRunnerTimeout,
+ephemeralRunnerInterval,
).Should(BeEquivalentTo(true))

err = k8sClient.Delete(ctx, ephemeralRunner)
@@ -289,8 +289,8 @@ var _ = Describe("EphemeralRunner", func() {
}
return kerrors.IsNotFound(err), nil
},
-timeout,
-interval,
+ephemeralRunnerTimeout,
+ephemeralRunnerInterval,
).Should(BeEquivalentTo(true))

Eventually(
@@ -302,8 +302,8 @@ var _ = Describe("EphemeralRunner", func() {
}
return kerrors.IsNotFound(err), nil
},
-timeout,
-interval,
+ephemeralRunnerTimeout,
+ephemeralRunnerInterval,
).Should(BeEquivalentTo(true))

Eventually(
@@ -315,8 +315,8 @@ var _ = Describe("EphemeralRunner", func() {
}
return kerrors.IsNotFound(err), nil
},
-timeout,
-interval,
+ephemeralRunnerTimeout,
+ephemeralRunnerInterval,
).Should(BeEquivalentTo(true))

Eventually(
@@ -328,8 +328,8 @@ var _ = Describe("EphemeralRunner", func() {
}
return kerrors.IsNotFound(err), nil
},
-timeout,
-interval,
+ephemeralRunnerTimeout,
+ephemeralRunnerInterval,
).Should(BeEquivalentTo(true))

Eventually(
@@ -341,8 +341,8 @@ var _ = Describe("EphemeralRunner", func() {
}
return kerrors.IsNotFound(err), nil
},
-timeout,
-interval,
+ephemeralRunnerTimeout,
+ephemeralRunnerInterval,
).Should(BeEquivalentTo(true))
})

@@ -356,8 +356,8 @@ var _ = Describe("EphemeralRunner", func() {
}
return updatedEphemeralRunner.Status.RunnerId, nil
},
-timeout,
-interval,
+ephemeralRunnerTimeout,
+ephemeralRunnerInterval,
).Should(BeNumerically(">", 0))
})

@@ -371,8 +371,8 @@ var _ = Describe("EphemeralRunner", func() {
}
return true, nil
},
-timeout,
-interval,
+ephemeralRunnerTimeout,
+ephemeralRunnerInterval,
).Should(BeEquivalentTo(true))

for _, phase := range []corev1.PodPhase{corev1.PodRunning, corev1.PodPending} {
@@ -395,8 +395,8 @@ var _ = Describe("EphemeralRunner", func() {
}
return updated.Status.Phase, nil
},
-timeout,
-interval,
+ephemeralRunnerTimeout,
+ephemeralRunnerInterval,
).Should(BeEquivalentTo(phase))
}
})
@@ -411,8 +411,8 @@ var _ = Describe("EphemeralRunner", func() {
}
return true, nil
},
-timeout,
-interval,
+ephemeralRunnerTimeout,
+ephemeralRunnerInterval,
).Should(BeEquivalentTo(true))

pod.Status.Phase = corev1.PodRunning
@@ -427,7 +427,7 @@ var _ = Describe("EphemeralRunner", func() {
}
return updated.Status.Phase, nil
},
-timeout,
+ephemeralRunnerTimeout,
).Should(BeEquivalentTo(""))
})

@@ -462,8 +462,8 @@ var _ = Describe("EphemeralRunner", func() {
Expect(err).To(BeNil(), "Failed to update pod status")
return false, fmt.Errorf("pod haven't failed for 5 times.")
},
-timeout,
-interval,
+ephemeralRunnerTimeout,
+ephemeralRunnerInterval,
).Should(BeEquivalentTo(true), "we should stop creating pod after 5 failures")

// In case we still have pod created due to controller-runtime cache delay, mark the container as exited
@@ -488,7 +488,7 @@ var _ = Describe("EphemeralRunner", func() {
return "", err
}
return updated.Status.Reason, nil
-}, timeout, interval).Should(BeEquivalentTo("TooManyPodFailures"), "Reason should be TooManyPodFailures")
+}, ephemeralRunnerTimeout, ephemeralRunnerInterval).Should(BeEquivalentTo("TooManyPodFailures"), "Reason should be TooManyPodFailures")

// EphemeralRunner should not have any pod
Eventually(func() (bool, error) {
@@ -497,7 +497,7 @@ var _ = Describe("EphemeralRunner", func() {
return false, nil
}
return kerrors.IsNotFound(err), nil
-}, timeout, interval).Should(BeEquivalentTo(true))
+}, ephemeralRunnerTimeout, ephemeralRunnerInterval).Should(BeEquivalentTo(true))
})

It("It should re-create pod on eviction", func() {
@@ -510,8 +510,8 @@ var _ = Describe("EphemeralRunner", func() {
}
return true, nil
},
-timeout,
-interval,
+ephemeralRunnerTimeout,
+ephemeralRunnerInterval,
).Should(BeEquivalentTo(true))

pod.Status.Phase = corev1.PodFailed
@@ -530,7 +530,7 @@ var _ = Describe("EphemeralRunner", func() {
return false, err
}
return len(updated.Status.Failures) == 1, nil
-}, timeout, interval).Should(BeEquivalentTo(true))
+}, ephemeralRunnerTimeout, ephemeralRunnerInterval).Should(BeEquivalentTo(true))

// should re-create after failure
Eventually(
@@ -541,8 +541,8 @@ var _ = Describe("EphemeralRunner", func() {
}
return true, nil
},
-timeout,
-interval,
+ephemeralRunnerTimeout,
+ephemeralRunnerInterval,
).Should(BeEquivalentTo(true))
})

@@ -555,8 +555,8 @@ var _ = Describe("EphemeralRunner", func() {
}
return true, nil
},
-timeout,
-interval,
+ephemeralRunnerTimeout,
+ephemeralRunnerInterval,
).Should(BeEquivalentTo(true))

pod.Status.ContainerStatuses = append(pod.Status.ContainerStatuses, corev1.ContainerStatus{
@@ -577,7 +577,7 @@ var _ = Describe("EphemeralRunner", func() {
return false, err
}
return len(updated.Status.Failures) == 1, nil
-}, timeout, interval).Should(BeEquivalentTo(true))
+}, ephemeralRunnerTimeout, ephemeralRunnerInterval).Should(BeEquivalentTo(true))

// should re-create after failure
Eventually(
@@ -588,8 +588,8 @@ var _ = Describe("EphemeralRunner", func() {
}
return true, nil
},
-timeout,
-interval,
+ephemeralRunnerTimeout,
+ephemeralRunnerInterval,
).Should(BeEquivalentTo(true))
})

@@ -602,8 +602,8 @@ var _ = Describe("EphemeralRunner", func() {
}
return true, nil
},
-timeout,
-interval,
+ephemeralRunnerTimeout,
+ephemeralRunnerInterval,
).Should(BeEquivalentTo(true))

// first set phase to running
@@ -627,8 +627,8 @@ var _ = Describe("EphemeralRunner", func() {
}
return updated.Status.Phase, nil
},
-timeout,
-interval,
+ephemeralRunnerTimeout,
+ephemeralRunnerInterval,
).Should(BeEquivalentTo(corev1.PodRunning))

// set phase to succeeded
@@ -644,7 +644,7 @@ var _ = Describe("EphemeralRunner", func() {
}
return updated.Status.Phase, nil
},
-timeout,
+ephemeralRunnerTimeout,
).Should(BeEquivalentTo(corev1.PodRunning))
})
})
@@ -700,7 +700,7 @@ var _ = Describe("EphemeralRunner", func() {
return false, err
}
|
}
|
||||||
return true, nil
|
return true, nil
|
||||||
}, timeout, interval).Should(BeEquivalentTo(true))
|
}, ephemeralRunnerTimeout, ephemeralRunnerInterval).Should(BeEquivalentTo(true))
|
||||||
|
|
||||||
pod.Status.ContainerStatuses = append(pod.Status.ContainerStatuses, corev1.ContainerStatus{
|
pod.Status.ContainerStatuses = append(pod.Status.ContainerStatuses, corev1.ContainerStatus{
|
||||||
Name: EphemeralRunnerContainerName,
|
Name: EphemeralRunnerContainerName,
|
||||||
@@ -720,7 +720,7 @@ var _ = Describe("EphemeralRunner", func() {
|
|||||||
return "", nil
|
return "", nil
|
||||||
}
|
}
|
||||||
return updated.Status.Phase, nil
|
return updated.Status.Phase, nil
|
||||||
}, timeout, interval).Should(BeEquivalentTo(corev1.PodSucceeded))
|
}, ephemeralRunnerTimeout, ephemeralRunnerInterval).Should(BeEquivalentTo(corev1.PodSucceeded))
|
||||||
})
|
})
|
||||||
})
|
})
|
||||||
|
|
||||||
@@ -800,7 +800,7 @@ var _ = Describe("EphemeralRunner", func() {
|
|||||||
return proxySuccessfulllyCalled
|
return proxySuccessfulllyCalled
|
||||||
},
|
},
|
||||||
2*time.Second,
|
2*time.Second,
|
||||||
interval,
|
ephemeralRunnerInterval,
|
||||||
).Should(BeEquivalentTo(true))
|
).Should(BeEquivalentTo(true))
|
||||||
})
|
})
|
||||||
|
|
||||||
@@ -825,8 +825,8 @@ var _ = Describe("EphemeralRunner", func() {
|
|||||||
err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, pod)
|
err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, pod)
|
||||||
g.Expect(err).To(BeNil(), "failed to get ephemeral runner pod")
|
g.Expect(err).To(BeNil(), "failed to get ephemeral runner pod")
|
||||||
},
|
},
|
||||||
timeout,
|
ephemeralRunnerTimeout,
|
||||||
interval,
|
ephemeralRunnerInterval,
|
||||||
).Should(Succeed(), "failed to get ephemeral runner pod")
|
).Should(Succeed(), "failed to get ephemeral runner pod")
|
||||||
|
|
||||||
Expect(pod.Spec.Containers[0].Env).To(ContainElement(corev1.EnvVar{
|
Expect(pod.Spec.Containers[0].Env).To(ContainElement(corev1.EnvVar{
|
||||||
@@ -958,7 +958,7 @@ var _ = Describe("EphemeralRunner", func() {
|
|||||||
return serverSuccessfullyCalled
|
return serverSuccessfullyCalled
|
||||||
},
|
},
|
||||||
2*time.Second,
|
2*time.Second,
|
||||||
interval,
|
ephemeralRunnerInterval,
|
||||||
).Should(BeTrue(), "failed to contact server")
|
).Should(BeTrue(), "failed to contact server")
|
||||||
})
|
})
|
||||||
})
|
})
|
||||||
|
|||||||
@@ -28,7 +28,6 @@ import (
"github.com/actions/actions-runner-controller/controllers/actions.github.com/metrics"
"github.com/actions/actions-runner-controller/github/actions"
"github.com/go-logr/logr"
-"go.opentelemetry.io/otel"
"go.uber.org/multierr"
corev1 "k8s.io/api/core/v1"
kerrors "k8s.io/apimachinery/pkg/api/errors"
@@ -54,7 +53,7 @@ type EphemeralRunnerSetReconciler struct {

PublishMetrics bool

-resourceBuilder resourceBuilder
+ResourceBuilder
}

//+kubebuilder:rbac:groups=actions.github.com,resources=ephemeralrunnersets,verbs=get;list;watch;create;update;patch;delete
@@ -76,9 +75,6 @@ type EphemeralRunnerSetReconciler struct {
// be to bring the count of EphemeralRunners to the desired one, not to patch this resource
// until it is safe to do so
func (r *EphemeralRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
-ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerSetReconciler.Reconcile")
-defer span.End()
-
log := r.Log.WithValues("ephemeralrunnerset", req.NamespacedName)

ephemeralRunnerSet := new(v1alpha1.EphemeralRunnerSet)
@@ -254,9 +250,6 @@ func (r *EphemeralRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl.R
}

func (r *EphemeralRunnerSetReconciler) cleanupFinishedEphemeralRunners(ctx context.Context, finishedEphemeralRunners []*v1alpha1.EphemeralRunner, log logr.Logger) error {
-ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerSetReconciler.cleanupFinishedEphemeralRunners")
-defer span.End()
-
// cleanup finished runners and proceed
var errs []error
for i := range finishedEphemeralRunners {
@@ -272,9 +265,6 @@ func (r *EphemeralRunnerSetReconciler) cleanupFinishedEphemeralRunners(ctx conte
}

func (r *EphemeralRunnerSetReconciler) cleanUpProxySecret(ctx context.Context, ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet, log logr.Logger) error {
-ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerSetReconciler.cleanUpProxySecret")
-defer span.End()
-
if ephemeralRunnerSet.Spec.EphemeralRunnerSpec.Proxy == nil {
return nil
}
@@ -294,9 +284,6 @@ func (r *EphemeralRunnerSetReconciler) cleanUpProxySecret(ctx context.Context, e
}

func (r *EphemeralRunnerSetReconciler) cleanUpEphemeralRunners(ctx context.Context, ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet, log logr.Logger) (bool, error) {
-ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerSetReconciler.cleanUpEphemeralRunners")
-defer span.End()
-
ephemeralRunnerList := new(v1alpha1.EphemeralRunnerList)
err := r.List(ctx, ephemeralRunnerList, client.InNamespace(ephemeralRunnerSet.Namespace), client.MatchingFields{resourceOwnerKey: ephemeralRunnerSet.Name})
if err != nil {
@@ -370,13 +357,10 @@ func (r *EphemeralRunnerSetReconciler) cleanUpEphemeralRunners(ctx context.Conte

// createEphemeralRunners provisions `count` number of v1alpha1.EphemeralRunner resources in the cluster.
func (r *EphemeralRunnerSetReconciler) createEphemeralRunners(ctx context.Context, runnerSet *v1alpha1.EphemeralRunnerSet, count int, log logr.Logger) error {
-ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerSetReconciler.createEphemeralRunners")
-defer span.End()
-
// Track multiple errors at once and return the bundle.
errs := make([]error, 0)
for i := 0; i < count; i++ {
-ephemeralRunner := r.resourceBuilder.newEphemeralRunner(runnerSet)
+ephemeralRunner := r.ResourceBuilder.newEphemeralRunner(runnerSet)
if runnerSet.Spec.EphemeralRunnerSpec.Proxy != nil {
ephemeralRunner.Spec.ProxySecretRef = proxyEphemeralRunnerSetSecretName(runnerSet)
}
@@ -402,9 +386,6 @@ func (r *EphemeralRunnerSetReconciler) createEphemeralRunners(ctx context.Contex
}

func (r *EphemeralRunnerSetReconciler) createProxySecret(ctx context.Context, ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet, log logr.Logger) error {
-ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerSetReconciler.createProxySecret")
-defer span.End()
-
proxySecretData, err := ephemeralRunnerSet.Spec.EphemeralRunnerSpec.Proxy.ToSecretData(func(s string) (*corev1.Secret, error) {
secret := new(corev1.Secret)
err := r.Get(ctx, types.NamespacedName{Namespace: ephemeralRunnerSet.Namespace, Name: s}, secret)
@@ -450,9 +431,6 @@ func (r *EphemeralRunnerSetReconciler) createProxySecret(ctx context.Context, ep
// When this happens, the next reconcile loop will try to delete the remaining ephemeral runners
// after we get notified by any of the `v1alpha1.EphemeralRunner.Status` updates.
func (r *EphemeralRunnerSetReconciler) deleteIdleEphemeralRunners(ctx context.Context, ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet, pendingEphemeralRunners, runningEphemeralRunners []*v1alpha1.EphemeralRunner, count int, log logr.Logger) error {
-ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerSetReconciler.deleteIdleEphemeralRunners")
-defer span.End()
-
if count <= 0 {
return nil
}
@@ -499,9 +477,6 @@ func (r *EphemeralRunnerSetReconciler) deleteIdleEphemeralRunners(ctx context.Co
}

func (r *EphemeralRunnerSetReconciler) deleteEphemeralRunnerWithActionsClient(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, actionsClient actions.ActionsService, log logr.Logger) (bool, error) {
-ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerSetReconciler.deleteEphemeralRunnerWithActionsClient")
-defer span.End()
-
if err := actionsClient.RemoveRunner(ctx, int64(ephemeralRunner.Status.RunnerId)); err != nil {
actionsError := &actions.ActionsError{}
if !errors.As(err, &actionsError) {
@@ -528,9 +503,6 @@ func (r *EphemeralRunnerSetReconciler) deleteEphemeralRunnerWithActionsClient(ct
}

func (r *EphemeralRunnerSetReconciler) actionsClientFor(ctx context.Context, rs *v1alpha1.EphemeralRunnerSet) (actions.ActionsService, error) {
-ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerSetReconciler.actionsClientFor")
-defer span.End()
-
secret := new(corev1.Secret)
if err := r.Get(ctx, types.NamespacedName{Namespace: rs.Namespace, Name: rs.Spec.EphemeralRunnerSpec.GitHubConfigSecret}, secret); err != nil {
return nil, fmt.Errorf("failed to get secret: %w", err)
@@ -551,9 +523,6 @@ func (r *EphemeralRunnerSetReconciler) actionsClientFor(ctx context.Context, rs
}

func (r *EphemeralRunnerSetReconciler) actionsClientOptionsFor(ctx context.Context, rs *v1alpha1.EphemeralRunnerSet) ([]actions.ClientOption, error) {
-ctx, span := otel.Tracer("arc").Start(ctx, "EphemeralRunnerSetReconciler.actionsClientOptionsFor")
-defer span.End()
-
var opts []actions.ClientOption
if rs.Spec.EphemeralRunnerSpec.Proxy != nil {
proxyFunc, err := rs.Spec.EphemeralRunnerSpec.Proxy.ProxyFunc(func(s string) (*corev1.Secret, error) {

@@ -30,7 +30,7 @@ import (
)

const (
-ephemeralRunnerSetTestTimeout = time.Second * 10
+ephemeralRunnerSetTestTimeout = time.Second * 20
ephemeralRunnerSetTestInterval = time.Millisecond * 250
ephemeralRunnerSetTestGitHubToken = "gh_token"
)
@@ -794,7 +794,6 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
}

return len(runnerList.Items), nil
-
},
ephemeralRunnerSetTestTimeout,
ephemeralRunnerSetTestInterval,
@@ -1359,7 +1358,7 @@ var _ = Describe("Test EphemeralRunnerSet controller with proxy settings", func(
return proxySuccessfulllyCalled
},
2*time.Second,
-interval,
+ephemeralRunnerInterval,
).Should(BeEquivalentTo(true))
})
})

90 controllers/actions.github.com/options.go (Normal file)
@@ -0,0 +1,90 @@
+package actionsgithubcom
+
+import (
+"fmt"
+"os"
+"strconv"
+
+"sigs.k8s.io/controller-runtime/pkg/builder"
+"sigs.k8s.io/controller-runtime/pkg/controller"
+)
+
+// Options is the optional configuration for the controllers, which can be
+// set via command-line flags or environment variables.
+type Options struct {
+// RunnerMaxConcuncurrentReconciles is the maximum number of concurrent Reconciles which can be run
+// by the EphemeralRunnerController.
+RunnerMaxConcuncurrentReconciles int
+}
+
+// OptionsWithDefault returns the default options.
+// This is here to maintain the options and their default values in one place,
+// rather than having to correlate those in multiple places.
+func OptionsWithDefault() Options {
+return Options{
+RunnerMaxConcuncurrentReconciles: 1,
+}
+}
+
+// LoadEnv loads the options from the environment variables.
+// This updates the option value only if the environment variable is set.
+// If the option is already set (via a command-line flag), the value from the environment variable takes precedence.
+func (o *Options) LoadEnv() error {
+v, err := o.getEnvInt("RUNNER_MAX_CONCURRENT_RECONCILES")
+if err != nil {
+return err
+}
+
+if v != nil {
+o.RunnerMaxConcuncurrentReconciles = *v
+}
+
+return nil
+}
+
+func (o *Options) getEnvInt(name string) (*int, error) {
+s := os.Getenv(name)
+if s == "" {
+return nil, nil
+}
+
+v, err := strconv.Atoi(s)
+if err != nil {
+return nil, fmt.Errorf("failed to convert %s=%s to int: %w", name, s, err)
+}
+
+return &v, nil
+}
+
+type Option func(*controller.Options)
+
+// WithMaxConcurrentReconciles sets the maximum number of concurrent Reconciles which can be run.
+//
+// This is useful to improve the throughput of the controller, but it may also increase the load on the API server and
+// the external service (e.g. GitHub API). The default value is 1, as defined by the controller-runtime.
+//
+// See https://github.com/actions/actions-runner-controller/issues/3021 for more information
+// on real-world use cases and the potential impact of this option.
+func WithMaxConcurrentReconciles(n int) Option {
+return func(b *controller.Options) {
+b.MaxConcurrentReconciles = n
+}
+}
+
+// builderWithOptions applies the given options to the provided builder, if any.
+// This is a helper function to avoid the need to import the controller-runtime package in every reconciler source file
+// and the command package that creates the controller.
+// This is also useful for reducing code duplication around setting controller options in
+// multiple reconcilers.
+func builderWithOptions(b *builder.Builder, opts []Option) *builder.Builder {
+if len(opts) == 0 {
+return b
+}
+
+var controllerOpts controller.Options
+for _, opt := range opts {
+opt(&controllerOpts)
+}
+
+return b.WithOptions(controllerOpts)
+}
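The new file above only defines the option plumbing; this compare view does not show the call sites that consume it. As a hedged, in-package sketch of the intended flow (the reconciler type and the watched resource below are illustrative stand-ins, not taken from this diff), a setup function could read the environment, translate the value into a controller-runtime option, and apply it through builderWithOptions:

package actionsgithubcom

import (
	"context"

	"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
	ctrl "sigs.k8s.io/controller-runtime"
)

// ExampleReconciler is a placeholder for one of the real reconcilers in this package.
type ExampleReconciler struct{}

func (r *ExampleReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	return ctrl.Result{}, nil
}

// SetupWithManager sketches how RUNNER_MAX_CONCURRENT_RECONCILES could reach the
// controller: defaults first, then the environment, then builderWithOptions.
func (r *ExampleReconciler) SetupWithManager(mgr ctrl.Manager) error {
	opts := OptionsWithDefault()
	if err := opts.LoadEnv(); err != nil {
		return err
	}

	return builderWithOptions(
		ctrl.NewControllerManagedBy(mgr).For(&v1alpha1.EphemeralRunner{}),
		[]Option{WithMaxConcurrentReconciles(opts.RunnerMaxConcuncurrentReconciles)},
	).Complete(r)
}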
@@ -8,6 +8,7 @@ import (
"math"
"net"
"strconv"
+"strings"

"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
"github.com/actions/actions-runner-controller/build"
@@ -15,7 +16,6 @@ import (
"github.com/actions/actions-runner-controller/github/actions"
"github.com/actions/actions-runner-controller/hash"
"github.com/actions/actions-runner-controller/logging"
-"go.opentelemetry.io/otel"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -69,9 +69,11 @@ func SetListenerEntrypoint(entrypoint string) {
}
}

-type resourceBuilder struct{}
+type ResourceBuilder struct {
+ExcludeLabelPropagationPrefixes []string
+}

-func (b *resourceBuilder) newAutoScalingListener(autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet, namespace, image string, imagePullSecrets []corev1.LocalObjectReference) (*v1alpha1.AutoscalingListener, error) {
+func (b *ResourceBuilder) newAutoScalingListener(autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet, namespace, image string, imagePullSecrets []corev1.LocalObjectReference) (*v1alpha1.AutoscalingListener, error) {
runnerScaleSetId, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdAnnotationKey])
if err != nil {
return nil, err
@@ -86,7 +88,7 @@ func (b *resourceBuilder) newAutoScalingListener(autoscalingRunnerSet *v1alpha1.
effectiveMinRunners = *autoscalingRunnerSet.Spec.MinRunners
}

-labels := mergeLabels(autoscalingRunnerSet.Labels, map[string]string{
+labels := b.mergeLabels(autoscalingRunnerSet.Labels, map[string]string{
LabelKeyGitHubScaleSetNamespace: autoscalingRunnerSet.Namespace,
LabelKeyGitHubScaleSetName: autoscalingRunnerSet.Name,
LabelKeyKubernetesPartOf: labelValueKubernetesPartOf,
@@ -151,7 +153,7 @@ func (lm *listenerMetricsServerConfig) containerPort() (corev1.ContainerPort, er
}, nil
}

-func (b *resourceBuilder) newScaleSetListenerConfig(autoscalingListener *v1alpha1.AutoscalingListener, secret *corev1.Secret, metricsConfig *listenerMetricsServerConfig, cert string) (*corev1.Secret, error) {
+func (b *ResourceBuilder) newScaleSetListenerConfig(autoscalingListener *v1alpha1.AutoscalingListener, secret *corev1.Secret, metricsConfig *listenerMetricsServerConfig, cert string) (*corev1.Secret, error) {
var (
metricsAddr = ""
metricsEndpoint = ""
@@ -214,7 +216,7 @@ func (b *resourceBuilder) newScaleSetListenerConfig(autoscalingListener *v1alpha
}, nil
}

-func (b *resourceBuilder) newScaleSetListenerPod(autoscalingListener *v1alpha1.AutoscalingListener, podConfig *corev1.Secret, serviceAccount *corev1.ServiceAccount, secret *corev1.Secret, metricsConfig *listenerMetricsServerConfig, envs ...corev1.EnvVar) (*corev1.Pod, error) {
+func (b *ResourceBuilder) newScaleSetListenerPod(autoscalingListener *v1alpha1.AutoscalingListener, podConfig *corev1.Secret, serviceAccount *corev1.ServiceAccount, secret *corev1.Secret, metricsConfig *listenerMetricsServerConfig, envs ...corev1.EnvVar) (*corev1.Pod, error) {
listenerEnv := []corev1.EnvVar{
{
Name: "LISTENER_CONFIG_PATH",
@@ -407,12 +409,12 @@ func mergeListenerContainer(base, from *corev1.Container) {
base.TTY = from.TTY
}

-func (b *resourceBuilder) newScaleSetListenerServiceAccount(autoscalingListener *v1alpha1.AutoscalingListener) *corev1.ServiceAccount {
+func (b *ResourceBuilder) newScaleSetListenerServiceAccount(autoscalingListener *v1alpha1.AutoscalingListener) *corev1.ServiceAccount {
return &corev1.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{
Name: scaleSetListenerServiceAccountName(autoscalingListener),
Namespace: autoscalingListener.Namespace,
-Labels: mergeLabels(autoscalingListener.Labels, map[string]string{
+Labels: b.mergeLabels(autoscalingListener.Labels, map[string]string{
LabelKeyGitHubScaleSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
LabelKeyGitHubScaleSetName: autoscalingListener.Spec.AutoscalingRunnerSetName,
}),
@@ -420,14 +422,14 @@ func (b *resourceBuilder) newScaleSetListenerServiceAccount(autoscalingListener
}
}

-func (b *resourceBuilder) newScaleSetListenerRole(autoscalingListener *v1alpha1.AutoscalingListener) *rbacv1.Role {
+func (b *ResourceBuilder) newScaleSetListenerRole(autoscalingListener *v1alpha1.AutoscalingListener) *rbacv1.Role {
rules := rulesForListenerRole([]string{autoscalingListener.Spec.EphemeralRunnerSetName})
rulesHash := hash.ComputeTemplateHash(&rules)
newRole := &rbacv1.Role{
ObjectMeta: metav1.ObjectMeta{
Name: scaleSetListenerRoleName(autoscalingListener),
Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
-Labels: mergeLabels(autoscalingListener.Labels, map[string]string{
+Labels: b.mergeLabels(autoscalingListener.Labels, map[string]string{
LabelKeyGitHubScaleSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
LabelKeyGitHubScaleSetName: autoscalingListener.Spec.AutoscalingRunnerSetName,
labelKeyListenerNamespace: autoscalingListener.Namespace,
@@ -441,7 +443,7 @@ func (b *resourceBuilder) newScaleSetListenerRole(autoscalingListener *v1alpha1.
return newRole
}

-func (b *resourceBuilder) newScaleSetListenerRoleBinding(autoscalingListener *v1alpha1.AutoscalingListener, listenerRole *rbacv1.Role, serviceAccount *corev1.ServiceAccount) *rbacv1.RoleBinding {
+func (b *ResourceBuilder) newScaleSetListenerRoleBinding(autoscalingListener *v1alpha1.AutoscalingListener, listenerRole *rbacv1.Role, serviceAccount *corev1.ServiceAccount) *rbacv1.RoleBinding {
roleRef := rbacv1.RoleRef{
Kind: "Role",
Name: listenerRole.Name,
@@ -461,7 +463,7 @@ func (b *resourceBuilder) newScaleSetListenerRoleBinding(autoscalingListener *v1
ObjectMeta: metav1.ObjectMeta{
Name: scaleSetListenerRoleName(autoscalingListener),
Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
-Labels: mergeLabels(autoscalingListener.Labels, map[string]string{
+Labels: b.mergeLabels(autoscalingListener.Labels, map[string]string{
LabelKeyGitHubScaleSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
LabelKeyGitHubScaleSetName: autoscalingListener.Spec.AutoscalingRunnerSetName,
labelKeyListenerNamespace: autoscalingListener.Namespace,
@@ -477,14 +479,14 @@ func (b *resourceBuilder) newScaleSetListenerRoleBinding(autoscalingListener *v1
return newRoleBinding
}

-func (b *resourceBuilder) newScaleSetListenerSecretMirror(autoscalingListener *v1alpha1.AutoscalingListener, secret *corev1.Secret) *corev1.Secret {
+func (b *ResourceBuilder) newScaleSetListenerSecretMirror(autoscalingListener *v1alpha1.AutoscalingListener, secret *corev1.Secret) *corev1.Secret {
dataHash := hash.ComputeTemplateHash(&secret.Data)

newListenerSecret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: scaleSetListenerSecretMirrorName(autoscalingListener),
Namespace: autoscalingListener.Namespace,
-Labels: mergeLabels(autoscalingListener.Labels, map[string]string{
+Labels: b.mergeLabels(autoscalingListener.Labels, map[string]string{
LabelKeyGitHubScaleSetNamespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
LabelKeyGitHubScaleSetName: autoscalingListener.Spec.AutoscalingRunnerSetName,
"secret-data-hash": dataHash,
@@ -496,14 +498,14 @@ func (b *resourceBuilder) newScaleSetListenerSecretMirror(autoscalingListener *v
return newListenerSecret
}

-func (b *resourceBuilder) newEphemeralRunnerSet(autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet) (*v1alpha1.EphemeralRunnerSet, error) {
+func (b *ResourceBuilder) newEphemeralRunnerSet(autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet) (*v1alpha1.EphemeralRunnerSet, error) {
runnerScaleSetId, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdAnnotationKey])
if err != nil {
return nil, err
}
runnerSpecHash := autoscalingRunnerSet.RunnerSetSpecHash()

-labels := mergeLabels(autoscalingRunnerSet.Labels, map[string]string{
+labels := b.mergeLabels(autoscalingRunnerSet.Labels, map[string]string{
LabelKeyKubernetesPartOf: labelValueKubernetesPartOf,
LabelKeyKubernetesComponent: "runner-set",
LabelKeyKubernetesVersion: autoscalingRunnerSet.Labels[LabelKeyKubernetesVersion],
@@ -516,7 +518,6 @@ func (b *resourceBuilder) newEphemeralRunnerSet(autoscalingRunnerSet *v1alpha1.A
}

newAnnotations := map[string]string{
-
AnnotationKeyGitHubRunnerGroupName: autoscalingRunnerSet.Annotations[AnnotationKeyGitHubRunnerGroupName],
AnnotationKeyGitHubRunnerScaleSetName: autoscalingRunnerSet.Annotations[AnnotationKeyGitHubRunnerScaleSetName],
annotationKeyRunnerSpecHash: runnerSpecHash,
@@ -546,7 +547,7 @@ func (b *resourceBuilder) newEphemeralRunnerSet(autoscalingRunnerSet *v1alpha1.A
return newEphemeralRunnerSet, nil
}

-func (b *resourceBuilder) newEphemeralRunner(ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet) *v1alpha1.EphemeralRunner {
+func (b *ResourceBuilder) newEphemeralRunner(ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet) *v1alpha1.EphemeralRunner {
labels := make(map[string]string)
for k, v := range ephemeralRunnerSet.Labels {
if k == LabelKeyKubernetesComponent {
@@ -573,10 +574,7 @@ func (b *resourceBuilder) newEphemeralRunner(ephemeralRunnerSet *v1alpha1.Epheme
}
}

-func (b *resourceBuilder) newEphemeralRunnerPod(ctx context.Context, runner *v1alpha1.EphemeralRunner, secret *corev1.Secret, envs ...corev1.EnvVar) *corev1.Pod {
-ctx, span := otel.Tracer("arc").Start(ctx, "resourceBuilder.newEphemeralRunnerPod")
-defer span.End()
-
+func (b *ResourceBuilder) newEphemeralRunnerPod(ctx context.Context, runner *v1alpha1.EphemeralRunner, secret *corev1.Secret, envs ...corev1.EnvVar) *corev1.Pod {
var newPod corev1.Pod

labels := map[string]string{}
@@ -644,7 +642,7 @@ func (b *resourceBuilder) newEphemeralRunnerPod(ctx context.Context, runner *v1a
return &newPod
}

-func (b *resourceBuilder) newEphemeralRunnerJitSecret(ephemeralRunner *v1alpha1.EphemeralRunner) *corev1.Secret {
+func (b *ResourceBuilder) newEphemeralRunnerJitSecret(ephemeralRunner *v1alpha1.EphemeralRunner) *corev1.Secret {
return &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: ephemeralRunner.Name,
@@ -752,14 +750,26 @@ func trimLabelValue(val string) string {
return val
}

-func mergeLabels(base, overwrite map[string]string) map[string]string {
-mergedLabels := map[string]string{}
+func (b *ResourceBuilder) mergeLabels(base, overwrite map[string]string) map[string]string {
+mergedLabels := make(map[string]string, len(base))

+base:
for k, v := range base {
+for _, prefix := range b.ExcludeLabelPropagationPrefixes {
+if strings.HasPrefix(k, prefix) {
+continue base
+}
+}
mergedLabels[k] = v
}

+overwrite:
for k, v := range overwrite {
+for _, prefix := range b.ExcludeLabelPropagationPrefixes {
+if strings.HasPrefix(k, prefix) {
+continue overwrite
+}
+}
mergedLabels[k] = v
}

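Not part of the diff: a small in-package sketch (made-up label keys and values) of what the new exclusion list does. Each prefix is matched with strings.HasPrefix against the label key, for both the base and the overwrite maps; the test changes right below exercise the same behavior end to end.

// Assumes it lives in package actionsgithubcom, since mergeLabels is an unexported method.
func exampleMergeLabels() map[string]string {
	b := ResourceBuilder{ExcludeLabelPropagationPrefixes: []string{"example.com/"}}
	return b.mergeLabels(
		map[string]string{"arbitrary-label": "random-value", "example.com/team": "ci"},
		map[string]string{"app.kubernetes.io/component": "runner-set"},
	)
	// Result: {"arbitrary-label": "random-value", "app.kubernetes.io/component": "runner-set"};
	// the "example.com/"-prefixed key is dropped before propagation.
}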
@@ -19,9 +19,13 @@ func TestLabelPropagation(t *testing.T) {
Name: "test-scale-set",
Namespace: "test-ns",
Labels: map[string]string{
LabelKeyKubernetesPartOf: labelValueKubernetesPartOf,
LabelKeyKubernetesVersion: "0.2.0",
"arbitrary-label": "random-value",
+"example.com/label": "example-value",
+"example.com/example": "example-value",
+"directly.excluded.org/label": "excluded-value",
+"directly.excluded.org/arbitrary": "not-excluded-value",
},
Annotations: map[string]string{
runnerScaleSetIdAnnotationKey: "1",
@@ -34,7 +38,12 @@ func TestLabelPropagation(t *testing.T) {
},
}

-var b resourceBuilder
+b := ResourceBuilder{
+ExcludeLabelPropagationPrefixes: []string{
+"example.com/",
+"directly.excluded.org/label",
+},
+}
ephemeralRunnerSet, err := b.newEphemeralRunnerSet(&autoscalingRunnerSet)
require.NoError(t, err)
assert.Equal(t, labelValueKubernetesPartOf, ephemeralRunnerSet.Labels[LabelKeyKubernetesPartOf])
@@ -63,6 +72,11 @@ func TestLabelPropagation(t *testing.T) {
assert.Equal(t, "repo", listener.Labels[LabelKeyGitHubRepository])
assert.Equal(t, autoscalingRunnerSet.Labels["arbitrary-label"], listener.Labels["arbitrary-label"])

+assert.NotContains(t, listener.Labels, "example.com/label")
+assert.NotContains(t, listener.Labels, "example.com/example")
+assert.NotContains(t, listener.Labels, "directly.excluded.org/label")
+assert.Equal(t, "not-excluded-value", listener.Labels["directly.excluded.org/arbitrary"])
+
listenerServiceAccount := &corev1.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{
Name: "test",
@@ -128,7 +142,7 @@ func TestGitHubURLTrimLabelValues(t *testing.T) {
GitHubConfigUrl: fmt.Sprintf("https://github.com/%s/%s", organization, repository),
}

-var b resourceBuilder
+var b ResourceBuilder
ephemeralRunnerSet, err := b.newEphemeralRunnerSet(autoscalingRunnerSet)
require.NoError(t, err)
assert.Len(t, ephemeralRunnerSet.Labels[LabelKeyGitHubEnterprise], 0)
@@ -152,7 +166,7 @@ func TestGitHubURLTrimLabelValues(t *testing.T) {
GitHubConfigUrl: fmt.Sprintf("https://github.com/enterprises/%s", enterprise),
}

-var b resourceBuilder
+var b ResourceBuilder
ephemeralRunnerSet, err := b.newEphemeralRunnerSet(autoscalingRunnerSet)
require.NoError(t, err)
assert.Len(t, ephemeralRunnerSet.Labels[LabelKeyGitHubEnterprise], 63)

@@ -42,7 +42,7 @@ eliminate some duplication:
`-coverprofile` flags: while `-short` is used to skip [old ARC E2E
tests](https://github.com/actions/actions-runner-controller/blob/master/test/e2e/e2e_test.go#L85-L87),
`-coverprofile` is adding to the test time without really giving us any value
-in return. We should also start using `actions/setup-go@v4` to take advantage
+in return. We should also start using `actions/setup-go@v5` to take advantage
of caching (it would speed up our tests by a lot) or enable it on `v3` if we
have a strong reason not to upgrade. We should keep ignoring our E2E tests too
as those will be run elsewhere (either use `Short` there too or ignoring the

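For context on the `-short` flag the paragraph above relies on, this is the standard Go pattern for gating long-running tests (the test name is illustrative, not a test from this repository, and the usual `testing` import is assumed):

func TestE2EEndToEnd(t *testing.T) {
	// Skipped whenever the suite is invoked with `go test -short`.
	if testing.Short() {
		t.Skip("skipping E2E test in -short mode")
	}
	// ...slow, cluster-backed assertions run only in the dedicated E2E job...
}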
@@ -43,6 +43,16 @@ You can follow [this troubleshooting guide](https://docs.github.com/en/actions/h

## Changelog

+### v0.9.3
+
+1. AutoscalingListener controller: Inspect listener container state instead of pod phase [#3548](https://github.com/actions/actions-runner-controller/pull/3548)
+1. Exclude label prefix propagation [#3607](https://github.com/actions/actions-runner-controller/pull/3607)
+1. Check status code of fetch access token for github app [#3568](https://github.com/actions/actions-runner-controller/pull/3568)
+1. Remove .Named() from the ephemeral runner controller [#3596](https://github.com/actions/actions-runner-controller/pull/3596)
+1. Customize work directory [#3477](https://github.com/actions/actions-runner-controller/pull/3477)
+1. Fix problem with ephemeralRunner Succeeded state before build executed [#3528](https://github.com/actions/actions-runner-controller/pull/3528)
+1. Remove finalizers in one pass to speed up cleanups AutoscalingRunnerSet [#3536](https://github.com/actions/actions-runner-controller/pull/3536)
+
### v0.9.2

1. Refresh session if token expires during delete message [#3529](https://github.com/actions/actions-runner-controller/pull/3529)

@@ -10,6 +10,7 @@ import (
"errors"
"fmt"
"io"
+"math/rand"
"net/http"
"net/url"
"strconv"
@@ -1054,6 +1055,14 @@ func (c *Client) fetchAccessToken(ctx context.Context, gitHubConfigURL string, c
}
defer resp.Body.Close()

+if resp.StatusCode != http.StatusCreated {
+return nil, &GitHubAPIError{
+StatusCode: resp.StatusCode,
+RequestID: resp.Header.Get(HeaderGitHubRequestID),
+Err: fmt.Errorf("failed to get access token for GitHub App auth: %v", resp.Status),
+}
+}
+
// Format: https://docs.github.com/en/rest/apps/apps#create-an-installation-access-token-for-an-app
var accessToken *accessToken
if err = json.NewDecoder(resp.Body).Decode(&accessToken); err != nil {
@@ -1131,15 +1140,30 @@ func (c *Client) getActionsServiceAdminConnection(ctx context.Context, rt *regis
}

retry++
-if retry > 3 {
+if retry > 5 {
return nil, fmt.Errorf("unable to register runner after 3 retries: %w", &GitHubAPIError{
StatusCode: resp.StatusCode,
RequestID: resp.Header.Get(HeaderGitHubRequestID),
Err: innerErr,
})
}
-time.Sleep(time.Duration(500 * int(time.Millisecond) * (retry + 1)))
+// Add exponential backoff + jitter to avoid thundering herd
+// This will generate a backoff schedule:
+// 1: 1s
+// 2: 3s
+// 3: 4s
+// 4: 8s
+// 5: 17s
+baseDelay := 500 * time.Millisecond
+jitter := time.Duration(rand.Intn(1000))
+maxDelay := 20 * time.Second
+delay := baseDelay*(1<<retry) + jitter
+
+if delay > maxDelay {
+delay = maxDelay
+}
+
+time.Sleep(delay)
}

var actionsServiceAdminConnection *ActionsServiceAdminConnection

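A standalone sketch, not part of the change, that evaluates only the exponential part of the new backoff so the deterministic lower bound of each delay can be seen at a glance; the random jitter term from the hunk above is omitted, so the actual sleeps are somewhat larger and less regular than these values.

package main

import (
	"fmt"
	"time"
)

func main() {
	baseDelay := 500 * time.Millisecond
	maxDelay := 20 * time.Second
	for retry := 1; retry <= 5; retry++ {
		// 500ms doubled on every retry, capped at 20s.
		delay := baseDelay * time.Duration(1<<retry)
		if delay > maxDelay {
			delay = maxDelay
		}
		fmt.Printf("retry %d: %s\n", retry, delay) // 1s, 2s, 4s, 8s, 16s
	}
}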
@@ -170,7 +170,7 @@ func TestNewActionsServiceRequest(t *testing.T) {
}
failures := 0
unauthorizedHandler := func(w http.ResponseWriter, r *http.Request) {
-if failures < 2 {
+if failures < 5 {
failures++
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusUnauthorized)

43
go.mod
43
go.mod
@@ -1,10 +1,10 @@
|
|||||||
module github.com/actions/actions-runner-controller
|
module github.com/actions/actions-runner-controller
|
||||||
|
|
||||||
go 1.22.1
|
go 1.22.4
|
||||||
|
|
||||||
require (
|
require (
|
||||||
github.com/bradleyfalzon/ghinstallation/v2 v2.8.0
|
github.com/bradleyfalzon/ghinstallation/v2 v2.8.0
|
||||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc
|
github.com/davecgh/go-spew v1.1.1
|
||||||
github.com/evanphx/json-patch v5.9.0+incompatible
|
github.com/evanphx/json-patch v5.9.0+incompatible
|
||||||
github.com/go-logr/logr v1.4.1
|
github.com/go-logr/logr v1.4.1
|
||||||
github.com/golang-jwt/jwt/v4 v4.5.0
|
github.com/golang-jwt/jwt/v4 v4.5.0
|
||||||
@@ -14,7 +14,7 @@ require (
|
|||||||
github.com/gorilla/mux v1.8.1
|
github.com/gorilla/mux v1.8.1
|
||||||
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79
|
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79
|
||||||
github.com/gruntwork-io/terratest v0.46.7
|
github.com/gruntwork-io/terratest v0.46.7
|
||||||
github.com/hashicorp/go-retryablehttp v0.7.5
|
github.com/hashicorp/go-retryablehttp v0.7.7
|
||||||
github.com/kelseyhightower/envconfig v1.4.0
|
github.com/kelseyhightower/envconfig v1.4.0
|
||||||
github.com/onsi/ginkgo v1.16.5
|
github.com/onsi/ginkgo v1.16.5
|
||||||
github.com/onsi/ginkgo/v2 v2.17.1
|
github.com/onsi/ginkgo/v2 v2.17.1
|
||||||
@@ -23,17 +23,12 @@ require (
|
|||||||
github.com/prometheus/client_golang v1.17.0
|
github.com/prometheus/client_golang v1.17.0
|
||||||
github.com/stretchr/testify v1.9.0
|
github.com/stretchr/testify v1.9.0
|
||||||
github.com/teambition/rrule-go v1.8.2
|
github.com/teambition/rrule-go v1.8.2
|
||||||
go.opentelemetry.io/otel v1.27.0
|
|
||||||
go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.3.0
|
|
||||||
go.opentelemetry.io/otel/log v0.3.0
|
|
||||||
go.opentelemetry.io/otel/sdk/log v0.3.0
|
|
||||||
go.uber.org/multierr v1.11.0
|
go.uber.org/multierr v1.11.0
|
||||||
go.uber.org/zap v1.27.0
|
go.uber.org/zap v1.27.0
|
||||||
golang.org/x/net v0.24.0
|
golang.org/x/net v0.24.0
|
||||||
golang.org/x/oauth2 v0.19.0
|
golang.org/x/oauth2 v0.19.0
|
||||||
golang.org/x/sync v0.7.0
|
golang.org/x/sync v0.7.0
|
||||||
gomodules.xyz/jsonpatch/v2 v2.4.0
|
gomodules.xyz/jsonpatch/v2 v2.4.0
|
||||||
gopkg.in/DataDog/dd-trace-go.v1 v1.65.1
|
|
||||||
gopkg.in/yaml.v2 v2.4.0
|
gopkg.in/yaml.v2 v2.4.0
|
||||||
k8s.io/api v0.28.4
|
k8s.io/api v0.28.4
|
||||||
k8s.io/apimachinery v0.28.4
|
k8s.io/apimachinery v0.28.4
|
||||||
@@ -43,34 +38,23 @@ require (
|
|||||||
)
|
)
|
||||||
|
|
||||||
require (
|
require (
|
||||||
github.com/DataDog/appsec-internal-go v1.6.0 // indirect
|
|
||||||
github.com/DataDog/datadog-agent/pkg/obfuscate v0.48.0 // indirect
|
|
||||||
github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.48.1 // indirect
|
|
||||||
github.com/DataDog/datadog-go/v5 v5.3.0 // indirect
|
|
||||||
github.com/DataDog/go-libddwaf/v3 v3.2.1 // indirect
|
|
||||||
github.com/DataDog/go-tuf v1.0.2-0.5.2 // indirect
|
|
||||||
github.com/DataDog/sketches-go v1.4.5 // indirect
|
|
||||||
github.com/Microsoft/go-winio v0.6.1 // indirect
|
|
||||||
github.com/ProtonMail/go-crypto v0.0.0-20230923063757-afb1ddc0824c // indirect
|
github.com/ProtonMail/go-crypto v0.0.0-20230923063757-afb1ddc0824c // indirect
|
||||||
github.com/aws/aws-sdk-go v1.44.327 // indirect
|
github.com/aws/aws-sdk-go v1.44.122 // indirect
|
||||||
github.com/beorn7/perks v1.0.1 // indirect
github.com/boombuler/barcode v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/cloudflare/circl v1.3.7 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.0 // indirect
-github.com/dustin/go-humanize v1.0.1 // indirect
-github.com/ebitengine/purego v0.6.0-alpha.5 // indirect
github.com/emicklei/go-restful/v3 v3.11.0 // indirect
github.com/evanphx/json-patch/v5 v5.7.0 // indirect
github.com/fsnotify/fsnotify v1.7.0 // indirect
github.com/ghodss/yaml v1.0.0 // indirect
github.com/go-errors/errors v1.0.2-0.20180813162953-d98b870cc4e0 // indirect
-github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-logr/zapr v1.3.0 // indirect
github.com/go-openapi/jsonpointer v0.20.0 // indirect
github.com/go-openapi/jsonreference v0.20.2 // indirect
github.com/go-openapi/swag v0.22.4 // indirect
-github.com/go-sql-driver/mysql v1.6.0 // indirect
+github.com/go-sql-driver/mysql v1.4.1 // indirect
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
@@ -81,9 +65,9 @@ require (
github.com/google/gofuzz v1.2.0 // indirect
github.com/google/pprof v0.0.0-20231101202521-4ca4178f5c7a // indirect
github.com/gruntwork-io/go-commons v0.8.0 // indirect
-github.com/hashicorp/errwrap v1.1.0 // indirect
+github.com/hashicorp/errwrap v1.0.0 // indirect
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
-github.com/hashicorp/go-multierror v1.1.1 // indirect
+github.com/hashicorp/go-multierror v1.1.0 // indirect
github.com/imdario/mergo v0.3.16 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
@@ -96,32 +80,23 @@ require (
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
-github.com/outcaste-io/ristretto v0.2.3 // indirect
+github.com/pmezard/go-difflib v1.0.0 // indirect
-github.com/philhofer/fwd v1.1.2 // indirect
-github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/pquerna/otp v1.2.0 // indirect
github.com/prometheus/client_model v0.5.0 // indirect
github.com/prometheus/common v0.45.0 // indirect
github.com/prometheus/procfs v0.12.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
-github.com/secure-systems-lab/go-securesystemslib v0.7.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/stretchr/objx v0.5.2 // indirect
-github.com/tinylib/msgp v1.1.8 // indirect
github.com/urfave/cli v1.22.2 // indirect
-go.opentelemetry.io/otel/metric v1.27.0 // indirect
-go.opentelemetry.io/otel/sdk v1.27.0 // indirect
-go.opentelemetry.io/otel/trace v1.27.0 // indirect
-go.uber.org/atomic v1.11.0 // indirect
golang.org/x/crypto v0.22.0 // indirect
golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa // indirect
-golang.org/x/mod v0.14.0 // indirect
golang.org/x/sys v0.20.0 // indirect
golang.org/x/term v0.19.0 // indirect
golang.org/x/text v0.14.0 // indirect
golang.org/x/time v0.4.0 // indirect
golang.org/x/tools v0.17.0 // indirect
-golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
+google.golang.org/appengine v1.6.8 // indirect
google.golang.org/protobuf v1.33.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
146 go.sum
@@ -1,31 +1,12 @@
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/DataDog/appsec-internal-go v1.6.0 h1:QHvPOv/O0s2fSI/BraZJNpRDAtdlrRm5APJFZNBxjAw=
-github.com/DataDog/appsec-internal-go v1.6.0/go.mod h1:pEp8gjfNLtEOmz+iZqC8bXhu0h4k7NUsW/qiQb34k1U=
-github.com/DataDog/datadog-agent/pkg/obfuscate v0.48.0 h1:bUMSNsw1iofWiju9yc1f+kBd33E3hMJtq9GuU602Iy8=
-github.com/DataDog/datadog-agent/pkg/obfuscate v0.48.0/go.mod h1:HzySONXnAgSmIQfL6gOv9hWprKJkx8CicuXuUbmgWfo=
-github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.48.1 h1:5nE6N3JSs2IG3xzMthNFhXfOaXlrsdgqmJ73lndFf8c=
-github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.48.1/go.mod h1:Vc+snp0Bey4MrrJyiV2tVxxJb6BmLomPvN1RgAvjGaQ=
-github.com/DataDog/datadog-go/v5 v5.3.0 h1:2q2qjFOb3RwAZNU+ez27ZVDwErJv5/VpbBPprz7Z+s8=
-github.com/DataDog/datadog-go/v5 v5.3.0/go.mod h1:XRDJk1pTc00gm+ZDiBKsjh7oOOtJfYfglVCmFb8C2+Q=
-github.com/DataDog/go-libddwaf/v3 v3.2.1 h1:lZPc6UxCOwioHc++nsldKR50FpIrRh1uGnGLuryqnE8=
-github.com/DataDog/go-libddwaf/v3 v3.2.1/go.mod h1:AP+7Atb8ftSsrha35wht7+K3R+xuzfVSQhabSO4w6CY=
-github.com/DataDog/go-tuf v1.0.2-0.5.2 h1:EeZr937eKAWPxJ26IykAdWA4A0jQXJgkhUjqEI/w7+I=
-github.com/DataDog/go-tuf v1.0.2-0.5.2/go.mod h1:zBcq6f654iVqmkk8n2Cx81E1JnNTMOAx1UEO/wZR+P0=
-github.com/DataDog/gostackparse v0.7.0 h1:i7dLkXHvYzHV308hnkvVGDL3BR4FWl7IsXNPz/IGQh4=
-github.com/DataDog/gostackparse v0.7.0/go.mod h1:lTfqcJKqS9KnXQGnyQMCugq3u1FP6UZMfWR0aitKFMM=
-github.com/DataDog/sketches-go v1.4.5 h1:ki7VfeNz7IcNafq7yI/j5U/YCkO3LJiMDtXz9OMQbyE=
-github.com/DataDog/sketches-go v1.4.5/go.mod h1:7Y8GN8Jf66DLyDhc94zuWA3uHEt/7ttt8jHOBWWrSOg=
-github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
-github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
-github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
github.com/ProtonMail/go-crypto v0.0.0-20230923063757-afb1ddc0824c h1:kMFnB0vCcX7IL/m9Y5LO+KQYv+t1CQOiFe6+SV2J7bE=
github.com/ProtonMail/go-crypto v0.0.0-20230923063757-afb1ddc0824c/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0=
github.com/actions-runner-controller/httpcache v0.2.0 h1:hCNvYuVPJ2xxYBymqBvH0hSiQpqz4PHF/LbU3XghGNI=
github.com/actions-runner-controller/httpcache v0.2.0/go.mod h1:JLu9/2M/btPz1Zu/vTZ71XzukQHn2YeISPmJoM5exBI=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
-github.com/aws/aws-sdk-go v1.44.327 h1:ZS8oO4+7MOBLhkdwIhgtVeDzCeWOlTfKJS7EgggbIEY=
+github.com/aws/aws-sdk-go v1.44.122 h1:p6mw01WBaNpbdP2xrisz5tIkcNwzj/HysobNoaAHjgo=
-github.com/aws/aws-sdk-go v1.44.327/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
+github.com/aws/aws-sdk-go v1.44.122/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
@@ -35,7 +16,6 @@ github.com/boombuler/barcode v1.0.1/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl
github.com/bradleyfalzon/ghinstallation/v2 v2.8.0 h1:yUmoVv70H3J4UOqxqsee39+KlXxNEDfTbAp8c/qULKk=
github.com/bradleyfalzon/ghinstallation/v2 v2.8.0/go.mod h1:fmPmvCiBWhJla3zDv9ZTQSZc8AbwyRnGW1yg5ep1Pcs=
github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0=
-github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA=
@@ -46,19 +26,8 @@ github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
-github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA=
-github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
-github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
-github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
-github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
-github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc=
-github.com/eapache/queue/v2 v2.0.0-20230407133247-75960ed334e4 h1:8EXxF+tCLqaVk8AOC29zl2mnhQjwyLxxOTuhUazWRsg=
-github.com/eapache/queue/v2 v2.0.0-20230407133247-75960ed334e4/go.mod h1:I5sHm0Y0T1u5YjlyqC5GVArM7aNZRUYtTjmJ8mPJFds=
-github.com/ebitengine/purego v0.6.0-alpha.5 h1:EYID3JOAdmQ4SNZYJHu9V6IqOeRQDBYxqKAg9PyoHFY=
-github.com/ebitengine/purego v0.6.0-alpha.5/go.mod h1:ah1In8AOtksoNK6yk5z1HTJeUkC1Ez4Wk2idgGslMwQ=
github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/evanphx/json-patch v5.9.0+incompatible h1:fBXyNpNMuTTDdquAq/uisOr2lShz4oaXpDTX2bLe7ls=
@@ -66,10 +35,8 @@ github.com/evanphx/json-patch v5.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLi
github.com/evanphx/json-patch/v5 v5.7.0 h1:nJqP7uwL84RJInrohHfW0Fx3awjbm8qZeFv0nW9SYGc=
github.com/evanphx/json-patch/v5 v5.7.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ=
github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
-github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs=
+github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
-github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw=
+github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
-github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk=
-github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
@@ -79,12 +46,9 @@ github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeME
github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
github.com/go-errors/errors v1.0.2-0.20180813162953-d98b870cc4e0 h1:skJKxRtNmevLqnayafdLe2AsenqRupVmzZSqrvb5caU=
github.com/go-errors/errors v1.0.2-0.20180813162953-d98b870cc4e0/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
-github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
-github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
-github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ=
github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg=
github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
@@ -95,8 +59,8 @@ github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En
github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU=
github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
-github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE=
+github.com/go-sql-driver/mysql v1.4.1 h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZpNwpA=
-github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
+github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
@@ -106,7 +70,6 @@ github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOW
github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
@@ -115,6 +78,7 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
+github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 h1:0VpGH+cDhbDtdcweoyCVsF3fhN8kejK6rFe/2FFX2nU=
@@ -147,24 +111,16 @@ github.com/gruntwork-io/go-commons v0.8.0 h1:k/yypwrPqSeYHevLlEDmvmgQzcyTwrlZGRa
github.com/gruntwork-io/go-commons v0.8.0/go.mod h1:gtp0yTtIBExIZp7vyIV9I0XQkVwiQZze678hvDXof78=
github.com/gruntwork-io/terratest v0.46.7 h1:oqGPBBO87SEsvBYaA0R5xOq+Lm2Xc5dmFVfxEolfZeU=
github.com/gruntwork-io/terratest v0.46.7/go.mod h1:6gI5MlLeyF+SLwqocA5GBzcTix+XiuxCy1BPwKuT+WM=
+github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
-github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
-github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
-github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
+github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k=
-github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c=
+github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
-github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
+github.com/hashicorp/go-multierror v1.1.0 h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g4TbElacI=
-github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
+github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA=
-github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
+github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU=
-github.com/hashicorp/go-retryablehttp v0.7.5 h1:bJj+Pj19UZMIweq/iie+1u5YCdGrnxCT9yvm0e+Nd5M=
+github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk=
-github.com/hashicorp/go-retryablehttp v0.7.5/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8=
-github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7 h1:UpiO20jno/eV1eVZcxqWnUohyKRe1g8FPV/xH1s/2qs=
-github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8=
-github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts=
-github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4=
-github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc=
-github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
@@ -204,8 +160,6 @@ github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvls
github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
-github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
-github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8=
github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -227,17 +181,10 @@ github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7J
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.33.0 h1:snPCflnZrpMsy94p4lXVEkHo12lmPnc3vY5XBbreexE=
github.com/onsi/gomega v1.33.0/go.mod h1:+925n5YtiFsLzzafLUHzVMBpvvRAzrydIBiSIxjX3wY=
-github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
-github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
-github.com/outcaste-io/ristretto v0.2.3 h1:AK4zt/fJ76kjlYObOeNwh4T3asEuaCmp26pOvUOL9w0=
-github.com/outcaste-io/ristretto v0.2.3/go.mod h1:W8HywhmtlopSB1jeMg3JtdIhf+DYkLAr0VN/s4+MHac=
-github.com/philhofer/fwd v1.1.2 h1:bnDivRJ1EWPjUIRXV5KfORO897HTbpFAQddBdE8t7Gw=
-github.com/philhofer/fwd v1.1.2/go.mod h1:qkPdfjR2SIEbspLqpe1tO4n5yICnr2DY7mqEx2tUTP0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
-github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pquerna/otp v1.2.0 h1:/A3+Jn+cagqayeR3iHs/L62m5ue7710D35zl1zJ1kok=
github.com/pquerna/otp v1.2.0/go.mod h1:dkJfzwRKNiegxyNb54X/3fLwhCynbMspSyWKnvi1AEg=
github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q=
@@ -248,22 +195,13 @@ github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lne
github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY=
github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
-github.com/richardartoul/molecule v1.0.1-0.20240531184615-7ca0df43c0b3 h1:4+LEVOB87y175cLJC/mbsgKmoDOjrBldtXvioEy96WY=
-github.com/richardartoul/molecule v1.0.1-0.20240531184615-7ca0df43c0b3/go.mod h1:vl5+MqJ1nBINuSsUI2mGgH79UweUT/B5Fy8857PqyyI=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk=
-github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc=
-github.com/secure-systems-lab/go-securesystemslib v0.7.0 h1:OwvJ5jQf9LnIAS83waAjPbcMsODrTQUpJ02eNLUoxBg=
-github.com/secure-systems-lab/go-securesystemslib v0.7.0/go.mod h1:/2gYnlnHVQ6xeGtfIqFy7Do03K4cdCY0A/GlJLDKLHI=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
-github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
-github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
-github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
@@ -277,7 +215,6 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
@@ -285,33 +222,11 @@ github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsT
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/teambition/rrule-go v1.8.2 h1:lIjpjvWTj9fFUZCmuoVDrKVOtdiyzbzc93qTmRVe/J8=
github.com/teambition/rrule-go v1.8.2/go.mod h1:Ieq5AbrKGciP1V//Wq8ktsTXwSwJHDD5mD/wLBGl3p4=
-github.com/tinylib/msgp v1.1.8 h1:FCXC1xanKO4I8plpHGH2P7koL/RzZs12l/+r7vakfm0=
-github.com/tinylib/msgp v1.1.8/go.mod h1:qkpG+2ldGg4xRFmx+jfTvZPxfGFhi64BcnL9vkCm/Tw=
github.com/urfave/cli v1.22.2 h1:gsqYFH8bb9ekPA12kRo0hfjngWQjkJPlN9R0N78BoUo=
github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0 h1:KfYpVmrjI7JuToy5k8XV3nkapjWx48k4E4JOtVstzQI=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0/go.mod h1:SeQhzAEccGVZVEy7aH87Nh0km+utSpo1pTv6eMMop48=
-go.opentelemetry.io/otel v1.27.0 h1:9BZoF3yMK/O1AafMiQTVu0YDj5Ea4hPhxCs7sGva+cg=
-go.opentelemetry.io/otel v1.27.0/go.mod h1:DMpAK8fzYRzs+bi3rS5REupisuqTheUlSZJ1WnZaPAQ=
-go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.3.0 h1:6aGq6rMOdOx9B385JpF1OpeL18+6Ho8bTFdxy10oEGY=
-go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.3.0/go.mod h1:fdZI+pB2Y6Dpl3Uf+1ZPrkX6cnwsUAhjK1f9yCAlJIM=
-go.opentelemetry.io/otel/log v0.3.0 h1:kJRFkpUFYtny37NQzL386WbznUByZx186DpEMKhEGZs=
-go.opentelemetry.io/otel/log v0.3.0/go.mod h1:ziCwqZr9soYDwGNbIL+6kAvQC+ANvjgG367HVcyR/ys=
-go.opentelemetry.io/otel/metric v1.27.0 h1:hvj3vdEKyeCi4YaYfNjv2NUje8FqKqUY8IlF0FxV/ik=
-go.opentelemetry.io/otel/metric v1.27.0/go.mod h1:mVFgmRlhljgBiuk/MP/oKylr4hs85GZAylncepAX/ak=
-go.opentelemetry.io/otel/sdk v1.27.0 h1:mlk+/Y1gLPLn84U4tI8d3GNJmGT/eXe3ZuOXN9kTWmI=
-go.opentelemetry.io/otel/sdk v1.27.0/go.mod h1:Ha9vbLwJE6W86YstIywK2xFfPjbWlCuwPtMkKdz/Y4A=
-go.opentelemetry.io/otel/sdk/log v0.3.0 h1:GEjJ8iftz2l+XO1GF2856r7yYVh74URiF9JMcAacr5U=
-go.opentelemetry.io/otel/sdk/log v0.3.0/go.mod h1:BwCxtmux6ACLuys1wlbc0+vGBd+xytjmjajwqqIul2g=
-go.opentelemetry.io/otel/trace v1.27.0 h1:IqYb813p7cmbHk0a5y6pD5JPakbVfftRXABGt5/Rscw=
-go.opentelemetry.io/otel/trace v1.27.0/go.mod h1:6RiD1hkAprV4/q+yd2ln1HG9GoPx39SuvvstaLBl+l4=
-go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
-go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
-go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
@@ -330,12 +245,8 @@ golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa h1:FRnLl4eNAQl8hwxVVC17teOw8
golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa/go.mod h1:zk2irFbV9DP96SEBUUAy67IdHUaZuSnrz1n472HUCLE=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
-golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
-golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0=
-golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
@@ -343,11 +254,9 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
+golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
-golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
-golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w=
@@ -358,7 +267,6 @@ golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
@@ -376,14 +284,10 @@ golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220627191245-f75cf1eec38b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -392,9 +296,7 @@ golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y=
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
-golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q=
@@ -402,8 +304,8 @@ golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
-golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
@@ -415,9 +317,7 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
-golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc=
golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps=
@@ -425,10 +325,10 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk=
-golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw=
gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY=
+google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM=
+google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -439,8 +339,6 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
-gopkg.in/DataDog/dd-trace-go.v1 v1.65.1 h1:Ne7kzWr/br/jwhUJR7CnqPl/mUpNxa6LfgZs0S4htZM=
-gopkg.in/DataDog/dd-trace-go.v1 v1.65.1/go.mod h1:beNFIWd/H04d0k96cfltgiDH2+t0T5sDbyYLF3VTXqk=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
@@ -457,8 +355,6 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-honnef.co/go/gotraceui v0.2.0 h1:dmNsfQ9Vl3GwbiVD7Z8d/osC6WtGGrasyrC2suc4ZIQ=
-honnef.co/go/gotraceui v0.2.0/go.mod h1:qHo4/W75cA3bX0QQoSvDjbJa4R8mAyyFjbWAj63XElc=
k8s.io/api v0.28.4 h1:8ZBrLjwosLl/NYgv1P7EQLqoO8MGQApnbgH8tu3BMzY=
k8s.io/api v0.28.4/go.mod h1:axWTGrY88s/5YE+JSt4uUi6NMM+gur1en2REMR7IRj0=
k8s.io/apiextensions-apiserver v0.28.3 h1:Od7DEnhXHnHPZG+W9I97/fSQkVpVPQx2diy+2EtmY08=
@@ -1,17 +0,0 @@
-#!/usr/bin/env bash
-
-set -e
-
-git reset --hard origin/master
-
-go get go.opentelemetry.io/otel
-go get gopkg.in/DataDog/dd-trace-go.v1
-go get github.com/davecgh/go-spew
-go get github.com/pmezard/go-difflib
-
-find . -name "*.go" | grep -E '(cmd/ghalistener|cmd/githubrunnerscalesetlistener|controllers/actions.github.com)' | xargs -I{} go-instrument -app arc -w -filename {}
-
-git add -u
-git commit -m 'chore: goinstrument'
-
-git cherry-pick a6d6f3e
47 main.go
@@ -94,13 +94,16 @@ func main() {
 		runnerImagePullSecrets stringSlice
 		runnerPodDefaults      actionssummerwindnet.RunnerPodDefaults

 		namespace            string
 		logLevel             string
 		logFormat            string
 		watchSingleNamespace string
+		excludeLabelPropagationPrefixes stringSlice

 		autoScalerImagePullSecrets stringSlice

+		opts = actionsgithubcom.OptionsWithDefault()
+
 		commonRunnerLabels commaSeparatedStringSlice
 	)
 	var c github.Config
@@ -135,9 +138,11 @@ func main() {
 	flag.DurationVar(&defaultScaleDownDelay, "default-scale-down-delay", actionssummerwindnet.DefaultScaleDownDelay, "The approximate delay for a scale down followed by a scale up, used to prevent flapping (down->up->down->... loop)")
 	flag.IntVar(&port, "port", 9443, "The port to which the admission webhook endpoint should bind")
 	flag.DurationVar(&syncPeriod, "sync-period", 1*time.Minute, "Determines the minimum frequency at which K8s resources managed by this controller are reconciled.")
+	flag.IntVar(&opts.RunnerMaxConcuncurrentReconciles, "runner-max-concurrent-reconciles", opts.RunnerMaxConcuncurrentReconciles, "The maximum number of concurrent reconciles which can be run by the EphemeralRunner controller. Increase this value to improve the throughput of the controller, but it may also increase the load on the API server and the external service (e.g. GitHub API).")
 	flag.Var(&commonRunnerLabels, "common-runner-labels", "Runner labels in the K1=V1,K2=V2,... format that are inherited all the runners created by the controller. See https://github.com/actions/actions-runner-controller/issues/321 for more information")
 	flag.StringVar(&namespace, "watch-namespace", "", "The namespace to watch for custom resources. Set to empty for letting it watch for all namespaces.")
 	flag.StringVar(&watchSingleNamespace, "watch-single-namespace", "", "Restrict to watch for custom resources in a single namespace.")
+	flag.Var(&excludeLabelPropagationPrefixes, "exclude-label-propagation-prefix", "The list of prefixes that should be excluded from label propagation")
 	flag.StringVar(&logLevel, "log-level", logging.LogLevelDebug, `The verbosity of the logging. Valid values are "debug", "info", "warn", "error". Defaults to "debug".`)
 	flag.StringVar(&logFormat, "log-format", "text", `The log format. Valid options are "text" and "json". Defaults to "text"`)
 	flag.BoolVar(&autoScalingRunnerSetOnly, "auto-scaling-runner-set-only", false, "Make controller only reconcile AutoRunnerScaleSet object.")
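The two added flags above are registered with the standard library flag package: --runner-max-concurrent-reconciles via flag.IntVar, and the repeatable --exclude-label-propagation-prefix via flag.Var, which requires a type implementing flag.Value (the stringSlice type referenced in the variable declarations). That type's implementation is not part of this diff; a minimal self-contained stand-in, with an assumed default of 2:

package main

import (
	"flag"
	"fmt"
	"strings"
)

// sliceFlag stands in for the stringSlice type used in main.go; the real
// implementation is not shown in this diff. Satisfying flag.Value lets the
// flag be passed multiple times on the command line.
type sliceFlag []string

func (s *sliceFlag) String() string { return strings.Join(*s, ",") }

func (s *sliceFlag) Set(v string) error {
	*s = append(*s, v)
	return nil
}

func main() {
	var prefixes sliceFlag
	// The default of 2 is an illustrative assumption; in main.go the default
	// comes from opts.RunnerMaxConcuncurrentReconciles.
	maxReconciles := flag.Int("runner-max-concurrent-reconciles", 2, "max concurrent EphemeralRunner reconciles")
	flag.Var(&prefixes, "exclude-label-propagation-prefix", "label prefix to exclude from propagation (repeatable)")
	flag.Parse()

	fmt.Println("max:", *maxReconciles, "excluded prefixes:", prefixes)
}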
@@ -154,6 +159,12 @@ func main() {
 	}
 	c.Log = &log

+	if err := opts.LoadEnv(); err != nil {
+		fmt.Fprintf(os.Stderr, "Error: loading environment variables: %v\n", err)
+		os.Exit(1)
+	}
+	log.Info("Using options", "runner-max-concurrent-reconciles", opts.RunnerMaxConcuncurrentReconciles)
+
 	if !autoScalingRunnerSetOnly {
 		ghClient, err = c.NewClient()
 		if err != nil {
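OptionsWithDefault and LoadEnv are referenced above, but their bodies are not part of this diff. A plausible sketch of the pattern they suggest (built-in defaults plus an optional environment override, logged afterwards as main.go does); the struct field name and environment variable used here are assumptions for illustration only:

package main

import (
	"fmt"
	"os"
	"strconv"
)

// Options mirrors the role of the value returned by
// actionsgithubcom.OptionsWithDefault(); the field name is simplified here.
type Options struct {
	RunnerMaxConcurrentReconciles int
}

// OptionsWithDefault returns the built-in defaults (2 is an assumed value).
func OptionsWithDefault() Options {
	return Options{RunnerMaxConcurrentReconciles: 2}
}

// LoadEnv overrides defaults from the environment; the variable name is
// hypothetical and only illustrates the shape of the call in main.go.
func (o *Options) LoadEnv() error {
	if v := os.Getenv("RUNNER_MAX_CONCURRENT_RECONCILES"); v != "" {
		n, err := strconv.Atoi(v)
		if err != nil {
			return fmt.Errorf("loading environment variables: %w", err)
		}
		o.RunnerMaxConcurrentReconciles = n
	}
	return nil
}

func main() {
	opts := OptionsWithDefault()
	if err := opts.LoadEnv(); err != nil {
		fmt.Fprintln(os.Stderr, "Error:", err)
		os.Exit(1)
	}
	fmt.Println("runner-max-concurrent-reconciles:", opts.RunnerMaxConcurrentReconciles)
}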
@@ -258,6 +269,10 @@ func main() {
 		log.WithName("actions-clients"),
 	)

+	rb := actionsgithubcom.ResourceBuilder{
+		ExcludeLabelPropagationPrefixes: excludeLabelPropagationPrefixes,
+	}
+
 	if err = (&actionsgithubcom.AutoscalingRunnerSetReconciler{
 		Client: mgr.GetClient(),
 		Log:    log.WithName("AutoscalingRunnerSet").WithValues("version", build.Version),
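The shared ResourceBuilder constructed above carries ExcludeLabelPropagationPrefixes into each reconciler, presumably so that labels whose keys match one of the prefixes are not copied from parent resources onto the child resources the controllers create. The builder's internals are not in this diff; a self-contained sketch of prefix-based label filtering of that kind:

package main

import (
	"fmt"
	"strings"
)

// filterLabels returns a copy of labels with any key that matches one of the
// excluded prefixes dropped. This is only a sketch of what a propagation
// filter keyed on ExcludeLabelPropagationPrefixes could look like; the real
// ResourceBuilder logic is not part of this diff.
func filterLabels(labels map[string]string, excludePrefixes []string) map[string]string {
	out := make(map[string]string, len(labels))
	for k, v := range labels {
		excluded := false
		for _, p := range excludePrefixes {
			if strings.HasPrefix(k, p) {
				excluded = true
				break
			}
		}
		if !excluded {
			out[k] = v
		}
	}
	return out
}

func main() {
	labels := map[string]string{
		"app.kubernetes.io/name":      "runner",
		"argocd.argoproj.io/instance": "arc", // dropped by the filter below
	}
	fmt.Println(filterLabels(labels, []string{"argocd.argoproj.io/"}))
}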
@@ -267,27 +282,30 @@ func main() {
 		ActionsClient:  actionsMultiClient,
 		UpdateStrategy: actionsgithubcom.UpdateStrategy(updateStrategy),
 		DefaultRunnerScaleSetListenerImagePullSecrets: autoScalerImagePullSecrets,
+		ResourceBuilder: rb,
 	}).SetupWithManager(mgr); err != nil {
 		log.Error(err, "unable to create controller", "controller", "AutoscalingRunnerSet")
 		os.Exit(1)
 	}

 	if err = (&actionsgithubcom.EphemeralRunnerReconciler{
 		Client:        mgr.GetClient(),
 		Log:           log.WithName("EphemeralRunner").WithValues("version", build.Version),
 		Scheme:        mgr.GetScheme(),
 		ActionsClient: actionsMultiClient,
-	}).SetupWithManager(mgr); err != nil {
+		ResourceBuilder: rb,
+	}).SetupWithManager(mgr, actionsgithubcom.WithMaxConcurrentReconciles(opts.RunnerMaxConcuncurrentReconciles)); err != nil {
 		log.Error(err, "unable to create controller", "controller", "EphemeralRunner")
 		os.Exit(1)
 	}

 	if err = (&actionsgithubcom.EphemeralRunnerSetReconciler{
 		Client:         mgr.GetClient(),
 		Log:            log.WithName("EphemeralRunnerSet").WithValues("version", build.Version),
 		Scheme:         mgr.GetScheme(),
 		ActionsClient:  actionsMultiClient,
 		PublishMetrics: metricsAddr != "0",
+		ResourceBuilder: rb,
 	}).SetupWithManager(mgr); err != nil {
 		log.Error(err, "unable to create controller", "controller", "EphemeralRunnerSet")
 		os.Exit(1)
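The EphemeralRunner controller is now wired up through SetupWithManager(mgr, actionsgithubcom.WithMaxConcurrentReconciles(...)), which reads like a variadic functional option. The helper's implementation is not in this diff; a minimal sketch of the pattern under that assumption (in controller-runtime the value would typically end up in controller.Options.MaxConcurrentReconciles via the builder's WithOptions):

package main

import "fmt"

// config collects tunables that a SetupWithManager-style helper can apply.
// This only sketches the functional-option pattern implied by
// WithMaxConcurrentReconciles; the names and fields are assumptions.
type config struct {
	maxConcurrentReconciles int
}

type Option func(*config)

func WithMaxConcurrentReconciles(n int) Option {
	return func(c *config) { c.maxConcurrentReconciles = n }
}

func setup(opts ...Option) config {
	c := config{maxConcurrentReconciles: 1} // controller-runtime's default is 1
	for _, o := range opts {
		o(&c)
	}
	return c
}

func main() {
	fmt.Println(setup(WithMaxConcurrentReconciles(4)))
}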
@@ -299,6 +317,7 @@ func main() {
 		Scheme:                  mgr.GetScheme(),
 		ListenerMetricsAddr:     listenerMetricsAddr,
 		ListenerMetricsEndpoint: listenerMetricsEndpoint,
+		ResourceBuilder:         rb,
 	}).SetupWithManager(mgr); err != nil {
 		log.Error(err, "unable to create controller", "controller", "AutoscalingListener")
 		os.Exit(1)
15 prep_fork.sh
@@ -1,15 +0,0 @@
-#!/usr/bin/env bash
-
-set -e
-
-for f in arc-publish.yaml arc-{publish,validate}-chart.yaml \
-  arc-{release-runners,validate-runners,update-runners-scheduled}.yaml gha-{publish,validate}-chart.yaml \
-  global-run-{first-interaction,stale}.yaml; do
-  echo "Processing $f"
-  git rm .github/workflows/$f
-done
-
-git commit -m "Remove workflows unused in a forked repo"
-
-# cherry-pick: Remove legacy-canary-build job from global-publish-canary.yaml as unused in a forked repo
-git cherry-pick 842b16d71498a5f2a1fc17c0917372435464d49c
@@ -6,8 +6,8 @@ DIND_ROOTLESS_RUNNER_NAME ?= ${DOCKER_USER}/actions-runner-dind-rootless
 OS_IMAGE ?= ubuntu-22.04
 TARGETPLATFORM ?= $(shell arch)

-RUNNER_VERSION ?= 2.316.1
-RUNNER_CONTAINER_HOOKS_VERSION ?= 0.6.0
+RUNNER_VERSION ?= 2.319.1
+RUNNER_CONTAINER_HOOKS_VERSION ?= 0.6.1
 DOCKER_VERSION ?= 24.0.7

 # default list of platforms for which multiarch image is built
@@ -1,2 +1,2 @@
-RUNNER_VERSION=2.316.1
-RUNNER_CONTAINER_HOOKS_VERSION=0.6.0
+RUNNER_VERSION=2.319.1
+RUNNER_CONTAINER_HOOKS_VERSION=0.6.1
@@ -36,8 +36,8 @@ var (

 	testResultCMNamePrefix = "test-result-"

-	RunnerVersion               = "2.316.1"
-	RunnerContainerHooksVersion = "0.6.0"
+	RunnerVersion               = "2.319.1"
+	RunnerContainerHooksVersion = "0.6.1"
 )

 // If you're willing to run this test via VS Code "run test" or "debug test",
@@ -1106,7 +1106,7 @@ func installActionsWorkflow(t *testing.T, testName, runnerLabel, testResultCMNam
 		testing.Step{
 			Uses: "actions/setup-go@v3",
 			With: &testing.With{
-				GoVersion: "1.22.1",
+				GoVersion: "1.22.4",
 			},
 		},
 	)
@@ -1236,7 +1236,7 @@ func installActionsWorkflow(t *testing.T, testName, runnerLabel, testResultCMNam
 		testing.Step{
 			Uses: "azure/setup-kubectl@v1",
 			With: &testing.With{
-				Version: "v1.22.1",
+				Version: "v1.22.4",
 			},
 		},
 		testing.Step{
@@ -355,7 +355,7 @@ nodes:
   image: %s
 `, k.Name, image, image))

-	if err := os.WriteFile(f.Name(), kindConfig, 0644); err != nil {
+	if err := os.WriteFile(f.Name(), kindConfig, 0o644); err != nil {
 		return err
 	}

@@ -385,7 +385,7 @@ func (k *Kind) LoadImages(ctx context.Context, images []ContainerImage) error {
 	}

 	tmpDir := filepath.Join(wd, ".testing", k.Name)
-	if err := os.MkdirAll(tmpDir, 0755); err != nil {
+	if err := os.MkdirAll(tmpDir, 0o755); err != nil {
 		return err
 	}
 	defer func() {
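The only change in the two hunks above is the style of the file-permission literals: since Go 1.13 the 0o prefix is the explicit octal form, and 0o644/0o755 are numerically identical to 0644/0755 (formatters such as gofumpt rewrite to the prefixed form). A trivial check:

package main

import "fmt"

func main() {
	// Legacy and 0o-prefixed octal literals denote the same values.
	fmt.Println(0644 == 0o644, 0755 == 0o755) // true true
}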
@@ -1,7 +1,7 @@
 package testing

 const (
-	ActionsCheckout = "actions/checkout@v3"
+	ActionsCheckout = "actions/checkout@v4"
 )

 type Workflow struct {