Mirror of https://github.com/actions/actions-runner-controller.git, synced 2025-12-10 03:13:15 +00:00

Compare commits: 157 commits, v0.23.0 ... actions-ru
Commits in this range (SHA1):

4ede0c18d0, 9091d9b756, a09c2564d9, a555c90fd5, 38644cf4e8, 23f357db10, 584745b67d, df9592dc99, 8071ac7066, 3c33eca501,
aa827474b2, c75c9f9226, c09a04ec01, 618276e3d3, 18dd89c884, 98b17dc0a5, c658dcfa6d, c4996d4bbd, 7a3fa4f362, 1bfd743e69,
734f3bd63a, 409dc4c114, 4b9a6c6700, 86e1a4a8f3, 544d620bc3, 1cfe1974c4, 7e4b6ebd6d, 11cb9b7882, 10b88bf070, 8b619e7c6f,
fea1457f12, 473295e3fc, 9f6f962fc7, 2a475f25c7, dd9f25ea78, b8e4eee904, edbdef8d20, a190fa97bb, bfc5ea4727, 5a9e8545aa,
4446ba57e1, d62c8a4697, 946d5b1fa7, da6b07660e, e3deb0d752, 82641e5036, 2fe6adf5b7, 736126b793, 6abf5bbac8, dc4f116bda,
cda10fd243, b5d1a63bdf, 6f3e23973d, a517c1ff66, 9b28e633c1, 8161136cbd, a9ac5a1cbf, d4f35cff4f, f661249f07, 73e430ce54,
858ef8979d, 1ce0a183a6, 63935d2053, fc63d6d26e, 5ea08411e6, 067ed2e5ec, d86bd2bcd7, ddd417f756, 0386c0734c, af96de6184,
abb8615796, bc7a3cab1b, e2c8163b8c, 84d16c1c12, 071898c96b, f24e2fa44e, 3c7d3d6b57, 23f091d7fa, 667764e027, de693c4191,
510fc9c834, 7fd5e24961, 9974b1a2b7, bd91b73fd9, a7ae910ee4, 2733c36d0e, 0ef9a22cd4, 933b0c7888, 1b7ec33135, a62882d243,
0cd13fe51d, 01c8dc237e, 7c4db63718, 3d88b9630a, 1152e6b31d, ac27df8301, 9dd26168d6, 18bfb28c0b, 84210e900b, ef3313d147,
c7eea169ad, 63be0223ad, 5bbea772f7, 2aa3f1e142, 3e988afc09, 84210f3d2b, 536692181b, 23403172cb, 8a8ec43364, 78c01fd31d,
bf45aa9f6b, b5aa1750bb, cdc9d20e7a, 8035d6d9f8, 65f7ee92a6, fca8a538db, 95ddc77245, b5194fd75a, adf69bbea0, b43ef70ac6,
f1caebbaf0, ede28f5046, f08ab1490d, 772ca57056, 51b13e3bab, 81017b130f, bdbcf66569, 0e15a78541, f85c3d06d9, 51ba7d7160,
759349de11, 3014e98681, 5f4be6a883, b98f470a70, e46b90f758, 3a7e8c844b, 65a67ee61c, 215ba36fd1, 27774b47bd, fbde2b9a41,
212098183a, 4a5097d8cf, 9c57d085f8, d6622f9369, 3b67ee727f, e6bddcd238, f60e57d789, 3ca1152420, e94fa19843, 99832d7104,
289bcd8b64, 5e8cba82c2, dabbc99c78, d01595cfbc, c1e5829b03, 800d6bd586, d3b7f0bf7d
29 .github/ISSUE_TEMPLATE/bug_report.yml (vendored)

@@ -17,6 +17,12 @@ body:
label: Helm Chart Version
description: Run `helm list` and see what's shown under CHART VERSION. Any release tags prefixed with `actions-runner-controller-` are for chart releases
placeholder: ex. 0.11.0
- type: input
id: cert-manager-version
attributes:
label: CertManager Version
description: Run `kubectl get po -o yaml $CERT_MANAGER_POD` and see the image tag, or run `helm list` and see what's shown under APP VERSION for your cert-manager Helm release.
placeholder: ex. 1.8
- type: dropdown
id: deployment-method
attributes:
@@ -29,6 +35,17 @@ body:
- Other
validations:
required: true
- type: textarea
id: cert-manager
attributes:
label: cert-manager installation
description: Confirm that you've installed cert-manager correctly by answering a few questions
placeholder: |
- Did you follow https://github.com/actions-runner-controller/actions-runner-controller#installation? If not, describe the installation process so that we can reproduce your environment.
- Are you sure you've installed cert-manager from an official source?
(Note that we won't provide user support for cert-manager itself. Make sure cert-manager is fully working before testing ARC or reporting a bug
validations:
required: true
- type: checkboxes
id: checks
attributes:
@@ -41,7 +58,7 @@ body:
required: true
- label: My actions-runner-controller version (v0.x.y) does support the feature
required: true
- label: I've already upgraded ARC to the latest and it didn't fix the issue
- label: I've already upgraded ARC (including the CRDs, see charts/actions-runner-controller/docs/UPGRADING.md for details) to the latest and it didn't fix the issue
required: true
- type: textarea
id: resource-definitions
@@ -113,9 +130,11 @@ body:
id: controller-logs
attributes:
label: Controller Logs
description: "Include logs from `actions-runner-controller`'s controller-manager pod"
description: "NEVER EVER OMIT THIS! Include logs from `actions-runner-controller`'s controller-manager pod"
render: shell
placeholder: |
PROVIDE THE LOGS VIA A GIST LINK (https://gist.github.com/), NOT DIRECTLY IN THIS TEXT AREA

To grab controller logs:

# Set NS according to your setup
@@ -125,8 +144,6 @@ body:
kubectl -n $NS get po

kubectl -n $NS logs $POD_NAME > arc.log

Upload it to e.g. https://gist.github.com/ and paste the link to it here.
validations:
required: true
- type: textarea
@@ -136,6 +153,8 @@ body:
description: "Include logs from runner pod(s)"
render: shell
placeholder: |
PROVIDE THE LOGS VIA A GIST LINK (https://gist.github.com/), NOT DIRECTLY IN THIS TEXT AREA

To grab the runner pod logs:

# Set NS according to your setup. It should match your RunnerDeployment's metadata.namespace.
@@ -146,8 +165,6 @@ body:

kubectl -n $NS logs $POD_NAME -c runner > runnerpod_runner.log
kubectl -n $NS logs $POD_NAME -c docker > runnerpod_docker.log

Upload it to e.g. https://gist.github.com/ and paste the link to it here.
validations:
required: true
- type: textarea

@@ -29,23 +29,23 @@ runs:
shell: bash

- name: Set up QEMU
uses: docker/setup-qemu-action@v1
uses: docker/setup-qemu-action@v2

- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
uses: docker/setup-buildx-action@v2
with:
version: latest

- name: Login to DockerHub
if: ${{ github.ref == 'master' && github.event.pull_request.merged == true }}
uses: docker/login-action@v1
if: ${{ github.event_name == 'release' || github.event_name == 'push' && github.ref == 'refs/heads/master' }}
uses: docker/login-action@v2
with:
username: ${{ inputs.username }}
password: ${{ inputs.password }}

- name: Login to GitHub Container Registry
uses: docker/login-action@v1
if: ${{ github.ref == 'master' && github.event.pull_request.merged == true }}
if: ${{ github.event_name == 'release' || github.event_name == 'push' && github.ref == 'refs/heads/master' }}
uses: docker/login-action@v2
with:
registry: ghcr.io
username: ${{ inputs.ghcr_username }}

25 .github/lock.yml (vendored)

@@ -1,25 +0,0 @@
# Configuration for Lock Threads
# Repo: https://github.com/dessant/lock-threads-app
# App: https://github.com/apps/lock

# Number of days of inactivity before a closed issue or pull request is locked
daysUntilLock: 7

# Skip issues and pull requests created before a given timestamp. Timestamp must
# follow ISO 8601 (`YYYY-MM-DD`). Set to `false` to disable
skipCreatedBefore: false

# Issues and pull requests with these labels will be ignored. Set to `[]` to disable
exemptLabels: []

# Label to add before locking, such as `outdated`. Set to `false` to disable
lockLabel: false

# Comment to post before locking. Set to `false` to disable
lockComment: >
This thread has been automatically locked since there has not been
any recent activity after it was closed. Please open a new issue for
related bugs.

# Assign `resolved` as the reason for locking. Set to `false` to disable
setLockReason: true

6 .github/renovate.json5 (vendored)

@@ -13,7 +13,7 @@
{
// use https://github.com/actions/runner/releases
"fileMatch": [
".github/workflows/runners.yml"
".github/workflows/runners.yaml"
],
"matchStrings": ["RUNNER_VERSION: +(?<currentValue>.*?)\\n"],
"depNameTemplate": "actions/runner",
@@ -30,8 +30,8 @@
},
{
"fileMatch": [
"runner/Dockerfile",
"runner/Dockerfile.dindrunner"
"runner/actions-runner.dockerfile",
"runner/actions-runner-dind.dockerfile"
],
"matchStrings": ["RUNNER_VERSION=+(?<currentValue>.*?)\\n"],
"depNameTemplate": "actions/runner",

@@ -1,26 +1,28 @@
name: Publish Controller Image
name: Publish ARC

on:
release:
types: [published]
types:
- published

# https://docs.github.com/en/rest/overview/permissions-required-for-github-apps
permissions:
contents: write
packages: write

jobs:
build:
runs-on: ubuntu-latest
release-controller:
name: Release
runs-on: ubuntu-latest
env:
DOCKERHUB_USERNAME: ${{ secrets.DOCKER_USER }}
steps:
- name: Set outputs
id: vars
run: echo ::set-output name=sha_short::${GITHUB_SHA::7}

- name: Checkout
uses: actions/checkout@v3

- uses: actions/setup-go@v3
with:
go-version: '1.17.7'
go-version: '1.18.2'

- name: Install tools
run: |
@@ -39,25 +41,20 @@ jobs:
- name: Upload artifacts
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: make github-release
run: |
make github-release

- name: Set up QEMU
uses: docker/setup-qemu-action@v1

- name: Set up Docker Buildx
id: buildx
uses: docker/setup-buildx-action@v1
- name: Setup Docker Environment
id: vars
uses: ./.github/actions/setup-docker-environment
with:
version: latest

- name: Login to DockerHub
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKER_USER }}
username: ${{ env.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKER_ACCESS_TOKEN }}
ghcr_username: ${{ github.actor }}
ghcr_password: ${{ secrets.GITHUB_TOKEN }}

- name: Build and Push
uses: docker/build-push-action@v2
uses: docker/build-push-action@v3
with:
file: Dockerfile
platforms: linux/amd64,linux/arm64
@@ -66,4 +63,8 @@ jobs:
${{ env.DOCKERHUB_USERNAME }}/actions-runner-controller:latest
${{ env.DOCKERHUB_USERNAME }}/actions-runner-controller:${{ env.VERSION }}
${{ env.DOCKERHUB_USERNAME }}/actions-runner-controller:${{ env.VERSION }}-${{ steps.vars.outputs.sha_short }}

ghcr.io/actions-runner-controller/actions-runner-controller:latest
ghcr.io/actions-runner-controller/actions-runner-controller:${{ env.VERSION }}
ghcr.io/actions-runner-controller/actions-runner-controller:${{ env.VERSION }}-${{ steps.vars.outputs.sha_short }}
cache-from: type=gha
cache-to: type=gha,mode=max

58 .github/workflows/publish-canary.yaml (vendored, new file)

@@ -0,0 +1,58 @@
name: Publish Canary Image

on:
push:
branches:
- master
paths-ignore:
- '**.md'
- '.github/ISSUE_TEMPLATE/**'
- '.github/workflows/validate-chart.yaml'
- '.github/workflows/publish-chart.yaml'
- '.github/workflows/publish-arc.yaml'
- '.github/workflows/runners.yaml'
- '.github/workflows/validate-entrypoint.yaml'
- '.github/renovate.*'
- 'runner/**'
- '.gitignore'
- 'PROJECT'
- 'LICENSE'
- 'Makefile'

# https://docs.github.com/en/rest/overview/permissions-required-for-github-apps
permissions:
contents: read
packages: write

jobs:
canary-build:
name: Build and Publish Canary Image
runs-on: ubuntu-latest
env:
DOCKERHUB_USERNAME: ${{ secrets.DOCKER_USER }}
steps:
- name: Checkout
uses: actions/checkout@v3

- name: Setup Docker Environment
id: vars
uses: ./.github/actions/setup-docker-environment
with:
username: ${{ env.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKER_ACCESS_TOKEN }}
ghcr_username: ${{ github.actor }}
ghcr_password: ${{ secrets.GITHUB_TOKEN }}

# Considered unstable builds
# See Issue #285, PR #286, and PR #323 for more information
- name: Build and Push
uses: docker/build-push-action@v3
with:
file: Dockerfile
platforms: linux/amd64,linux/arm64
push: true
tags: |
${{ env.DOCKERHUB_USERNAME }}/actions-runner-controller:canary
ghcr.io/actions-runner-controller/actions-runner-controller:canary
cache-from: type=gha,scope=arc-canary
cache-to: type=gha,mode=max,scope=arc-canary

@@ -1,4 +1,4 @@
name: Publish helm chart
name: Publish Helm Chart

on:
push:
@@ -6,7 +6,7 @@ on:
- master
paths:
- 'charts/**'
- '.github/workflows/on-push-master-publish-chart.yml'
- '.github/workflows/publish-chart.yaml'
- '!charts/actions-runner-controller/docs/**'
- '!**.md'
workflow_dispatch:
@@ -15,10 +15,13 @@ env:
KUBE_SCORE_VERSION: 1.10.0
HELM_VERSION: v3.8.0

permissions:
contents: read

jobs:
lint-chart:
runs-on: ubuntu-latest
name: Lint Chart
runs-on: ubuntu-latest
outputs:
publish-chart: ${{ steps.publish-chart-step.outputs.publish }}
steps:
@@ -28,7 +31,7 @@ jobs:
fetch-depth: 0

- name: Set up Helm
uses: azure/setup-helm@v2.1
uses: azure/setup-helm@v3.1
with:
version: ${{ env.HELM_VERSION }}

@@ -49,9 +52,9 @@ jobs:
--enable-optional-test container-security-context-readonlyrootfilesystem

# python is a requirement for the chart-testing action below (supports yamllint among other tests)
- uses: actions/setup-python@v3
- uses: actions/setup-python@v4
with:
python-version: 3.7
python-version: '3.7'

- name: Set up chart-testing
uses: helm/chart-testing-action@v2.2.1
@@ -65,22 +68,23 @@ jobs:
fi

- name: Run chart-testing (lint)
run: ct lint --config charts/.ci/ct-config.yaml
run: |
ct lint --config charts/.ci/ct-config.yaml

- name: Create kind cluster
uses: helm/kind-action@v1.2.0
if: steps.list-changed.outputs.changed == 'true'
uses: helm/kind-action@v1.3.0

# We need cert-manager already installed in the cluster because we assume the CRDs exist
- name: Install cert-manager
if: steps.list-changed.outputs.changed == 'true'
run: |
helm repo add jetstack https://charts.jetstack.io --force-update
helm install cert-manager jetstack/cert-manager --set installCRDs=true --wait
if: steps.list-changed.outputs.changed == 'true'

- name: Run chart-testing (install)
run: ct install --config charts/.ci/ct-config.yaml
if: steps.list-changed.outputs.changed == 'true'
run: ct install --config charts/.ci/ct-config.yaml

# WARNING: This relies on the latest release being inat the top of the JSON from GitHub and a clean chart.yaml
- name: Check if Chart Publish is Needed
@@ -99,8 +103,11 @@ jobs:
publish-chart:
if: needs.lint-chart.outputs.publish-chart == 'true'
needs: lint-chart
runs-on: ubuntu-latest
name: Publish Chart
runs-on: ubuntu-latest
permissions:
contents: write # for helm/chart-releaser-action to push chart release and create a release

steps:
- name: Checkout

32 .github/workflows/run-codeql.yaml (vendored, new file)

@@ -0,0 +1,32 @@
name: Run CodeQL

on:
push:
branches:
- master
pull_request:
branches:
- master
schedule:
- cron: '30 1 * * 0'

jobs:
analyze:
name: Analyze
runs-on: ubuntu-latest
permissions:
security-events: write
steps:
- name: Checkout repository
uses: actions/checkout@v3

- name: Initialize CodeQL
uses: github/codeql-action/init@v2
with:
languages: go

- name: Autobuild
uses: github/codeql-action/autobuild@v2

- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v2

@@ -1,12 +1,18 @@
name: 'Close stale issues and PRs'
name: Run Stale Bot
on:
schedule:
# 01:30 every day
- cron: '30 1 * * *'

permissions:
contents: read

jobs:
stale:
name: Run Stale
runs-on: ubuntu-latest
permissions:
issues: write # for actions/stale to close stale issues
pull-requests: write # for actions/stale to close stale PRs
steps:
- uses: actions/stale@v5
with:

@@ -2,31 +2,41 @@ name: Runners

on:
pull_request:
types:
types:
- opened
- synchronize
- reopened
- closed
branches:
- 'master'
paths:
- 'runner/**'
- '!runner/Makefile'
- .github/workflows/runners.yml
- '.github/workflows/runners.yaml'
- '!**.md'
# We must do a trigger on a push: instead of a types: closed so GitHub Secrets
# are available to the workflow run
push:
branches:
- 'master'
paths:
- 'runner/**'
- '!runner/Makefile'
- '.github/workflows/runners.yaml'
- '!**.md'

env:
RUNNER_VERSION: 2.290.1
RUNNER_VERSION: 2.294.0
DOCKER_VERSION: 20.10.12
RUNNER_CONTAINER_HOOKS_VERSION: 0.1.2
DOCKERHUB_USERNAME: summerwind

jobs:
build:
build-runners:
name: Build ${{ matrix.name }}-${{ matrix.os-name }}-${{ matrix.os-version }}
runs-on: ubuntu-latest
permissions:
packages: write
contents: read
name: Build ${{ matrix.name }}-${{ matrix.os-name }}-${{ matrix.os-version }}
strategy:
fail-fast: false
matrix:
@@ -34,11 +44,9 @@ jobs:
- name: actions-runner
os-name: ubuntu
os-version: 20.04
dockerfile: Dockerfile
- name: actions-runner-dind
os-name: ubuntu
os-version: 20.04
dockerfile: Dockerfile.dindrunner

steps:
- name: Checkout
@@ -47,22 +55,23 @@ jobs:
- name: Setup Docker Environment
id: vars
uses: ./.github/actions/setup-docker-environment
with:
with:
username: ${{ env.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKER_ACCESS_TOKEN }}
ghcr_username: ${{ github.actor }}
ghcr_password: ${{ secrets.GITHUB_TOKEN }}

- name: Build and Push Versioned Tags
uses: docker/build-push-action@v2
uses: docker/build-push-action@v3
with:
context: ./runner
file: ./runner/${{ matrix.dockerfile }}
file: ./runner/${{ matrix.name }}.dockerfile
platforms: linux/amd64,linux/arm64
push: ${{ github.ref == 'master' && github.event.pull_request.merged == true }}
push: ${{ github.event_name == 'push' && github.ref == 'refs/heads/master' }}
build-args: |
RUNNER_VERSION=${{ env.RUNNER_VERSION }}
DOCKER_VERSION=${{ env.DOCKER_VERSION }}
RUNNER_CONTAINER_HOOKS_VERSION=${{ env.RUNNER_CONTAINER_HOOKS_VERSION }}
tags: |
${{ env.DOCKERHUB_USERNAME }}/${{ matrix.name }}:v${{ env.RUNNER_VERSION }}-${{ matrix.os-name }}-${{ matrix.os-version }}
${{ env.DOCKERHUB_USERNAME }}/${{ matrix.name }}:v${{ env.RUNNER_VERSION }}-${{ matrix.os-name }}-${{ matrix.os-version }}-${{ steps.vars.outputs.sha_short }}
@@ -70,5 +79,5 @@ jobs:
ghcr.io/${{ github.repository }}/${{ matrix.name }}:latest
ghcr.io/${{ github.repository }}/${{ matrix.name }}:v${{ env.RUNNER_VERSION }}-${{ matrix.os-name }}-${{ matrix.os-version }}
ghcr.io/${{ github.repository }}/${{ matrix.name }}:v${{ env.RUNNER_VERSION }}-${{ matrix.os-name }}-${{ matrix.os-version }}-${{ steps.vars.outputs.sha_short }}
cache-from: type=gha
cache-to: type=gha,mode=max
cache-from: type=gha,scope=build-${{ matrix.name }}
cache-to: type=gha,mode=max,scope=build-${{ matrix.name }}

@@ -1,45 +1,59 @@
name: CI
name: Validate ARC

on:
pull_request:
branches:
- master
paths-ignore:
- .github/workflows/runners.yml
- .github/workflows/on-push-lint-charts.yml
- .github/workflows/on-push-master-publish-chart.yml
- .github/workflows/release.yml
- .github/workflows/test-entrypoint.yml
- .github/workflows/wip.yml
- 'runner/**'
- '**.md'
- '.github/ISSUE_TEMPLATE/**'
- '.github/workflows/publish-canary.yaml'
- '.github/workflows/validate-chart.yaml'
- '.github/workflows/publish-chart.yaml'
- '.github/workflows/runners.yaml'
- '.github/workflows/publish-arc.yaml'
- '.github/workflows/validate-entrypoint.yaml'
- '.github/renovate.*'
- 'runner/**'
- '.gitignore'
- 'PROJECT'
- 'LICENSE'
- 'Makefile'

permissions:
contents: read

jobs:
test:
test-controller:
name: Test ARC
runs-on: ubuntu-latest
name: Test
steps:
- name: Checkout
uses: actions/checkout@v3
- uses: actions/setup-go@v3

- name: Set-up Go
uses: actions/setup-go@v3
with:
go-version: '1.17.7'
go-version: '1.18.2'
check-latest: false
- run: go version

- uses: actions/cache@v3
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-

- name: Install kubebuilder
run: |
curl -L -O https://github.com/kubernetes-sigs/kubebuilder/releases/download/v2.3.2/kubebuilder_2.3.2_linux_amd64.tar.gz
tar zxvf kubebuilder_2.3.2_linux_amd64.tar.gz
sudo mv kubebuilder_2.3.2_linux_amd64 /usr/local/kubebuilder

- name: Run tests
run: make test
run: |
make test

- name: Verify manifests are up-to-date
run: |
make manifests

@@ -1,10 +1,10 @@
name: Lint and Test Charts
name: Validate Helm Chart

on:
push:
paths:
- 'charts/**'
- '.github/workflows/on-push-lint-charts.yml'
- '.github/workflows/validate-chart.yaml'
- '!charts/actions-runner-controller/docs/**'
- '!**.md'
workflow_dispatch:
@@ -12,10 +12,13 @@ env:
KUBE_SCORE_VERSION: 1.10.0
HELM_VERSION: v3.8.0

permissions:
contents: read

jobs:
lint-test:
runs-on: ubuntu-latest
validate-chart:
name: Lint Chart
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v3
@@ -23,7 +26,7 @@ jobs:
fetch-depth: 0

- name: Set up Helm
uses: azure/setup-helm@v2.1
uses: azure/setup-helm@v3.1
with:
version: ${{ env.HELM_VERSION }}

@@ -44,9 +47,9 @@ jobs:
--enable-optional-test container-security-context-readonlyrootfilesystem

# python is a requirement for the chart-testing action below (supports yamllint among other tests)
- uses: actions/setup-python@v3
- uses: actions/setup-python@v4
with:
python-version: 3.7
python-version: '3.7'

- name: Set up chart-testing
uses: helm/chart-testing-action@v2.2.1
@@ -60,18 +63,20 @@ jobs:
fi

- name: Run chart-testing (lint)
run: ct lint --config charts/.ci/ct-config.yaml
run: |
ct lint --config charts/.ci/ct-config.yaml

- name: Create kind cluster
uses: helm/kind-action@v1.2.0
uses: helm/kind-action@v1.3.0
if: steps.list-changed.outputs.changed == 'true'

# We need cert-manager already installed in the cluster because we assume the CRDs exist
- name: Install cert-manager
if: steps.list-changed.outputs.changed == 'true'
run: |
helm repo add jetstack https://charts.jetstack.io --force-update
helm install cert-manager jetstack/cert-manager --set installCRDs=true --wait
if: steps.list-changed.outputs.changed == 'true'

- name: Run chart-testing (install)
run: ct install --config charts/.ci/ct-config.yaml
run: |
ct install --config charts/.ci/ct-config.yaml

@@ -1,4 +1,4 @@
name: Unit tests for entrypoint
name: Validate Runners

on:
pull_request:
@@ -9,13 +9,17 @@ on:
- 'test/entrypoint/**'
- '!**.md'

permissions:
contents: read

jobs:
test:
runs-on: ubuntu-latest
test-runner-entrypoint:
name: Test entrypoint
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v3
- name: Run unit tests for entrypoint.sh

- name: Run tests
run: |
make acceptance/runner/entrypoint

51 .github/workflows/wip.yml (vendored)

@@ -1,51 +0,0 @@
name: Publish Canary Image

on:
push:
branches:
- master
paths-ignore:
- .github/workflows/runners.yml
- .github/workflows/on-push-lint-charts.yml
- .github/workflows/on-push-master-publish-chart.yml
- .github/workflows/release.yml
- .github/workflows/test-entrypoint.yml
- "runner/**"
- "**.md"
- ".gitignore"

jobs:
build:
runs-on: ubuntu-latest
name: Build and Publish Canary Image
env:
DOCKERHUB_USERNAME: ${{ secrets.DOCKER_USER }}
steps:
- name: Checkout
uses: actions/checkout@v3

- name: Set up QEMU
uses: docker/setup-qemu-action@v1

- name: Set up Docker Buildx
id: buildx
uses: docker/setup-buildx-action@v1
with:
version: latest

- name: Login to DockerHub
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKER_USER }}
password: ${{ secrets.DOCKER_ACCESS_TOKEN }}

# Considered unstable builds
# See Issue #285, PR #286, and PR #323 for more information
- name: Build and Push
uses: docker/build-push-action@v2
with:
file: Dockerfile
platforms: linux/amd64,linux/arm64
push: true
tags: |
${{ env.DOCKERHUB_USERNAME }}/actions-runner-controller:canary

@@ -1,5 +1,5 @@
# Build the manager binary
FROM --platform=$BUILDPLATFORM golang:1.17 as builder
FROM --platform=$BUILDPLATFORM golang:1.18.4 as builder

WORKDIR /workspace

9 Makefile

@@ -5,7 +5,7 @@ else
endif
DOCKER_USER ?= $(shell echo ${NAME} | cut -d / -f1)
VERSION ?= latest
RUNNER_VERSION ?= 2.290.1
RUNNER_VERSION ?= 2.294.0
TARGETPLATFORM ?= $(shell arch)
RUNNER_NAME ?= ${DOCKER_USER}/actions-runner
RUNNER_TAG ?= ${VERSION}
@@ -15,7 +15,6 @@ TEST_ORG_REPO ?=
TEST_EPHEMERAL ?= false
SYNC_PERIOD ?= 1m
USE_RUNNERSET ?=
RUNNER_FEATURE_FLAG_EPHEMERAL ?=
KUBECONTEXT ?= kind-acceptance
CLUSTER ?= acceptance
CERT_MANAGER_VERSION ?= v1.1.1
@@ -57,6 +56,7 @@ GO_TEST_ARGS ?= -short
# Run tests
test: generate fmt vet manifests
go test $(GO_TEST_ARGS) ./... -coverprofile cover.out
go test -fuzz=Fuzz -fuzztime=10s -run=Fuzz* ./controllers

test-with-deps: kube-apiserver etcd kubectl
# See https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/envtest#pkg-constants
@@ -188,7 +188,6 @@ acceptance/deploy:
TEST_ORG=${TEST_ORG} TEST_ORG_REPO=${TEST_ORG_REPO} SYNC_PERIOD=${SYNC_PERIOD} \
USE_RUNNERSET=${USE_RUNNERSET} \
TEST_EPHEMERAL=${TEST_EPHEMERAL} \
RUNNER_FEATURE_FLAG_EPHEMERAL=${RUNNER_FEATURE_FLAG_EPHEMERAL} \
acceptance/deploy.sh

acceptance/tests:
@@ -223,7 +222,7 @@ ifeq (, $(wildcard $(GOBIN)/controller-gen))
CONTROLLER_GEN_TMP_DIR=$$(mktemp -d) ;\
cd $$CONTROLLER_GEN_TMP_DIR ;\
go mod init tmp ;\
go get sigs.k8s.io/controller-tools/cmd/controller-gen@v0.7.0 ;\
go install sigs.k8s.io/controller-tools/cmd/controller-gen@v0.7.0 ;\
rm -rf $$CONTROLLER_GEN_TMP_DIR ;\
}
endif
@@ -243,7 +242,7 @@ ifeq (, $(wildcard $(GOBIN)/yq))
YQ_TMP_DIR=$$(mktemp -d) ;\
cd $$YQ_TMP_DIR ;\
go mod init tmp ;\
go get github.com/mikefarah/yq/v3@3.4.0 ;\
go install github.com/mikefarah/yq/v3@3.4.0 ;\
rm -rf $$YQ_TMP_DIR ;\
}
endif

569
README.md
569
README.md
@@ -1,11 +1,13 @@
|
||||
# actions-runner-controller (ARC)
|
||||
|
||||
[](https://bestpractices.coreinfrastructure.org/projects/6061)
|
||||
[](https://github.com/jonico/awesome-runners)
|
||||
|
||||
This controller operates self-hosted runners for GitHub Actions on your Kubernetes cluster.
|
||||
|
||||
ToC:
|
||||
|
||||
- [People](#people)
|
||||
- [Status](#status)
|
||||
- [About](#about)
|
||||
- [Installation](#installation)
|
||||
@@ -13,21 +15,23 @@ ToC:
|
||||
- [Setting Up Authentication with GitHub API](#setting-up-authentication-with-github-api)
|
||||
- [Deploying Using GitHub App Authentication](#deploying-using-github-app-authentication)
|
||||
- [Deploying Using PAT Authentication](#deploying-using-pat-authentication)
|
||||
- [Deploying Multiple Controllers](#deploying-multiple-controllers)
|
||||
- [Deploying Multiple Controllers](#deploying-multiple-controllers)
|
||||
- [Usage](#usage)
|
||||
- [Repository Runners](#repository-runners)
|
||||
- [Organization Runners](#organization-runners)
|
||||
- [Enterprise Runners](#enterprise-runners)
|
||||
- [RunnerDeployments](#runnerdeployments)
|
||||
- [RunnerSets](#runnersets)
|
||||
- [Persistent Runners](#persistent-runners)
|
||||
- [Persistent Runners](#persistent-runners)
|
||||
- [Autoscaling](#autoscaling)
|
||||
- [Anti-Flapping Configuration](#anti-flapping-configuration)
|
||||
- [Pull Driven Scaling](#pull-driven-scaling)
|
||||
- [Webhook Driven Scaling](#webhook-driven-scaling)
|
||||
- [Autoscaling to/from 0](#autoscaling-tofrom-0)
|
||||
- [Scheduled Overrides](#scheduled-overrides)
|
||||
- [Runner with DinD](#runner-with-dind)
|
||||
- [Alternative Runners](#alternative-runners)
|
||||
- [Runner with DinD](#runner-with-dind)
|
||||
- [Runner with k8s jobs](#runner-with-k8s-jobs)
|
||||
- [Additional Tweaks](#additional-tweaks)
|
||||
- [Custom Volume mounts](#custom-volume-mounts)
|
||||
- [Runner Labels](#runner-labels)
|
||||
@@ -36,10 +40,25 @@ ToC:
|
||||
- [Using IRSA (IAM Roles for Service Accounts) in EKS](#using-irsa-iam-roles-for-service-accounts-in-eks)
|
||||
- [Software Installed in the Runner Image](#software-installed-in-the-runner-image)
|
||||
- [Using without cert-manager](#using-without-cert-manager)
|
||||
- [Multitenancy](#multitenancy)
|
||||
- [Troubleshooting](#troubleshooting)
|
||||
- [Contributing](#contributing)
|
||||
|
||||
|
||||
## People
|
||||
|
||||
`actions-runner-controller` is an open-source project currently developed and maintained in collaboration with maintainers @mumoshu and @toast-gear, various [contributors](https://github.com/actions-runner-controller/actions-runner-controller/graphs/contributors), and the [awesome community](https://github.com/actions-runner-controller/actions-runner-controller/discussions), mostly in their spare time.
|
||||
|
||||
If you think the project is awesome and it's becoming a basis for your important business, consider [sponsoring us](https://github.com/sponsors/actions-runner-controller)!
|
||||
|
||||
In case you are already the employer of one of contributors, sponsoring via GitHub Sponsors might not be an option. Just support them in other means!
|
||||
|
||||
We don't currently have [any sponsors dedicated to this project yet](https://github.com/sponsors/actions-runner-controller).
|
||||
|
||||
However, [HelloFresh](https://www.hellofreshgroup.com/en/) has recently started sponsoring @mumoshu for this project along with his other works. A part of their sponsorship will enable @mumoshu to add an E2E test to keep ARC even more reliable on AWS. Thank you for your sponsorship!
|
||||
|
||||
[<img src="https://user-images.githubusercontent.com/22009/170898715-07f02941-35ec-418b-8cd4-251b422fa9ac.png" width="219" height="71" />](https://careers.hellofresh.com/)
|
||||
|
||||
## Status
|
||||
|
||||
Even though actions-runner-controller is used in production environments, it is still in its early stage of development, hence versioned 0.x.
|
||||
@@ -59,7 +78,7 @@ By default, actions-runner-controller uses [cert-manager](https://cert-manager.i
|
||||
|
||||
- [Installing cert-manager on Kubernetes](https://cert-manager.io/docs/installation/kubernetes/)
|
||||
|
||||
Subsequent to this, install the custom resource definitions and actions-runner-controller with `kubectl` or `helm`. This will create an actions-runner-system namespace in your Kubernetes and deploy the required resources.
|
||||
After installing cert-manager, install the custom resource definitions and actions-runner-controller with `kubectl` or `helm`. This will create an actions-runner-system namespace in your Kubernetes and deploy the required resources.
|
||||
|
||||
**Kubectl Deployment:**
|
||||
|
||||
@@ -207,7 +226,7 @@ Log-in to a GitHub account that has `admin` privileges for the repository, and [
|
||||
|
||||
_Note: When you deploy enterprise runners they will get access to organizations, however, access to the repositories themselves is **NOT** allowed by default. Each GitHub organization must allow enterprise runner groups to be used in repositories as an initial one-time configuration step, this only needs to be done once after which it is permanent for that runner group._
|
||||
|
||||
_Note: GitHub does not document exactly what permissions you get with each PAT scope beyond a vague description. The best documentation they provide on the topic can be found [here](https://docs.github.com/en/developers/apps/building-oauth-apps/scopes-for-oauth-apps) if you wish to review. The docs target OAuth apps and so are incomplete and may not be 100% accurate._
|
||||
_Note: GitHub does not document exactly what permissions you get with each PAT scope beyond a vague description. The best documentation they provide on the topic can be found [here](https://docs.github.com/en/developers/apps/building-oauth-apps/scopes-for-oauth-apps) if you wish to review. The docs target OAuth apps and so are incomplete and may not be 100% accurate._
|
||||
|
||||
---
|
||||
|
||||
@@ -239,7 +258,7 @@ You can deploy multiple controllers either in a single shared namespace, or in a
|
||||
|
||||
If you plan on installing all instances of the controller stack into a single namespace there are a few things you need to do for this to work.
|
||||
|
||||
1. All resources per stack must have a unique, in the case of Helm this can be done by giving each install a unique release name, or via the `fullnameOverride` properties.
|
||||
1. All resources per stack must have a unique name, in the case of Helm this can be done by giving each install a unique release name, or via the `fullnameOverride` properties.
|
||||
2. `authSecret.name` needs to be unique per stack when each stack is tied to runners in different GitHub organizations and repositories AND you want your GitHub credentials to be narrowly scoped.
|
||||
3. `leaderElectionId` needs to be unique per stack. If this is not unique to the stack the controller tries to race onto the leader election lock resulting in only one stack working concurrently. Your controller will be stuck with a log message something like this `attempting to acquire leader lease arc-controllers/actions-runner-controller...`
|
||||
4. The MutatingWebhookConfiguration in each stack must include a namespace selector for that stack's corresponding runner namespace, this is already configured in the helm chart.
|
||||
@@ -253,52 +272,50 @@ Alternatively, you can install each controller stack into a unique namespace (re
|
||||
- The organization level
|
||||
- The enterprise level
|
||||
|
||||
There are two ways to use this controller:
|
||||
Runners can be deployed as 1 of 2 abstractions:
|
||||
|
||||
- Manage runners one by one with `Runner`.
|
||||
- Manage a set of runners with `RunnerDeployment`.
|
||||
- A `RunnerDeployment` (similar to k8s's `Deployments`, based on `Pods`)
|
||||
- A `RunnerSet` (based on k8s's `StatefulSets`)
|
||||
|
||||
We go into details about the differences between the 2 later, initially lets look at how to deploy a basic `RunnerDeployment` at the 3 possible management hierarchies.
|
||||
|
||||
### Repository Runners
|
||||
|
||||
To launch a single self-hosted runner, you need to create a manifest file that includes a `Runner` resource as follows. This example launches a self-hosted runner with name *example-runner* for the *actions-runner-controller/actions-runner-controller* repository.
|
||||
To launch a single self-hosted runner, you need to create a manifest file that includes a `RunnerDeployment` resource as follows. This example launches a self-hosted runner with name *example-runnerdeploy* for the *actions-runner-controller/actions-runner-controller* repository.
|
||||
|
||||
```yaml
|
||||
# runner.yaml
|
||||
# runnerdeployment.yaml
|
||||
apiVersion: actions.summerwind.dev/v1alpha1
|
||||
kind: Runner
|
||||
kind: RunnerDeployment
|
||||
metadata:
|
||||
name: example-runner
|
||||
name: example-runnerdeploy
|
||||
spec:
|
||||
repository: example/myrepo
|
||||
env: []
|
||||
replicas: 1
|
||||
template:
|
||||
spec:
|
||||
repository: mumoshu/actions-runner-controller-ci
|
||||
```
|
||||
|
||||
Apply the created manifest file to your Kubernetes.
|
||||
|
||||
```shell
|
||||
$ kubectl apply -f runner.yaml
|
||||
runner.actions.summerwind.dev/example-runner created
|
||||
$ kubectl apply -f runnerdeployment.yaml
|
||||
runnerdeployment.actions.summerwind.dev/example-runnerdeploy created
|
||||
```
|
||||
|
||||
You can see that the Runner resource has been created.
|
||||
You can see that 1 runner and its underlying pod has been created as specified by `replicas: 1` attribute:
|
||||
|
||||
```shell
|
||||
$ kubectl get runners
|
||||
NAME REPOSITORY STATUS
|
||||
example-runner actions-runner-controller/actions-runner-controller Running
|
||||
```
|
||||
NAME REPOSITORY STATUS
|
||||
example-runnerdeploy2475h595fr mumoshu/actions-runner-controller-ci Running
|
||||
|
||||
You can also see that the runner pod has been running.
|
||||
|
||||
```shell
|
||||
$ kubectl get pods
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
example-runner 2/2 Running 0 1m
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
example-runnerdeploy2475ht2qbr 2/2 Running 0 1m
|
||||
```
|
||||
|
||||
The runner you created has been registered to your repository.
|
||||
|
||||
<img width="756" alt="Actions tab in your repository settings" src="https://user-images.githubusercontent.com/230145/73618667-8cbf9700-466c-11ea-80b6-c67e6d3f70e7.png">
|
||||
The runner you created has been registered directly to the defined repository, you should be able to see it in the settings of the repository.
|
||||
|
||||
Now you can use your self-hosted runner. See the [official documentation](https://help.github.com/en/actions/automating-your-workflow-with-github-actions/using-self-hosted-runners-in-a-workflow) on how to run a job with it.
|
||||
|
||||
@@ -307,13 +324,15 @@ Now you can use your self-hosted runner. See the [official documentation](https:
|
||||
To add the runner to an organization, you only need to replace the `repository` field with `organization`, so the runner will register itself to the organization.
|
||||
|
||||
```yaml
|
||||
# runner.yaml
|
||||
apiVersion: actions.summerwind.dev/v1alpha1
|
||||
kind: Runner
|
||||
kind: RunnerDeployment
|
||||
metadata:
|
||||
name: example-org-runner
|
||||
name: example-runnerdeploy
|
||||
spec:
|
||||
organization: your-organization-name
|
||||
replicas: 1
|
||||
template:
|
||||
spec:
|
||||
organization: your-organization-name
|
||||
```
|
||||
|
||||
Now you can see the runner on the organization level (if you have organization owner permissions).
|
||||
@@ -323,24 +342,22 @@ Now you can see the runner on the organization level (if you have organization o
|
||||
To add the runner to an enterprise, you only need to replace the `repository` field with `enterprise`, so the runner will register itself to the enterprise.
|
||||
|
||||
```yaml
|
||||
# runner.yaml
|
||||
apiVersion: actions.summerwind.dev/v1alpha1
|
||||
kind: Runner
|
||||
kind: RunnerDeployment
|
||||
metadata:
|
||||
name: example-enterprise-runner
|
||||
name: example-runnerdeploy
|
||||
spec:
|
||||
enterprise: your-enterprise-name
|
||||
replicas: 1
|
||||
template:
|
||||
spec:
|
||||
enterprise: your-enterprise-name
|
||||
```
|
||||
|
||||
Now you can see the runner on the enterprise level (if you have enterprise access permissions).
|
||||
|
||||
### RunnerDeployments
|
||||
|
||||
You can manage sets of runners instead of individually through the `RunnerDeployment` kind and its `replicas:` attribute. This kind is required for many of the advanced features.
|
||||
|
||||
There are `RunnerReplicaSet` and `RunnerDeployment` kinds that corresponds to the `ReplicaSet` and `Deployment` kinds but for the `Runner` kind.
|
||||
|
||||
You typically only need `RunnerDeployment` rather than `RunnerReplicaSet` as the former is for managing the latter.
|
||||
In our previous examples we were deploying a single runner via the `RunnerDeployment` kind, the amount of runners deployed can be statically set via the `replicas:` field, we can increase this value to deploy additioanl sets of runners instead:
|
||||
|
||||
```yaml
|
||||
# runnerdeployment.yaml
|
||||
@@ -349,11 +366,11 @@ kind: RunnerDeployment
|
||||
metadata:
|
||||
name: example-runnerdeploy
|
||||
spec:
|
||||
# This will deploy 2 runners now
|
||||
replicas: 2
|
||||
template:
|
||||
spec:
|
||||
repository: mumoshu/actions-runner-controller-ci
|
||||
env: []
|
||||
```
|
||||
|
||||
Apply the manifest file to your cluster:
|
||||
@@ -372,15 +389,13 @@ example-runnerdeploy2475h595fr mumoshu/actions-runner-controller-ci Running
|
||||
example-runnerdeploy2475ht2qbr mumoshu/actions-runner-controller-ci Running
|
||||
```
|
||||
|
||||
### RunnerSets
|
||||
### RunnerSets
|
||||
|
||||
> This feature requires controller version => [v0.20.0](https://github.com/actions-runner-controller/actions-runner-controller/releases/tag/v0.20.0)
|
||||
|
||||
_Ensure you see the limitations before using this kind!!!!!_
|
||||
|
||||
For scenarios where you require the advantages of a `StatefulSet`, for example persistent storage, ARC implements a runner based on Kubernetes' `StatefulSets`, the `RunnerSet`.
|
||||
|
||||
A basic `RunnerSet` would look like this:
|
||||
We can also deploy sets of RunnerSets the same way, a basic `RunnerSet` would look like this:
|
||||
|
||||
```yaml
|
||||
apiVersion: actions.summerwind.dev/v1alpha1
|
||||
@@ -388,8 +403,7 @@ kind: RunnerSet
|
||||
metadata:
|
||||
name: example
|
||||
spec:
|
||||
ephemeral: false
|
||||
replicas: 2
|
||||
replicas: 1
|
||||
repository: mumoshu/actions-runner-controller-ci
|
||||
# Other mandatory fields from StatefulSet
|
||||
selector:
|
||||
@@ -402,7 +416,7 @@ spec:
|
||||
app: example
|
||||
```
|
||||
|
||||
As it is based on `StatefulSet`, `selector` and `template.medatada.labels` it needs to be defined and have the exact same set of labels. `serviceName` must be set to some non-empty string as it is also required by `StatefulSet`.
|
||||
As it is based on `StatefulSet`, `selector` and `template.metadata.labels` it needs to be defined and have the exact same set of labels. `serviceName` must be set to some non-empty string as it is also required by `StatefulSet`.
|
||||
|
||||
Runner-related fields like `ephemeral`, `repository`, `organization`, `enterprise`, and so on should be written directly under `spec`.
|
||||
|
||||
@@ -420,8 +434,7 @@ kind: RunnerSet
|
||||
metadata:
|
||||
name: example
|
||||
spec:
|
||||
ephemeral: false
|
||||
replicas: 2
|
||||
replicas: 1
|
||||
repository: mumoshu/actions-runner-controller-ci
|
||||
dockerdWithinRunnerContainer: true
|
||||
template:
|
||||
@@ -429,7 +442,7 @@ spec:
|
||||
securityContext:
|
||||
# All level/role/type/user values will vary based on your SELinux policies.
|
||||
# See https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux_atomic_host/7/html/container_security_guide/docker_selinux_security_policy for information about SELinux with containers
|
||||
seLinuxOptions:
|
||||
seLinuxOptions:
|
||||
level: "s0"
|
||||
role: "system_r"
|
||||
type: "super_t"
|
||||
@@ -444,6 +457,17 @@ spec:
|
||||
requests:
|
||||
cpu: "2.0"
|
||||
memory: "4Gi"
|
||||
# This is an advanced configuration. Don't touch it unless you know what you're doing.
|
||||
securityContext:
|
||||
# Usually, the runner container's privileged field is derived from dockerdWithinRunnerContainer.
|
||||
# But in the case where you need to run privileged job steps even if you don't use docker/don't need dockerd within the runner container,
|
||||
# just specified `privileged: true` like this.
|
||||
# See https://github.com/actions-runner-controller/actions-runner-controller/issues/1282
|
||||
# Do note that specifying `privileged: false` while using dind is very likely to fail, even if you use some vm-based container runtimes
|
||||
# like firecracker and kata. Basically they run containers within dedicated micro vms and so
|
||||
# it's more like you can use `privileged: true` safer with those runtimes.
|
||||
#
|
||||
# privileged: true
|
||||
- name: docker
|
||||
resources:
|
||||
limits:
|
||||
@@ -461,7 +485,6 @@ Under the hood, `RunnerSet` relies on Kubernetes's `StatefulSet` and Mutating We
|
||||
**Limitations**
|
||||
|
||||
* For autoscaling the `RunnerSet` kind only supports pull driven scaling or the `workflow_job` event for webhook driven scaling.
|
||||
* Whilst `RunnerSets` support all runner modes as well as autoscaling, currently PVs are **NOT** automatically cleaned up as they are still bound to their respective PVCs when a runner is deleted by the controller. This has **major** implications when using `RunnerSets` in the standard runner mode, `ephemeral: true`, see [persistent runners](#persistent-runners) for more details. As a result of this, using the default ephemeral configuration or implementing autoscaling for your `RunnerSets`, you will get a build-up of PVCs and PVs without some sort of custom solution for cleaning up.
|
||||
|
||||
### Persistent Runners
|
||||
|
||||
@@ -489,7 +512,7 @@ A `RunnerDeployment` or `RunnerSet` can scale the number of runners between `min
|
||||
|
||||
#### Anti-Flapping Configuration
|
||||
|
||||
For both pull driven or webhook driven scaling an anti-flapping implementation is included, by default a runner won't be scaled down within 10 minutes of it having been scaled up.
|
||||
For both pull driven or webhook driven scaling an anti-flapping implementation is included, by default a runner won't be scaled down within 10 minutes of it having been scaled up.
|
||||
|
||||
This anti-flap configuration also has the final say on if a runner can be scaled down or not regardless of the chosen scaling method.
|
||||
|
||||
@@ -536,7 +559,7 @@ spec:
|
||||
|
||||
> To configure webhook driven scaling see the [Webhook Driven Scaling](#webhook-driven-scaling) section
|
||||
|
||||
The pull based metrics are configured in the `metrics` attribute of a HRA (see snippet below). The period between polls is defined by the controller's `--sync-period` flag. If this flag isn't provided then the controller defaults to a sync period of `1m`, this can be configured in seconds or minutes.
|
||||
The pull based metrics are configured in the `metrics` attribute of a HRA (see snippet below). The period between polls is defined by the controller's `--sync-period` flag. If this flag isn't provided then the controller defaults to a sync period of `1m`, this can be configured in seconds or minutes.
|
||||
|
||||
Be aware that the shorter the sync period the quicker you will consume your rate limit budget, depending on your environment this may or may not be a risk. Consider monitoring ARCs rate limit budget when configuring this feature to find the optimal performance sync period.
|
||||
|
||||
@@ -554,7 +577,7 @@ spec:
|
||||
minReplicas: 1
|
||||
maxReplicas: 5
|
||||
# Your chosen scaling metrics here
|
||||
metrics: []
|
||||
metrics: []
|
||||
```
|
||||
|
||||
**Metric Options:**
|
||||
@@ -692,7 +715,7 @@ With the above example, the webhook server scales `example-runners` by `1` repli
|
||||
|
||||
Of note is the `HRA.spec.scaleUpTriggers[].duration` attribute. This attribute is used to calculate if the replica number added via the trigger is expired or not. On each reconciliation loop, the controller sums up all the non-expiring replica numbers from previous scale-up triggers. It then compares the summed desired replica number against the current replica number. If the summed desired replica number > the current number then it means the replica count needs to scale up.
|
||||
|
||||
As mentioned previously, the `scaleDownDelaySecondsAfterScaleOut` property has the final say still. If the latest scale-up time + the anti-flapping duration is later than the current time, it doesn’t immediately scale up and instead retries the calculation again later to see if it needs to scale yet.
|
||||
As mentioned previously, the `scaleDownDelaySecondsAfterScaleOut` property has the final say still. If the latest scale-up time + the anti-flapping duration is later than the current time, it doesn’t immediately scale down and instead retries the calculation again later to see if it needs to scale yet.
|
||||
|
||||
---
|
||||
|
||||
@@ -700,31 +723,164 @@ The primary benefit of autoscaling on Webhooks compared to the pull driven scali
|
||||
|
||||
> You can learn the implementation details in [#282](https://github.com/actions-runner-controller/actions-runner-controller/pull/282)
|
||||
|
||||
##### Install with Helm
|
||||
|
||||
To enable this feature, you first need to install the GitHub webhook server. To install via our Helm chart,
|
||||
_[see the values documentation for all configuration options](https://github.com/actions-runner-controller/actions-runner-controller/blob/master/charts/actions-runner-controller/README.md)_
|
||||
|
||||
```console
|
||||
$ helm upgrade --install --namespace actions-runner-system --create-namespace \
|
||||
--wait actions-runner-controller actions-runner-controller/actions-runner-controller \
|
||||
--set "githubWebhookServer.enabled=true,githubWebhookServer.ports[0].nodePort=33080"
|
||||
--set "githubWebhookServer.enabled=true,service.type=NodePort,githubWebhookServer.ports[0].nodePort=33080"
|
||||
```
|
||||
|
||||
The above command will result in exposing the node port 33080 for Webhook events. Usually, you need to create an
|
||||
external load balancer targeted to the node port, and register the hostname or the IP address of the external load balancer
|
||||
to the GitHub Webhook.
|
||||
The above command will result in exposing the node port 33080 for Webhook events.
|
||||
Usually, you need to create an external load balancer targeted to the node port,
|
||||
and register the hostname or the IP address of the external load balancer to the GitHub Webhook.
|
||||
|
||||
Once you were able to confirm that the Webhook server is ready and running from GitHub - this is usually verified by the
|
||||
GitHub sending PING events to the Webhook server - create or update your `HorizontalRunnerAutoscaler` resources
|
||||
by learning the following configuration examples.
|
||||
**With a custom Kubernetes ingress controller:**
|
||||
|
||||
> **CAUTION:** The Kubernetes ingress controllers described below is just a suggestion from the community and
|
||||
> the ARC team will not provide any user support for ingress controllers as it's not a part of this project.
|
||||
>
|
||||
> The following guide on creating an ingress has been contributed by the awesome ARC community and is provided here as-is.
|
||||
> You may, however, still be able to ask for help on the community on GitHub Discussions if you have any problems.
|
||||
|
||||
Kubernetes provides `Ingress` resources to let you configure your ingress controller to expose a Kubernetes service.
|
||||
If you plan to expose ARC via Ingress, you might not be required to make it a `NodePort` service
|
||||
(although nothing would prevent an ingress controller to expose NodePort services too):
|
||||
|
||||
```console
|
||||
$ helm upgrade --install --namespace actions-runner-system --create-namespace \
|
||||
--wait actions-runner-controller actions-runner-controller/actions-runner-controller \
|
||||
--set "githubWebhookServer.enabled=true"
|
||||
```
|
||||
|
||||
The command above will create a new deployment and a service for receiving Github Webhooks on the `actions-runner-system` namespace.
|
||||
|
||||
Now we need to expose this service so that GitHub can send these webhooks over the network with TSL protection.
|
||||
|
||||
You can do it in any way you prefer, here we'll suggest doing it with a k8s Ingress.
|
||||
For the sake of this example we'll expose this service on the following URL:
|
||||
|
||||
- https://your.domain.com/actions-runner-controller-github-webhook-server
|
||||
|
||||
Where `your.domain.com` should be replaced by your own domain.
|
||||
|
||||
> Note: This step assumes you already have a configured `cert-manager` and domain name for your cluster.
|
||||
|
||||
Let's start by creating an Ingress file called `arc-webhook-server.yaml` with the following contents:
|
||||
|
||||
```yaml
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: actions-runner-controller-github-webhook-server
|
||||
namespace: actions-runner-system
|
||||
annotations:
|
||||
kubernetes.io/ingress.class: nginx
|
||||
nginx.ingress.kubernetes.io/backend-protocol: "HTTP"
|
||||
spec:
|
||||
tls:
|
||||
- hosts:
|
||||
- your.domain.com
|
||||
secretName: your-tls-secret-name
|
||||
rules:
|
||||
- http:
|
||||
paths:
|
||||
- path: /actions-runner-controller-github-webhook-server
|
||||
pathType: Prefix
|
||||
backend:
|
||||
service:
|
||||
name: actions-runner-controller-github-webhook-server
|
||||
port:
|
||||
number: 80
|
||||
```
|
||||
|
||||
Make sure to set the `spec.tls.secretName` to the name of your TLS secret and
|
||||
`spec.tls.hosts[0]` to your own domain.
|
||||
|
||||
Then create this resource on your cluster with the following command:
|
||||
|
||||
```bash
|
||||
kubectl apply -n actions-runner-system -f arc-webhook-server.yaml
|
||||
```
|
||||
|
||||
**Configuring GitHub to send webhooks to our newly created webhook server:**
|
||||
|
||||
After this step your webhook server should be ready to start receiving webhooks from GitHub.
|
||||
|
||||
To configure GitHub to start sending you webhooks, go to the settings page of your repository
or organization, then click on `Webhooks`, then on `Add webhook`.
|
||||
|
||||
There set the "Payload URL" field with the webhook URL you just created,
|
||||
if you followed the example ingress above the URL would be something like this:
|
||||
|
||||
- https://your.domain.com/actions-runner-controller-github-webhook-server
|
||||
|
||||
> Remember to replace `your.domain.com` with your own domain.
|
||||
|
||||
Then click on "let me select individual events" and choose `Workflow Jobs`.
|
||||
|
||||
You may also want to choose the following event(s) if you use them as scale triggers in your HRA spec:
|
||||
|
||||
- Check runs
|
||||
- Pushes
|
||||
- Pull Requests
|
||||
|
||||
Later you can remove any of these you are not using to reduce the amount of data sent to your server.
|
||||
|
||||
Then click on `Add Webhook`.
|
||||
|
||||
GitHub will then send a `ping` event to your webhook server to check if it is working. If it is, you'll see a green check mark
alongside your webhook on the Settings -> Webhooks page.
|
||||
|
||||
Once you have confirmed from GitHub that the webhook server is ready and running, create or update your
`HorizontalRunnerAutoscaler` resources using the following configuration examples.
|
||||
|
||||
##### Install with Kustomize
|
||||
|
||||
To install this feature using Kustomize, add `github-webhook-server` resources to your `kustomization.yaml` file as in the example below:
|
||||
|
||||
```yaml
|
||||
apiVersion: kustomize.config.k8s.io/v1beta1
|
||||
kind: Kustomization
|
||||
|
||||
resources:
|
||||
# You should already have this
|
||||
- github.com/actions-runner-controller/actions-runner-controller/config//default?ref=v0.22.2
|
||||
# Add the below!
|
||||
- github.com/actions-runner-controller/actions-runner-controller/config//github-webhook-server?ref=v0.22.2
```

Finally, you will have to configure an ingress so that you may configure the webhook in GitHub. An example of such an ingress can be found below:
|
||||
|
||||
```yaml
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: actions-runners-webhook-server
|
||||
spec:
|
||||
rules:
|
||||
- http:
|
||||
paths:
|
||||
- path: /
|
||||
backend:
|
||||
service:
|
||||
name: github-webhook-server
|
||||
port:
|
||||
number: 80
|
||||
pathType: Exact
|
||||
|
||||
```
|
||||
|
||||
##### Examples
|
||||
|
||||
- [Example 1: Scale on each `workflow_job` event](#example-1-scale-on-each-workflow_job-event)
|
||||
- [Example 2: Scale up on each `check_run` event](#example-2-scale-up-on-each-check_run-event)
|
||||
- [Example 3: Scale on each `pull_request` event against a given set of branches](#example-3-scale-on-each-pull_request-event-against-a-given-set-of-branches)
|
||||
- [Example 4: Scale on each `push` event](#example-4-scale-on-each-push-event)
|
||||
|
||||
**Note:** **minReplicas** and **maxReplicas** are mandatory parameters in all these examples, even for webhook-driven scaling.
|
||||
|
||||
###### Example 1: Scale on each `workflow_job` event
|
||||
|
||||
> This feature requires controller version >= [v0.20.0](https://github.com/actions-runner-controller/actions-runner-controller/releases/tag/v0.20.0)
|
||||
|
||||
@@ -735,16 +891,23 @@ The most flexible webhook GitHub offers is the `workflow_job` webhook, it includ
|
||||
This webhook should cover most people's needs, please experiment with this webhook first before considering the others.
|
||||
|
||||
```yaml
|
||||
apiVersion: actions.summerwind.dev/v1alpha1
kind: RunnerDeployment
metadata:
  name: example-runners
spec:
  template:
    spec:
      repository: example/myrepo
---
apiVersion: actions.summerwind.dev/v1alpha1
kind: HorizontalRunnerAutoscaler
metadata:
  name: example-runners
spec:
  scaleDownDelaySecondsAfterScaleOut: 300
  minReplicas: 1
  maxReplicas: 10
  scaleTargetRef:
    name: example-runners
    # Uncomment the below in case the target is not RunnerDeployment but RunnerSet
|
||||
@@ -761,7 +924,7 @@ You can configure your GitHub webhook settings to only include `Workflows Job` e
|
||||
|
||||
Each kind has a `status` of `queued`, `in_progress` and `completed`. With the above configuration, `actions-runner-controller` adds one runner for a `workflow_job` event whose `status` is `queued`. Similarly, it removes one runner for a `workflow_job` event whose `status` is `completed`. The caveat to remember is that this scale-down is within the bounds of your `scaleDownDelaySecondsAfterScaleOut` configuration; if this time hasn't passed, the scale-down will be deferred.
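For reference, the webhook-driven part of such a `HorizontalRunnerAutoscaler` is expressed via `scaleUpTriggers`; a minimal sketch (the `duration` value here is illustrative) might look like:

```yaml
spec:
  scaleTargetRef:
    name: example-runners
  minReplicas: 1
  maxReplicas: 10
  scaleUpTriggers:
  - githubEvent:
      workflowJob: {}
    duration: "30m"
```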
|
||||
|
||||
###### Example 2: Scale up on each `check_run` event
|
||||
|
||||
> Note: This should work almost like https://github.com/philips-labs/terraform-aws-github-runner
|
||||
|
||||
@@ -778,6 +941,8 @@ spec:
|
||||
---
|
||||
kind: HorizontalRunnerAutoscaler
|
||||
spec:
|
||||
minReplicas: 1
|
||||
maxReplicas: 10
|
||||
scaleTargetRef:
|
||||
name: example-runners
|
||||
# Uncomment the below in case the target is not RunnerDeployment but RunnerSet
|
||||
@@ -804,6 +969,8 @@ spec:
|
||||
---
|
||||
kind: HorizontalRunnerAutoscaler
|
||||
spec:
|
||||
minReplicas: 1
|
||||
maxReplicas: 10
|
||||
scaleTargetRef:
|
||||
name: example-runners
|
||||
# Uncomment the below in case the target is not RunnerDeployment but RunnerSet
|
||||
@@ -819,7 +986,7 @@ spec:
|
||||
duration: "5m"
|
||||
```
|
||||
|
||||
###### Example 3: Scale on each `pull_request` event against a given set of branches
|
||||
|
||||
To scale up replicas of the runners for `example/myrepo` by 1 for 5 minutes on each `pull_request` against the `main` or `develop` branch you write manifests like the below:
|
||||
|
||||
@@ -834,6 +1001,8 @@ spec:
|
||||
---
|
||||
kind: HorizontalRunnerAutoscaler
|
||||
spec:
|
||||
minReplicas: 1
|
||||
maxReplicas: 10
|
||||
scaleTargetRef:
|
||||
name: example-runners
|
||||
# Uncomment the below in case the target is not RunnerDeployment but RunnerSet
|
||||
@@ -862,6 +1031,8 @@ spec:
|
||||
---
|
||||
kind: HorizontalRunnerAutoscaler
|
||||
spec:
|
||||
minReplicas: 1
|
||||
maxReplicas: 10
|
||||
scaleTargetRef:
|
||||
name: example-runners
|
||||
# Uncomment the below in case the target is not RunnerDeployment but RunnerSet
|
||||
@@ -968,9 +1139,13 @@ The earlier entry is prioritized higher than later entries. So you usually defin
|
||||
|
||||
A common use case for this may be to have 1 override to scale to 0 during the week outside of core business hours and another override to scale to 0 during all hours of the weekend.
|
||||
|
||||
### Alternative Runners

ARC also offers a few alternative runner options.
|
||||
|
||||
#### Runner with DinD
|
||||
|
||||
When using the default runner, the runner pod starts up 2 containers: runner and DinD (Docker-in-Docker). ARC maintains an alternative all-in-one runner image with docker running in the same container as the runner. This may be preferred from a resource or complexity perspective, or to be compliant with a `LimitRange` namespace configuration.
|
||||
|
||||
```yaml
|
||||
# dindrunnerdeployment.yaml
|
||||
@@ -988,7 +1163,35 @@ spec:
|
||||
env: []
|
||||
```
|
||||
|
||||
This also helps with resources, as you don't need to give resources separately to docker and runner.
|
||||
#### Runner with K8s Jobs
|
||||
|
||||
When using the default runner, jobs that use a container will run in Docker. This necessitates privileged mode, either on the runner pod or the sidecar container.
|
||||
|
||||
By setting the container mode, you can instead invoke these jobs using a [kubernetes implementation](https://github.com/actions/runner-container-hooks/tree/main/packages/k8s) while not executing in privileged mode.
|
||||
|
||||
The runner will dynamically spin up pods and k8s jobs in the runner's namespace to run the workflow, so a `workVolumeClaimTemplate` is required for the runner's working directory, as well as a service account with the [appropriate permissions](https://github.com/actions/runner-container-hooks/tree/main/packages/k8s#pre-requisites) (a minimal RBAC sketch follows the example below).
|
||||
|
||||
There are some [limitations](https://github.com/actions/runner-container-hooks/tree/main/packages/k8s#limitations) to this approach, mainly [job containers](https://docs.github.com/en/actions/using-jobs/running-jobs-in-a-container) are required on all workflows.
|
||||
|
||||
```yaml
|
||||
# runner.yaml
apiVersion: actions.summerwind.dev/v1alpha1
kind: Runner
metadata:
  name: example-runner
spec:
  repository: example/myrepo
  containerMode: kubernetes
  serviceAccountName: my-service-account
  workVolumeClaimTemplate:
    storageClassName: "my-dynamic-storage-class"
    accessModes:
      - ReadWriteOnce
    resources:
      requests:
        storage: 10Gi
  env: []
```
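The `my-service-account` referenced above must exist and be granted the permissions required by the Kubernetes container hook. A minimal sketch is shown below; the resource names are illustrative, and the exact rule set should be taken from the runner-container-hooks prerequisites linked above:

```yaml
# rbac.yaml - names below are illustrative
apiVersion: v1
kind: ServiceAccount
metadata:
  name: my-service-account
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: runner-k8s-mode
rules:
- apiGroups: [""]
  resources: ["pods"]
  verbs: ["get", "list", "create", "delete"]
- apiGroups: [""]
  resources: ["pods/exec"]
  verbs: ["get", "create"]
- apiGroups: [""]
  resources: ["pods/log"]
  verbs: ["get", "list", "watch"]
- apiGroups: ["batch"]
  resources: ["jobs"]
  verbs: ["get", "list", "create", "delete"]
- apiGroups: [""]
  resources: ["secrets"]
  verbs: ["get", "list", "create", "delete"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: runner-k8s-mode
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: runner-k8s-mode
subjects:
- kind: ServiceAccount
  name: my-service-account
```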
|
||||
|
||||
### Additional Tweaks
|
||||
|
||||
@@ -1007,6 +1210,7 @@ spec:
|
||||
annotations:
|
||||
cluster-autoscaler.kubernetes.io/safe-to-evict: "true"
|
||||
spec:
|
||||
priorityClassName: "high"
|
||||
nodeSelector:
|
||||
node-role.kubernetes.io/test: ""
|
||||
|
||||
@@ -1079,7 +1283,7 @@ spec:
|
||||
# Valid only when dockerdWithinRunnerContainer=false
|
||||
dockerEnv:
|
||||
- name: HTTP_PROXY
|
||||
value: http://example.com
|
||||
# Docker sidecar container image tweaks examples below, only applicable if dockerdWithinRunnerContainer = false
|
||||
dockerdContainerResources:
|
||||
limits:
|
||||
@@ -1138,13 +1342,26 @@ spec:
|
||||
# This must match the name of a RuntimeClass resource available on the cluster.
|
||||
# More info: https://kubernetes.io/docs/concepts/containers/runtime-class
|
||||
runtimeClassName: "runc"
|
||||
# This is an advanced configuration. Don't touch it unless you know what you're doing.
|
||||
containers:
|
||||
- name: runner
|
||||
# Usually, the runner container's privileged field is derived from dockerdWithinRunnerContainer.
|
||||
# But in the case where you need to run privileged job steps even if you don't use docker/don't need dockerd within the runner container,
|
||||
# just specify `privileged: true` like this.
|
||||
# See https://github.com/actions-runner-controller/actions-runner-controller/issues/1282
|
||||
# Do note that specifying `privileged: false` while using dind is very likely to fail, even if you use some vm-based container runtimes
|
||||
# like firecracker and kata. Basically they run containers within dedicated micro vms and so
|
||||
# it's more that `privileged: true` can be used more safely with those runtimes.
|
||||
#
|
||||
# privileged: true
|
||||
```
|
||||
|
||||
### Custom Volume mounts
|
||||
You can configure your own custom volume mounts, for example to have the work/docker data in memory or on NVMe SSD for
I/O-intensive builds. Other custom volume mounts should be possible as well; see the [kubernetes documentation](https://kubernetes.io/docs/concepts/storage/volumes/).
|
||||
|
||||
#### RAM Disk
|
||||
|
||||
Example of how to place the runner work dir, docker sidecar, and /tmp within the runner onto a ramdisk.
|
||||
```yaml
|
||||
kind: RunnerDeployment
|
||||
@@ -1167,14 +1384,15 @@ spec:
|
||||
- name: tmp
|
||||
emptyDir:
|
||||
medium: Memory
|
||||
ephemeral: true # recommended to not leak data between builds.
|
||||
```
|
||||
|
||||
#### NVME SSD
|
||||
|
||||
In this example we provide NVMe-backed storage for the workdir, docker sidecar, and /tmp within the runner.
Here we use a working example on GKE, which provides the NVMe disk at /mnt/disks/ssd0. We place the respective volumes in subdirectories there; to be able to run multiple runners, we use the pod name as a prefix for the subdirectories. Note that the disk will fill up over time, and disk space will not be freed until the node is removed.
|
||||
|
||||
**Beware** that running these persistent backend volumes **leaves data behind** between 2 different jobs on the workdir and `/tmp` with `ephemeral: false`.
|
||||
|
||||
```yaml
|
||||
kind: RunnerDeployment
|
||||
@@ -1215,7 +1433,126 @@ spec:
|
||||
- hostPath:
|
||||
path: /mnt/disks/ssd0
|
||||
name: tmp
|
||||
ephemeral: true # VERY important. otherwise data inside the workdir and /tmp is not cleared between builds
|
||||
```
|
||||
|
||||
#### Docker image layers caching
|
||||
|
||||
> **Note**: Ensure that the volume mount is added to the container that is running the Docker daemon.
|
||||
|
||||
`docker` stores pulled and built image layers in the [daemon's (note not client)](https://docs.docker.com/get-started/overview/#docker-architecture) [local storage area](https://docs.docker.com/storage/storagedriver/#sharing-promotes-smaller-images) which is usually at `/var/lib/docker`.
|
||||
|
||||
By leveraging RunnerSet's dynamic PV provisioning feature and your CSI driver, you can let ARC maintain a pool of PVs that are
|
||||
reused across runner pods to retain `/var/lib/docker`.
|
||||
|
||||
|
||||
|
||||
By default, ARC creates a sidecar container named `docker` within the runner pod for running the docker daemon. In that case,
|
||||
it's where you need the volume mount so that the manifest looks like:
|
||||
|
||||
```yaml
|
||||
kind: RunnerSet
|
||||
metadata:
|
||||
name: example
|
||||
spec:
|
||||
template:
|
||||
spec:
|
||||
containers:
|
||||
- name: docker
|
||||
volumeMounts:
|
||||
- name: var-lib-docker
|
||||
mountPath: /var/lib/docker
|
||||
volumeClaimTemplates:
|
||||
- metadata:
|
||||
name: var-lib-docker
|
||||
spec:
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
resources:
|
||||
requests:
|
||||
storage: 10Mi
|
||||
storageClassName: var-lib-docker
|
||||
```
|
||||
|
||||
With `dockerdWithinRunnerContainer: true`, you need to add the volume mount to the `runner` container.
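For example, a sketch of the same layer-cache volume when `dockerdWithinRunnerContainer: true` is set elsewhere in the spec (so there is no `docker` sidecar and the mount moves to the `runner` container):

```yaml
kind: RunnerSet
metadata:
  name: example
spec:
  template:
    spec:
      containers:
      - name: runner
        volumeMounts:
        - name: var-lib-docker
          mountPath: /var/lib/docker
  volumeClaimTemplates:
  - metadata:
      name: var-lib-docker
    spec:
      accessModes:
      - ReadWriteOnce
      resources:
        requests:
          storage: 10Mi
      storageClassName: var-lib-docker
```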
|
||||
|
||||
#### Go module and build caching
|
||||
|
||||
`Go` is known to cache builds under `$HOME/.cache/go-build` and downloaded modules under `$HOME/go/pkg/mod`.
The module cache dir can be customized by setting `GOMODCACHE`, so by setting it to somewhere under `$HOME/.cache`
we can have a single PV hosting both the build and module caches, which might improve Go module download and build times.
|
||||
|
||||
```yaml
|
||||
kind: RunnerSet
|
||||
metadata:
|
||||
name: example
|
||||
spec:
|
||||
template:
|
||||
spec:
|
||||
containers:
|
||||
- name: runner
|
||||
env:
|
||||
- name: GOMODCACHE
|
||||
value: "/home/runner/.cache/go-mod"
|
||||
volumeMounts:
|
||||
- name: cache
|
||||
mountPath: "/home/runner/.cache"
|
||||
volumeClaimTemplates:
|
||||
- metadata:
|
||||
name: cache
|
||||
spec:
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
resources:
|
||||
requests:
|
||||
storage: 10Mi
|
||||
storageClassName: cache
|
||||
```
|
||||
|
||||
#### PV-backed runner work directory
|
||||
|
||||
ARC works by automatically creating runner pods for running [`actions/runner`](https://github.com/actions/runner) and [running `config.sh`](https://docs.github.com/en/actions/hosting-your-own-runners/adding-self-hosted-runners#adding-a-self-hosted-runner-to-a-repository), which you had to run manually without ARC.
|
||||
|
||||
`config.sh` is the script provided by `actions/runner` to pre-configure the runner process before being started. One of the options provided by `config.sh` is `--work`,
|
||||
which specifies the working directory where the runner runs your workflow jobs in.
|
||||
|
||||
The volume and the partition that hosts the work directory should have several to dozens of GB of free space that might be used by your workflow jobs.
|
||||
|
||||
By default, ARC uses `/runner/_work` as the work directory, which is backed by Kubernetes's `emptyDir`. [`emptyDir` is usually backed by a directory created within a host's volume](https://kubernetes.io/docs/concepts/storage/volumes/#emptydir), somewhere under `/var/lib/kubelet/pods`. Therefore
your host's volume that is backing `/var/lib/kubelet/pods` must have enough free space to serve all the concurrent runner pods that might be deployed onto your host at the same time.
|
||||
|
||||
So, in case you see a job failure seemingly due to "disk full", it's very likely you need to reconfigure your host to have more free space.
|
||||
|
||||
In case you can't rely on the host's volume, consider using `RunnerSet` and backing the work directory with an ephemeral PV.
|
||||
|
||||
Kubernetes 1.23 or greater provides support for [generic ephemeral volumes](https://kubernetes.io/docs/concepts/storage/ephemeral-volumes/#generic-ephemeral-volumes), which is designed to support this exact use case. It's defined in the Pod spec API so it isn't currently available for `RunnerDeployment`. `RunnerSet` is based on Kubernetes' `StatefulSet`, which mostly embeds the Pod spec under `spec.template.spec`, so you can use it there.
|
||||
|
||||
```yaml
|
||||
kind: RunnerSet
|
||||
metadata:
|
||||
name: example
|
||||
spec:
|
||||
template:
|
||||
spec:
|
||||
containers:
|
||||
- name: runner
|
||||
volumeMounts:
|
||||
- mountPath: /runner/_work
|
||||
name: work
|
||||
- name: docker
|
||||
volumeMounts:
|
||||
- mountPath: /runner/_work
|
||||
name: work
|
||||
volumes:
|
||||
- name: work
|
||||
ephemeral:
|
||||
volumeClaimTemplate:
|
||||
spec:
|
||||
accessModes: [ "ReadWriteOnce" ]
|
||||
storageClassName: "runner-work-dir"
|
||||
resources:
|
||||
requests:
|
||||
storage: 10Gi
|
||||
```
|
||||
|
||||
### Runner Labels
|
||||
@@ -1231,7 +1568,6 @@ jobs:
|
||||
When you have multiple kinds of self-hosted runners, you can distinguish between them using labels. In order to do so, you can specify one or more labels in your `Runner` or `RunnerDeployment` spec.
|
||||
|
||||
```yaml
|
||||
# runnerdeployment.yaml
|
||||
apiVersion: actions.summerwind.dev/v1alpha1
|
||||
kind: RunnerDeployment
|
||||
metadata:
|
||||
@@ -1253,7 +1589,10 @@ jobs:
|
||||
runs-on: custom-runner
|
||||
```
|
||||
|
||||
Note that if you specify `self-hosted` in your workflow, then this will run your job on _any_ self-hosted runner, regardless of the labels that they have.
|
||||
When using labels there are a few things to be aware of:
|
||||
|
||||
1. `self-hosted` is implicit with every runner as this is an automatic label GitHub applies to any self-hosted runner. As a result ARC can treat all runners as having this label, so you do not need to explicitly define it in your runner manifests (you can if you want though).
2. In addition to the `self-hosted` label, GitHub also applies a few other [default](https://docs.github.com/en/actions/hosting-your-own-runners/using-self-hosted-runners-in-a-workflow#using-default-labels-to-route-jobs) labels to any self-hosted runner. The other default labels relate to the architecture of the runner and so can't be implicitly applied by ARC, as ARC doesn't know whether the runner is `linux` or `windows`, `x64` or `ARM64`, etc. If you wish to use these labels in your workflows and have ARC scale runners accurately, you must also add them to your runner manifests (see the sketch below).
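For example, a sketch of a `RunnerDeployment` that also declares the default architecture labels alongside a custom label (values are illustrative):

```yaml
apiVersion: actions.summerwind.dev/v1alpha1
kind: RunnerDeployment
metadata:
  name: custom-runner
spec:
  template:
    spec:
      repository: example/myrepo
      labels:
        - custom-runner
        - linux
        - x64
```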
|
||||
|
||||
### Runner Groups
|
||||
|
||||
@@ -1262,7 +1601,6 @@ Runner groups can be used to limit which repositories are able to use the GitHub
|
||||
To add the runner to the group `NewGroup`, specify the group in your `Runner` or `RunnerDeployment` spec.
|
||||
|
||||
```yaml
|
||||
# runnerdeployment.yaml
|
||||
apiVersion: actions.summerwind.dev/v1alpha1
|
||||
kind: RunnerDeployment
|
||||
metadata:
|
||||
@@ -1311,13 +1649,6 @@ spec:
|
||||
# Disables automatic runner updates
|
||||
- name: DISABLE_RUNNER_UPDATE
|
||||
value: "true"
|
||||
# Configure runner with --ephemeral instead of --once flag
|
||||
# WARNING | THIS ENV VAR IS DEPRECATED AND WILL BE REMOVED
|
||||
# IN A FUTURE VERSION OF ARC. IN 0.22.0 ARC SETS --ephemeral VIA
|
||||
# THE CONTROLLER SETTING THIS ENV VAR ON POD CREATION.
|
||||
# THIS ENV VAR WILL BE REMOVED, SEE ISSUE #1196 FOR DETAILS
|
||||
- name: RUNNER_FEATURE_FLAG_EPHEMERAL
|
||||
value: "true"
|
||||
```
|
||||
|
||||
### Using IRSA (IAM Roles for Service Accounts) in EKS
|
||||
@@ -1412,6 +1743,64 @@ $ helm --upgrade install actions-runner-controller/actions-runner-controller \
|
||||
admissionWebHooks.caBundle=${CA_BUNDLE}
|
||||
```
|
||||
|
||||
### Multitenancy
|
||||
|
||||
> This feature requires controller version >= [v0.26.0](https://github.com/actions-runner-controller/actions-runner-controller/releases/tag/v0.26.0)
|
||||
|
||||
In a large enterprise, there might be many GitHub organizations that require self-hosted runners. Previously, the only way to provide ARC-managed self-hosted runners in such an environment was [Deploying Multiple Controllers](#deploying-multiple-controllers), which incurs overhead because it requires one ARC installation per GitHub organization.
|
||||
|
||||
With multitenancy, you can let ARC manage self-hosted runners across organizations. It's enabled by default and the only thing you need to start using it is to set the `spec.githubAPICredentialsFrom.secretRef.name` fields for the following resources:
|
||||
|
||||
- `HorizontalRunnerAutoscaler`
|
||||
- `RunnerSet`
|
||||
|
||||
Or `spec.template.spec.githubAPICredentialsFrom.secretRef.name` field for the following resource:
|
||||
|
||||
- `RunnerDeployment`
|
||||
|
||||
> Although not explained above, `spec.githubAPICredentialsFrom` fields do exist in `Runner` and `RunnerReplicaSet`. A comparable pod annotation exists for the runner pod, too.
|
||||
> However, note that `Runner`, `RunnerReplicaSet` and runner pods are implementation details and are managed by `RunnerDeployment` and ARC.
|
||||
> Usually you don't need to manually set the fields for those resources.
|
||||
|
||||
`githubAPICredentialsFrom.secretRef.name` should refer to the name of the Kubernetes secret that contains either PAT or GitHub App credentials that is used for GitHub API calls for the said resource.
|
||||
|
||||
Usually, you should have a set of GitHub App credentials per GitHub organization and you would have a RunnerDeployment and a HorizontalRunnerAutoscaler per organization runner group. So, you might end up having the following resources for each organization:

- 1 Kubernetes secret that contains GitHub App credentials
- 1 RunnerDeployment/RunnerSet and 1 HorizontalRunnerAutoscaler per Runner Group
|
||||
|
||||
And the RunnerDeployment/RunnerSet and HorizontalRunnerAutoscaler should have the same value for `spec.githubAPICredentialsFrom.secretRef.name`, which refers to the name of the Kubernetes secret.
|
||||
|
||||
```yaml
|
||||
kind: Secret
|
||||
data:
|
||||
github_app_id: ...
|
||||
github_app_installation_id: ...
|
||||
github_app_private_key: ...
|
||||
---
|
||||
kind: RunnerDeployment
|
||||
metadata:
|
||||
namespace: org1-runners
|
||||
spec:
|
||||
githubAPICredentialsFrom:
|
||||
secretRef:
|
||||
name: org1-github-app
|
||||
---
|
||||
kind: HorizontalRunnerAutoscaler
|
||||
metadata:
|
||||
namespace: org1-runners
|
||||
spec:
|
||||
githubAPICredentialsFrom:
|
||||
secretRef:
|
||||
name: org1-github-app
|
||||
```
|
||||
|
||||
> Do note that, as shown in the above example, you usually set the same secret name in the `githubAPICredentialsFrom.secretRef.name` fields of both `RunnerDeployment` and `HorizontalRunnerAutoscaler`, so that GitHub API calls for the same set of runners share the specified credentials, regardless of
when and which ARC component (`horizontalrunnerautoscaler-controller`, `runnerdeployment-controller`, `runnerreplicaset-controller`, `runner-controller` or `runnerpod-controller`) makes specific API calls.
|
||||
> Just don't be surprised you have to repeat `githubAPICredentialsFrom.secretRef.name` settings among two resources!
|
||||
|
||||
Please refer to [Deploying Using GitHub App Authentication](#deploying-using-github-app-authentication) for how you could create the Kubernetes secret containing GitHub App credentials.
|
||||
|
||||
# Troubleshooting
|
||||
|
||||
See [troubleshooting guide](TROUBLESHOOTING.md) for solutions to various problems people have run into consistently.
|
||||
|
||||
22
SECURITY.md
Normal file
@@ -0,0 +1,22 @@
|
||||
# Security Policy
|
||||
|
||||
## Sponsoring the project
|
||||
|
||||
This project is maintained by a small team of two and therefore lacks the resources to provide security fixes in a timely manner.
|
||||
|
||||
If you have an important business that relies on this project, please consider sponsoring the project so that the maintainer(s) can commit to providing such a service.
|
||||
|
||||
Please refer to https://github.com/sponsors/actions-runner-controller for available tiers.
|
||||
|
||||
## Supported Versions
|
||||
|
||||
| Version | Supported |
|
||||
| ------- | ------------------ |
|
||||
| 0.23.0 | :white_check_mark: |
|
||||
| < 0.23.0| :x: |
|
||||
|
||||
## Reporting a Vulnerability
|
||||
|
||||
To report a security issue, please email ykuoka+arcsecurity(at)gmail.com with a description of the issue, the steps you took to create the issue, affected versions, and, if known, mitigations for the issue.
|
||||
|
||||
A maintainer will try to respond within 5 working days. If the issue is confirmed as a vulnerability, a Security Advisory will be opened. This project tries to follow a 90 day disclosure timeline.
|
||||
@@ -1,10 +1,93 @@
|
||||
# Troubleshooting
|
||||
|
||||
* [Tools](#tools)
* [Installation](#installation)
  * [InternalError when calling webhook: context deadline exceeded](#internalerror-when-calling-webhook-context-deadline-exceeded)
  * [Invalid header field value](#invalid-header-field-value)
  * [Helm chart install failure: certificate signed by unknown authority](#helm-chart-install-failure-certificate-signed-by-unknown-authority)
* [Operations](#operations)
  * [Stuck runner kind or backing pod](#stuck-runner-kind-or-backing-pod)
  * [Delay in jobs being allocated to runners](#delay-in-jobs-being-allocated-to-runners)
  * [Runner coming up before network available](#runner-coming-up-before-network-available)
  * [Outgoing network action hangs indefinitely](#outgoing-network-action-hangs-indefinitely)
  * [Unable to scale to zero with TotalNumberOfQueuedAndInProgressWorkflowRuns](#unable-to-scale-to-zero-with-totalnumberofqueuedandinprogressworkflowruns)

## Tools
|
||||
|
||||
A list of tools which are helpful for troubleshooting.
|
||||
|
||||
* https://github.com/rewanthtammana/kubectl-fields Kubernetes resources hierarchy parsing tool
|
||||
* https://github.com/stern/stern Multi pod and container log tailing for Kubernetes
|
||||
|
||||
## Installation
|
||||
|
||||
Troubleshooting runbooks that relate to ARC installation problems.
|
||||
|
||||
### InternalError when calling webhook: context deadline exceeded
|
||||
|
||||
**Problem**
|
||||
|
||||
This issue can come up for various reasons like leftovers from previous installations or not being able to access the K8s service's clusterIP associated with the admission webhook server (of ARC).
|
||||
|
||||
```
|
||||
Internal error occurred: failed calling webhook "mutate.runnerdeployment.actions.summerwind.dev":
|
||||
Post "https://actions-runner-controller-webhook.actions-runner-system.svc:443/mutate-actions-summerwind-dev-v1alpha1-runnerdeployment?timeout=10s": context deadline exceeded
|
||||
```
|
||||
|
||||
**Solution**
|
||||
|
||||
First we will try the common solution of checking webhook leftovers from previous installations:
|
||||
|
||||
1. ```bash
|
||||
kubectl get validatingwebhookconfiguration -A
|
||||
kubectl get mutatingwebhookconfiguration -A
|
||||
```
|
||||
2. If you see any webhooks related to actions-runner-controller, delete them:
|
||||
```bash
|
||||
kubectl delete mutatingwebhookconfiguration actions-runner-controller-mutating-webhook-configuration
|
||||
kubectl delete validatingwebhookconfiguration actions-runner-controller-validating-webhook-configuration
|
||||
```
|
||||
|
||||
If that didn't work, then probably your K8s control-plane is somehow unable to access the K8s service's clusterIP associated with the admission webhook server:
1. You're running apiserver as a binary and you didn't make service cluster IPs available to the host network.
2. You're running the apiserver in a pod but your pod network (i.e. the CNI plugin installation and config) is broken, so pods (like kube-apiserver) on the K8s control-plane nodes can't access ARC's admission webhook server pod(s), which probably run on data-plane nodes.
|
||||
|
||||
|
||||
Another reason could be GKE's firewall settings: on a private GKE cluster, the control plane may be blocked from reaching the webhook port when you try to deploy runners.

To fix this, you may either:
|
||||
|
||||
1. Configure the webhook to use another port, such as 443 or 10250, [each of
|
||||
which allow traffic by default](https://cloud.google.com/kubernetes-engine/docs/how-to/private-clusters#add_firewall_rules).
|
||||
|
||||
```sh
|
||||
# With helm, you'd set `webhookPort` to the port number of your choice
|
||||
# See https://github.com/actions-runner-controller/actions-runner-controller/pull/1410/files for more information
|
||||
helm upgrade --install --namespace actions-runner-system --create-namespace \
|
||||
--wait actions-runner-controller actions-runner-controller/actions-runner-controller \
|
||||
--set webhookPort=10250
|
||||
```
|
||||
|
||||
2. Set up a firewall rule to allow the master node to connect to the default
|
||||
webhook port. The exact way to do this may vary, but the following script
|
||||
should point you in the right direction:
|
||||
|
||||
```sh
|
||||
# 1) Retrieve the network tag automatically given to the worker nodes
|
||||
# NOTE: this only works if you have only one cluster in your GCP project. You will have to manually inspect the result of this command to find the tag for the cluster you want to target
|
||||
WORKER_NODES_TAG=$(gcloud compute instances list --format='text(tags.items[0])' --filter='metadata.kubelet-config:*' | grep tags | awk '{print $2}' | sort | uniq)
|
||||
|
||||
# 2) Take note of the VPC network in which you deployed your cluster
|
||||
# NOTE this only works if you have only one network in which you deploy your clusters
|
||||
NETWORK=$(gcloud compute instances list --format='text(networkInterfaces[0].network)' --filter='metadata.kubelet-config:*' | grep networks | awk -F'/' '{print $NF}' | sort | uniq)
|
||||
|
||||
# 3) Get the master source ip block
|
||||
SOURCE=$(gcloud container clusters describe <cluster-name> --region <region> | grep masterIpv4CidrBlock| cut -d ':' -f 2 | tr -d ' ')
|
||||
|
||||
gcloud compute firewall-rules create k8s-cert-manager --source-ranges $SOURCE --target-tags $WORKER_NODES_TAG --allow TCP:9443 --network $NETWORK
|
||||
```
|
||||
|
||||
### Invalid header field value
|
||||
|
||||
**Problem**
|
||||
|
||||
@@ -23,7 +106,88 @@ Your base64'ed PAT token has a new line at the end, it needs to be created witho
|
||||
* `echo -n $TOKEN | base64`
|
||||
* Create the secret as described in the docs using the shell and the documented flags (see the sketch below)
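For example, letting `kubectl` do the encoding avoids the trailing-newline problem entirely; a sketch (the secret and key names here follow the ARC docs, but double-check them against your setup):

```bash
kubectl create secret generic controller-manager \
  -n actions-runner-system \
  --from-literal=github_token=${GITHUB_TOKEN}
```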
|
||||
|
||||
### Helm chart install failure: certificate signed by unknown authority
|
||||
|
||||
**Problem**
|
||||
|
||||
```
|
||||
Error: UPGRADE FAILED: failed to create resource: Internal error occurred: failed calling webhook "webhook.cert-manager.io": failed to call webhook: Post "https://cert-manager-webhook.cert-manager.svc:443/mutate?timeout=10s": x509: certificate signed by unknown authority
|
||||
```
|
||||
|
||||
Apparently, it's failing while `helm` is creating one of the resources defined in the ARC chart, and the cause is that cert-manager's webhook is not working correctly due to a missing or invalid CA certificate.
|
||||
|
||||
If you tail the logs from `cert-manager-cainjector`, you may see it failing with an error like:
|
||||
|
||||
```
|
||||
$ kubectl -n cert-manager logs cert-manager-cainjector-7cdbb9c945-g6bt4
|
||||
I0703 03:31:55.159339 1 start.go:91] "starting" version="v1.1.1" revision="3ac7418070e22c87fae4b22603a6b952f797ae96"
|
||||
I0703 03:31:55.615061 1 leaderelection.go:243] attempting to acquire leader lease kube-system/cert-manager-cainjector-leader-election...
|
||||
I0703 03:32:10.738039 1 leaderelection.go:253] successfully acquired lease kube-system/cert-manager-cainjector-leader-election
|
||||
I0703 03:32:10.739941 1 recorder.go:52] cert-manager/controller-runtime/manager/events "msg"="Normal" "message"="cert-manager-cainjector-7cdbb9c945-g6bt4_88e4bc70-eded-4343-a6fb-0ddd6434eb55 became leader" "object"={"kind":"ConfigMap","namespace":"kube-system","name":"cert-manager-cainjector-leader-election","uid":"942a021e-364c-461a-978c-f54a95723cdc","apiVersion":"v1","resourceVersion":"1576"} "reason"="LeaderElection"
|
||||
E0703 03:32:11.192128 1 start.go:119] cert-manager/ca-injector "msg"="manager goroutine exited" "error"=null
|
||||
I0703 03:32:12.339197 1 request.go:645] Throttling request took 1.047437675s, request: GET:https://10.96.0.1:443/apis/storage.k8s.io/v1beta1?timeout=32s
|
||||
E0703 03:32:13.143790 1 start.go:151] cert-manager/ca-injector "msg"="Error registering certificate based controllers. Retrying after 5 seconds." "error"="no matches for kind \"MutatingWebhookConfiguration\" in version \"admissionregistration.k8s.io/v1beta1\""
|
||||
Error: error registering secret controller: no matches for kind "MutatingWebhookConfiguration" in version "admissionregistration.k8s.io/v1beta1"
|
||||
```
|
||||
|
||||
**Solution**
|
||||
|
||||
Your cluster is running Kubernetes 1.22 or greater, which no longer supports the legacy `admissionregistration.k8s.io/v1beta1` API, and your `cert-manager` is not up-to-date, hence it's still trying to use the legacy Kubernetes API.
|
||||
|
||||
In many cases, it's not an option to downgrade Kubernetes. So, just upgrade `cert-manager` to a more recent version that does have support for the specific Kubernetes version you're using.
|
||||
|
||||
See https://cert-manager.io/docs/installation/supported-releases/ for the list of available cert-manager versions.
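For example, a sketch of upgrading cert-manager with Helm; the chart repository and version below are assumptions, so pick a version from the supported-releases page above:

```bash
helm repo add jetstack https://charts.jetstack.io
helm repo update
helm upgrade --install cert-manager jetstack/cert-manager \
  --namespace cert-manager --create-namespace \
  --version v1.8.2 \
  --set installCRDs=true
```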
|
||||
|
||||
## Operations
|
||||
|
||||
Troubleshooting runbooks that relate to ARC operational problems.
|
||||
|
||||
### Stuck runner kind or backing pod
|
||||
|
||||
**Problem**
|
||||
|
||||
Sometimes either the runner kind (`kubectl get runners`) or its underlying pod can get stuck in a terminating state for various reasons. You can get the kind unstuck by removing its finaliser using something like this:
|
||||
|
||||
**Solution**
|
||||
|
||||
Remove the finaliser from the relevant runner kind or pod:
|
||||
|
||||
```
|
||||
# Get all kind runners and remove the finalizer
|
||||
$ kubectl get runners --no-headers | awk {'print $1'} | xargs kubectl patch runner --type merge -p '{"metadata":{"finalizers":null}}'
|
||||
|
||||
# Get all pods that are stuck terminating and remove the finalizer
|
||||
$ kubectl get pods | grep Terminating | awk {'print $1'} | xargs kubectl patch pod -p '{"metadata":{"finalizers":null}}'
|
||||
```
|
||||
|
||||
_Note the code assumes you have already selected the namespace your runners are in and that they
|
||||
are in a namespace not shared with anything else_
|
||||
|
||||
### Delay in jobs being allocated to runners
|
||||
|
||||
**Problem**
|
||||
|
||||
ARC isn't involved in jobs actually getting allocated to a runner. ARC is responsible for orchestrating runners and the runner lifecycle. Why some people see large delays in job allocation is not clear; however, it has been [reported](https://github.com/actions-runner-controller/actions-runner-controller/issues/1387#issuecomment-1122593984) that this is somehow caused by the self-update process.
|
||||
|
||||
**Solution**
|
||||
|
||||
Disable the self-update process in your runner manifests:
|
||||
|
||||
```yaml
|
||||
apiVersion: actions.summerwind.dev/v1alpha1
|
||||
kind: RunnerDeployment
|
||||
metadata:
|
||||
name: example-runnerdeployment-with-sleep
|
||||
spec:
|
||||
template:
|
||||
spec:
|
||||
...
|
||||
env:
|
||||
- name: DISABLE_RUNNER_UPDATE
|
||||
value: "true"
|
||||
```
|
||||
|
||||
### Runner coming up before network available
|
||||
|
||||
**Problem**
|
||||
|
||||
@@ -61,40 +225,48 @@ metadata:
|
||||
spec:
|
||||
template:
|
||||
spec:
|
||||
...
|
||||
env:
|
||||
# This runner's entrypoint script will have a 5 seconds delay
|
||||
# as a first action within the entrypoint script
|
||||
- name: STARTUP_DELAY_IN_SECONDS
|
||||
value: "5"
|
||||
```
|
||||
|
||||
## Outgoing network action hangs indefinitely

**Problem**

Some random outgoing network actions hang indefinitely. This could be because your cluster does not give Docker the standard MTU of 1500. You can check this by running `ip link` in a pod that encounters the problem and reading the outgoing interface's MTU value. If it is smaller than 1500, then try the following.

**Solution**

Add a `dockerMTU` key in your runner's spec with the value you read on the outgoing interface. For instance:

```yaml
apiVersion: actions.summerwind.dev/v1alpha1
kind: RunnerDeployment
metadata:
  name: github-runner
  namespace: github-system
spec:
  replicas: 6
  template:
    spec:
      dockerMTU: 1400
      repository: $username/$repo
      env: []
```

There may be more places you need to tweak for MTU.
Please consult issues like #651 for more information.

## Unable to scale to zero with TotalNumberOfQueuedAndInProgressWorkflowRuns

**Problem**

HRA doesn't scale the RunnerDeployment to zero, even though you did configure HRA correctly to have a pull-based scaling metric `TotalNumberOfQueuedAndInProgressWorkflowRuns` and set `minReplicas: 0`.

**Solution**

You very likely have some dangling workflow jobs stuck in `queued` or `in_progress` as seen in [#1057](https://github.com/actions-runner-controller/actions-runner-controller/issues/1057#issuecomment-1133439061).

Manually call [the "list workflow runs" API](https://docs.github.com/en/rest/actions/workflow-runs#list-workflow-runs-for-a-repository), and [remove the dangling workflow job(s)](https://docs.github.com/en/rest/actions/workflow-runs#delete-a-workflow-run).
|
||||
|
||||
97
acceptance/argotunnel.sh
Executable file
@@ -0,0 +1,97 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# See https://developers.cloudflare.com/cloudflare-one/tutorials/many-cfd-one-tunnel/
|
||||
|
||||
kubectl create ns tunnel || :
|
||||
|
||||
kubectl -n tunnel delete secret tunnel-credentials || :
|
||||
|
||||
kubectl -n tunnel create secret generic tunnel-credentials \
|
||||
--from-file=credentials.json=$HOME/.cloudflared/${TUNNEL_ID}.json || :
|
||||
|
||||
cat <<MANIFEST | kubectl -n tunnel ${OP} -f -
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: cloudflared
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: cloudflared
|
||||
replicas: 2 # You could also consider elastic scaling for this deployment
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: cloudflared
|
||||
spec:
|
||||
containers:
|
||||
- name: cloudflared
|
||||
image: cloudflare/cloudflared:latest
|
||||
args:
|
||||
- tunnel
|
||||
# Points cloudflared to the config file, which configures what
|
||||
# cloudflared will actually do. This file is created by a ConfigMap
|
||||
# below.
|
||||
- --config
|
||||
- /etc/cloudflared/config/config.yaml
|
||||
- run
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
# Cloudflared has a /ready endpoint which returns 200 if and only if
|
||||
# it has an active connection to the edge.
|
||||
path: /ready
|
||||
port: 2000
|
||||
failureThreshold: 1
|
||||
initialDelaySeconds: 10
|
||||
periodSeconds: 10
|
||||
volumeMounts:
|
||||
- name: config
|
||||
mountPath: /etc/cloudflared/config
|
||||
readOnly: true
|
||||
# Each tunnel has an associated "credentials file" which authorizes machines
|
||||
# to run the tunnel. cloudflared will read this file from its local filesystem,
|
||||
# and it'll be stored in a k8s secret.
|
||||
- name: creds
|
||||
mountPath: /etc/cloudflared/creds
|
||||
readOnly: true
|
||||
volumes:
|
||||
- name: creds
|
||||
secret:
|
||||
secretName: tunnel-credentials
|
||||
# Create a config.yaml file from the ConfigMap below.
|
||||
- name: config
|
||||
configMap:
|
||||
name: cloudflared
|
||||
items:
|
||||
- key: config.yaml
|
||||
path: config.yaml
|
||||
---
|
||||
# This ConfigMap is just a way to define the cloudflared config.yaml file in k8s.
|
||||
# It's useful to define it in k8s, rather than as a stand-alone .yaml file, because
|
||||
# this lets you use various k8s templating solutions (e.g. Helm charts) to
|
||||
# parameterize your config, instead of just using string literals.
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: cloudflared
|
||||
data:
|
||||
config.yaml: |
|
||||
# Name of the tunnel you want to run
|
||||
tunnel: ${TUNNEL_NAME}
|
||||
credentials-file: /etc/cloudflared/creds/credentials.json
|
||||
# Serves the metrics server under /metrics and the readiness server under /ready
|
||||
metrics: 0.0.0.0:2000
|
||||
# Autoupdates applied in a k8s pod will be lost when the pod is removed or restarted, so
|
||||
# autoupdate doesn't make sense in Kubernetes. However, outside of Kubernetes, we strongly
|
||||
# recommend using autoupdate.
|
||||
no-autoupdate: true
|
||||
ingress:
|
||||
# The first rule proxies traffic to the httpbin sample Service defined in app.yaml
|
||||
- hostname: ${TUNNEL_HOSTNAME}
|
||||
service: http://actions-runner-controller-github-webhook-server.actions-runner-system:80
|
||||
# This rule matches any traffic which didn't match a previous rule, and responds with HTTP 404.
|
||||
- service: http_status:404
|
||||
MANIFEST
|
||||
|
||||
kubectl -n tunnel delete po -l app=cloudflared || :
|
||||
@@ -51,6 +51,10 @@ if [ "${tool}" == "helm" ]; then
|
||||
--set image.tag=${VERSION} \
|
||||
--set podAnnotations.test-id=${TEST_ID} \
|
||||
--set githubWebhookServer.podAnnotations.test-id=${TEST_ID} \
|
||||
--set imagePullSecrets[0].name=${IMAGE_PULL_SECRET} \
|
||||
--set image.actionsRunnerImagePullSecrets[0].name=${IMAGE_PULL_SECRET} \
|
||||
--set githubWebhookServer.imagePullSecrets[0].name=${IMAGE_PULL_SECRET} \
|
||||
--set image.imagePullPolicy=${IMAGE_PULL_POLICY} \
|
||||
-f ${VALUES_FILE}
|
||||
set +v
|
||||
# To prevent `CustomResourceDefinition.apiextensions.k8s.io "runners.actions.summerwind.dev" is invalid: metadata.annotations: Too long: must have at most 262144 bytes`
|
||||
@@ -76,56 +80,3 @@ kubectl -n actions-runner-system wait deploy/actions-runner-controller --for con
|
||||
|
||||
# Adhocly wait for some time until actions-runner-controller's admission webhook gets ready
|
||||
sleep 20
|
||||
|
||||
RUNNER_LABEL=${RUNNER_LABEL:-self-hosted}
|
||||
|
||||
if [ -n "${TEST_REPO}" ]; then
|
||||
if [ "${USE_RUNNERSET}" != "false" ]; then
|
||||
cat acceptance/testdata/runnerset.envsubst.yaml | TEST_ENTERPRISE= TEST_ORG= RUNNER_MIN_REPLICAS=${REPO_RUNNER_MIN_REPLICAS} NAME=repo-runnerset envsubst | kubectl apply -f -
|
||||
else
|
||||
echo 'Deploying runnerdeployment and hra. Set USE_RUNNERSET if you want to deploy runnerset instead.'
|
||||
cat acceptance/testdata/runnerdeploy.envsubst.yaml | TEST_ENTERPRISE= TEST_ORG= RUNNER_MIN_REPLICAS=${REPO_RUNNER_MIN_REPLICAS} NAME=repo-runnerdeploy envsubst | kubectl apply -f -
|
||||
fi
|
||||
else
|
||||
echo 'Skipped deploying runnerdeployment and hra. Set TEST_REPO to "yourorg/yourrepo" to deploy.'
|
||||
fi
|
||||
|
||||
if [ -n "${TEST_ORG}" ]; then
|
||||
if [ "${USE_RUNNERSET}" != "false" ]; then
|
||||
cat acceptance/testdata/runnerset.envsubst.yaml | TEST_ENTERPRISE= TEST_REPO= RUNNER_MIN_REPLICAS=${ORG_RUNNER_MIN_REPLICAS} NAME=org-runnerset envsubst | kubectl apply -f -
|
||||
else
|
||||
cat acceptance/testdata/runnerdeploy.envsubst.yaml | TEST_ENTERPRISE= TEST_REPO= RUNNER_MIN_REPLICAS=${ORG_RUNNER_MIN_REPLICAS} NAME=org-runnerdeploy envsubst | kubectl apply -f -
|
||||
fi
|
||||
|
||||
if [ -n "${TEST_ORG_GROUP}" ]; then
|
||||
if [ "${USE_RUNNERSET}" != "false" ]; then
|
||||
cat acceptance/testdata/runnerset.envsubst.yaml | TEST_ENTERPRISE= TEST_REPO= RUNNER_MIN_REPLICAS=${ORG_RUNNER_MIN_REPLICAS} TEST_GROUP=${TEST_ORG_GROUP} NAME=orgroupg-runnerset envsubst | kubectl apply -f -
|
||||
else
|
||||
cat acceptance/testdata/runnerdeploy.envsubst.yaml | TEST_ENTERPRISE= TEST_REPO= RUNNER_MIN_REPLICAS=${ORG_RUNNER_MIN_REPLICAS} TEST_GROUP=${TEST_ORG_GROUP} NAME=orggroup-runnerdeploy envsubst | kubectl apply -f -
|
||||
fi
|
||||
else
|
||||
echo 'Skipped deploying enterprise runnerdeployment. Set TEST_ORG_GROUP to deploy.'
|
||||
fi
|
||||
else
|
||||
echo 'Skipped deploying organizational runnerdeployment. Set TEST_ORG to deploy.'
|
||||
fi
|
||||
|
||||
if [ -n "${TEST_ENTERPRISE}" ]; then
|
||||
if [ "${USE_RUNNERSET}" != "false" ]; then
|
||||
cat acceptance/testdata/runnerset.envsubst.yaml | TEST_ORG= TEST_REPO= RUNNER_MIN_REPLICAS=${ENTERPRISE_RUNNER_MIN_REPLICAS} NAME=enterprise-runnerset envsubst | kubectl apply -f -
|
||||
else
|
||||
cat acceptance/testdata/runnerdeploy.envsubst.yaml | TEST_ORG= TEST_REPO= RUNNER_MIN_REPLICAS=${ENTERPRISE_RUNNER_MIN_REPLICAS} NAME=enterprise-runnerdeploy envsubst | kubectl apply -f -
|
||||
fi
|
||||
|
||||
if [ -n "${TEST_ENTERPRISE_GROUP}" ]; then
|
||||
if [ "${USE_RUNNERSET}" != "false" ]; then
|
||||
cat acceptance/testdata/runnerset.envsubst.yaml | TEST_ORG= TEST_REPO= RUNNER_MIN_REPLICAS=${ENTERPRISE_RUNNER_MIN_REPLICAS} TEST_GROUP=${TEST_ENTERPRISE_GROUP} NAME=enterprisegroup-runnerset envsubst | kubectl apply -f -
|
||||
else
|
||||
cat acceptance/testdata/runnerdeploy.envsubst.yaml | TEST_ORG= TEST_REPO= RUNNER_MIN_REPLICAS=${ENTERPRISE_RUNNER_MIN_REPLICAS} TEST_GROUP=${TEST_ENTERPRISE_GROUP} NAME=enterprisegroup-runnerdeploy envsubst | kubectl apply -f -
|
||||
fi
|
||||
else
|
||||
echo 'Skipped deploying enterprise runnerdeployment. Set TEST_ENTERPRISE_GROUP to deploy.'
|
||||
fi
|
||||
else
|
||||
echo 'Skipped deploying enterprise runnerdeployment. Set TEST_ENTERPRISE to deploy.'
|
||||
fi
|
||||
|
||||
58
acceptance/deploy_runners.sh
Executable file
@@ -0,0 +1,58 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
set -e
|
||||
|
||||
OP=${OP:-apply}
|
||||
|
||||
RUNNER_LABEL=${RUNNER_LABEL:-self-hosted}
|
||||
|
||||
if [ -n "${TEST_REPO}" ]; then
|
||||
if [ "${USE_RUNNERSET}" != "false" ]; then
|
||||
cat acceptance/testdata/runnerset.envsubst.yaml | TEST_ENTERPRISE= TEST_ORG= RUNNER_MIN_REPLICAS=${REPO_RUNNER_MIN_REPLICAS} NAME=repo-runnerset envsubst | kubectl ${OP} -f -
|
||||
else
|
||||
echo "Running ${OP} runnerdeployment and hra. Set USE_RUNNERSET if you want to deploy runnerset instead."
|
||||
cat acceptance/testdata/runnerdeploy.envsubst.yaml | TEST_ENTERPRISE= TEST_ORG= RUNNER_MIN_REPLICAS=${REPO_RUNNER_MIN_REPLICAS} NAME=repo-runnerdeploy envsubst | kubectl ${OP} -f -
|
||||
fi
|
||||
else
|
||||
echo "Skipped ${OP} for runnerdeployment and hra. Set TEST_REPO to "yourorg/yourrepo" to deploy."
|
||||
fi
|
||||
|
||||
if [ -n "${TEST_ORG}" ]; then
|
||||
if [ "${USE_RUNNERSET}" != "false" ]; then
|
||||
cat acceptance/testdata/runnerset.envsubst.yaml | TEST_ENTERPRISE= TEST_REPO= RUNNER_MIN_REPLICAS=${ORG_RUNNER_MIN_REPLICAS} NAME=org-runnerset envsubst | kubectl ${OP} -f -
|
||||
else
|
||||
cat acceptance/testdata/runnerdeploy.envsubst.yaml | TEST_ENTERPRISE= TEST_REPO= RUNNER_MIN_REPLICAS=${ORG_RUNNER_MIN_REPLICAS} NAME=org-runnerdeploy envsubst | kubectl ${OP} -f -
|
||||
fi
|
||||
|
||||
if [ -n "${TEST_ORG_GROUP}" ]; then
|
||||
if [ "${USE_RUNNERSET}" != "false" ]; then
|
||||
cat acceptance/testdata/runnerset.envsubst.yaml | TEST_ENTERPRISE= TEST_REPO= RUNNER_MIN_REPLICAS=${ORG_RUNNER_MIN_REPLICAS} TEST_GROUP=${TEST_ORG_GROUP} NAME=orggroup-runnerset envsubst | kubectl ${OP} -f -
|
||||
else
|
||||
cat acceptance/testdata/runnerdeploy.envsubst.yaml | TEST_ENTERPRISE= TEST_REPO= RUNNER_MIN_REPLICAS=${ORG_RUNNER_MIN_REPLICAS} TEST_GROUP=${TEST_ORG_GROUP} NAME=orggroup-runnerdeploy envsubst | kubectl ${OP} -f -
|
||||
fi
|
||||
else
|
||||
echo "Skipped ${OP} on enterprise runnerdeployment. Set TEST_ORG_GROUP to ${OP}."
|
||||
fi
|
||||
else
|
||||
echo "Skipped ${OP} on organizational runnerdeployment. Set TEST_ORG to ${OP}."
|
||||
fi
|
||||
|
||||
if [ -n "${TEST_ENTERPRISE}" ]; then
|
||||
if [ "${USE_RUNNERSET}" != "false" ]; then
|
||||
cat acceptance/testdata/runnerset.envsubst.yaml | TEST_ORG= TEST_REPO= RUNNER_MIN_REPLICAS=${ENTERPRISE_RUNNER_MIN_REPLICAS} NAME=enterprise-runnerset envsubst | kubectl ${OP} -f -
|
||||
else
|
||||
cat acceptance/testdata/runnerdeploy.envsubst.yaml | TEST_ORG= TEST_REPO= RUNNER_MIN_REPLICAS=${ENTERPRISE_RUNNER_MIN_REPLICAS} NAME=enterprise-runnerdeploy envsubst | kubectl ${OP} -f -
|
||||
fi
|
||||
|
||||
if [ -n "${TEST_ENTERPRISE_GROUP}" ]; then
|
||||
if [ "${USE_RUNNERSET}" != "false" ]; then
|
||||
cat acceptance/testdata/runnerset.envsubst.yaml | TEST_ORG= TEST_REPO= RUNNER_MIN_REPLICAS=${ENTERPRISE_RUNNER_MIN_REPLICAS} TEST_GROUP=${TEST_ENTERPRISE_GROUP} NAME=enterprisegroup-runnerset envsubst | kubectl ${OP} -f -
|
||||
else
|
||||
cat acceptance/testdata/runnerdeploy.envsubst.yaml | TEST_ORG= TEST_REPO= RUNNER_MIN_REPLICAS=${ENTERPRISE_RUNNER_MIN_REPLICAS} TEST_GROUP=${TEST_ENTERPRISE_GROUP} NAME=enterprisegroup-runnerdeploy envsubst | kubectl ${OP} -f -
|
||||
fi
|
||||
else
|
||||
echo "Skipped ${OP} on enterprise runnerdeployment. Set TEST_ENTERPRISE_GROUP to ${OP}."
|
||||
fi
|
||||
else
|
||||
echo "Skipped ${OP} on enterprise runnerdeployment. Set TEST_ENTERPRISE to ${OP}."
|
||||
fi
|
||||
82
acceptance/testdata/kubernetes_container_mode.envsubst.yaml
vendored
Normal file
@@ -0,0 +1,82 @@
|
||||
# USAGE:
|
||||
# cat acceptance/testdata/kubernetes_container_mode.envsubst.yaml | NAMESPACE=default envsubst | kubectl apply -f -
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: k8s-mode-runner
|
||||
rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["pods"]
|
||||
verbs: ["get", "list", "create", "delete"]
|
||||
- apiGroups: [""]
|
||||
resources: ["pods/exec"]
|
||||
verbs: ["get", "create"]
|
||||
- apiGroups: [""]
|
||||
resources: ["pods/log"]
|
||||
verbs: ["get", "list", "watch",]
|
||||
- apiGroups: ["batch"]
|
||||
resources: ["jobs"]
|
||||
verbs: ["get", "list", "create", "delete"]
|
||||
- apiGroups: [""]
|
||||
resources: ["secrets"]
|
||||
verbs: ["get", "list", "create", "delete"]
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: runner-status-updater
|
||||
rules:
|
||||
- apiGroups: ["actions.summerwind.dev"]
|
||||
resources: ["runners/status"]
|
||||
verbs: ["get", "update", "patch"]
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: runner
|
||||
namespace: ${NAMESPACE}
|
||||
---
|
||||
# To verify it's working, try:
|
||||
# kubectl auth can-i --as system:serviceaccount:default:runner get pod
|
||||
# If incomplete, workflows and jobs would fail with an error message like:
|
||||
# Error: Error: The Service account needs the following permissions [{"group":"","verbs":["get","list","create","delete"],"resource":"pods","subresource":""},{"group":"","verbs":["get","create"],"resource":"pods","subresource":"exec"},{"group":"","verbs":["get","list","watch"],"resource":"pods","subresource":"log"},{"group":"batch","verbs":["get","list","create","delete"],"resource":"jobs","subresource":""},{"group":"","verbs":["create","delete","get","list"],"resource":"secrets","subresource":""}] on the pod resource in the 'default' namespace. Please contact your self hosted runner administrator.
|
||||
# Error: Process completed with exit code 1.
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
# This role binding grants the "runner" service account the "k8s-mode-runner"
# ClusterRole within the ${NAMESPACE} namespace.
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
name: runner-k8s-mode-runner
|
||||
namespace: ${NAMESPACE}
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: runner
|
||||
namespace: ${NAMESPACE}
|
||||
roleRef:
|
||||
kind: ClusterRole
|
||||
name: k8s-mode-runner
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
name: runner-runner-status-updater
|
||||
namespace: ${NAMESPACE}
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: runner
|
||||
namespace: ${NAMESPACE}
|
||||
roleRef:
|
||||
kind: ClusterRole
|
||||
name: runner-status-updater
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
---
|
||||
apiVersion: storage.k8s.io/v1
|
||||
kind: StorageClass
|
||||
metadata:
|
||||
name: org-runnerdeploy-runner-work-dir
|
||||
labels:
|
||||
content: org-runnerdeploy-runner-work-dir
|
||||
provisioner: rancher.io/local-path
|
||||
reclaimPolicy: Delete
|
||||
volumeBindingMode: WaitForFirstConsumer
|
||||
19
acceptance/testdata/runnerdeploy.envsubst.yaml
vendored
@@ -19,11 +19,6 @@ spec:
|
||||
|
||||
ephemeral: ${TEST_EPHEMERAL}
|
||||
|
||||
# Whether to pass --ephemeral (true) or --once (false, deprecated)
|
||||
env:
|
||||
- name: RUNNER_FEATURE_FLAG_EPHEMERAL
|
||||
value: "${RUNNER_FEATURE_FLAG_EPHEMERAL}"
|
||||
|
||||
#
|
||||
# dockerd within runner container
|
||||
#
|
||||
@@ -48,6 +43,17 @@ spec:
|
||||
# Non-standard working directory
|
||||
#
|
||||
# workDir: "/"
|
||||
|
||||
# # Uncomment the below to enable the kubernetes container mode
|
||||
# # See https://github.com/actions-runner-controller/actions-runner-controller#runner-with-k8s-jobs
|
||||
containerMode: kubernetes
|
||||
workVolumeClaimTemplate:
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
storageClassName: "${NAME}-runner-work-dir"
|
||||
resources:
|
||||
requests:
|
||||
storage: 10Gi
|
||||
---
|
||||
apiVersion: actions.summerwind.dev/v1alpha1
|
||||
kind: HorizontalRunnerAutoscaler
|
||||
@@ -57,7 +63,8 @@ spec:
|
||||
scaleTargetRef:
|
||||
name: ${NAME}
|
||||
scaleUpTriggers:
|
||||
- githubEvent: {}
|
||||
- githubEvent:
|
||||
workflowJob: {}
|
||||
amount: 1
|
||||
duration: "10m"
|
||||
minReplicas: ${RUNNER_MIN_REPLICAS}
|
||||
|
||||
187
acceptance/testdata/runnerset.envsubst.yaml
vendored
@@ -1,3 +1,59 @@
|
||||
---
|
||||
apiVersion: storage.k8s.io/v1
|
||||
kind: StorageClass
|
||||
metadata:
|
||||
name: ${NAME}-runner-work-dir
|
||||
labels:
|
||||
content: ${NAME}-runner-work-dir
|
||||
provisioner: rancher.io/local-path
|
||||
reclaimPolicy: Delete
|
||||
volumeBindingMode: WaitForFirstConsumer
|
||||
---
|
||||
apiVersion: storage.k8s.io/v1
|
||||
kind: StorageClass
|
||||
metadata:
|
||||
name: ${NAME}
|
||||
# In kind environments, the provider writes:
|
||||
# /var/lib/docker/volumes/KIND_NODE_CONTAINER_VOL_ID/_data/local-path-provisioner/PV_NAME
|
||||
# It can be hundreds of gigabytes depending on what you cache in the test workflow. Beware not to run into `no space left on device` errors!
# If you do encounter no-space errors, try:
|
||||
# docker system prune
|
||||
# docker buildx prune #=> frees up /var/lib/docker/volumes/buildx_buildkit_container-builder0_state
|
||||
# sudo rm -rf /var/lib/docker/volumes/KIND_NODE_CONTAINER_VOL_ID/_data/local-path-provisioner #=> frees up local-path-provisioner's data
|
||||
provisioner: rancher.io/local-path
|
||||
reclaimPolicy: Retain
|
||||
volumeBindingMode: WaitForFirstConsumer
|
||||
---
|
||||
apiVersion: storage.k8s.io/v1
|
||||
kind: StorageClass
|
||||
metadata:
|
||||
name: ${NAME}-var-lib-docker
|
||||
labels:
|
||||
content: ${NAME}-var-lib-docker
|
||||
provisioner: rancher.io/local-path
|
||||
reclaimPolicy: Retain
|
||||
volumeBindingMode: WaitForFirstConsumer
|
||||
---
|
||||
apiVersion: storage.k8s.io/v1
|
||||
kind: StorageClass
|
||||
metadata:
|
||||
name: ${NAME}-cache
|
||||
labels:
|
||||
content: ${NAME}-cache
|
||||
provisioner: rancher.io/local-path
|
||||
reclaimPolicy: Retain
|
||||
volumeBindingMode: WaitForFirstConsumer
|
||||
---
|
||||
apiVersion: storage.k8s.io/v1
|
||||
kind: StorageClass
|
||||
metadata:
|
||||
name: ${NAME}-runner-tool-cache
|
||||
labels:
|
||||
content: ${NAME}-runner-tool-cache
|
||||
provisioner: rancher.io/local-path
|
||||
reclaimPolicy: Retain
|
||||
volumeBindingMode: WaitForFirstConsumer
|
||||
---
|
||||
apiVersion: actions.summerwind.dev/v1alpha1
|
||||
kind: RunnerSet
|
||||
metadata:
|
||||
@@ -62,8 +118,125 @@ spec:
|
||||
env:
|
||||
- name: RUNNER_FEATURE_FLAG_EPHEMERAL
|
||||
value: "${RUNNER_FEATURE_FLAG_EPHEMERAL}"
|
||||
#- name: docker
|
||||
# #image: mumoshu/actions-runner-dind:dev
|
||||
- name: GOMODCACHE
|
||||
value: "/home/runner/.cache/go-mod"
|
||||
# PV-backed runner work dir
|
||||
volumeMounts:
|
||||
# Comment out the ephemeral work volume if you're going to test the kubernetes container mode
|
||||
# The volume and mount with the same names will be created by workVolumeClaimTemplate and the kubernetes container mode support.
|
||||
# - name: work
|
||||
# mountPath: /runner/_work
|
||||
# Cache docker image layers, in case dockerdWithinRunnerContainer=true
|
||||
- name: var-lib-docker
|
||||
mountPath: /var/lib/docker
|
||||
# Cache go modules and builds
|
||||
# - name: gocache
|
||||
# # Run `goenv | grep GOCACHE` to verify the path is correct for your env
|
||||
# mountPath: /home/runner/.cache/go-build
|
||||
# - name: gomodcache
|
||||
# # Run `goenv | grep GOMODCACHE` to verify the path is correct for your env
|
||||
# # mountPath: /home/runner/go/pkg/mod
|
||||
- name: cache
|
||||
# go: could not create module cache: stat /home/runner/.cache/go-mod: permission denied
|
||||
mountPath: "/home/runner/.cache"
|
||||
- name: runner-tool-cache
|
||||
# This corresponds to our runner image's default setting of RUNNER_TOOL_CACHE=/opt/hostedtoolcache.
|
||||
#
|
||||
# In case you customize the envvar in both the runner and docker containers of the runner pod spec,
# you'd need to change this mountPath accordingly.
|
||||
#
|
||||
# The tool cache directory is defined in actions/toolkit's tool-cache module:
|
||||
# https://github.com/actions/toolkit/blob/2f164000dcd42fb08287824a3bc3030dbed33687/packages/tool-cache/src/tool-cache.ts#L621-L638
|
||||
#
|
||||
# Many setup-* actions like setup-go utilizes the tool-cache module to download and cache installed binaries:
|
||||
# https://github.com/actions/setup-go/blob/56a61c9834b4a4950dbbf4740af0b8a98c73b768/src/installer.ts#L144
|
||||
mountPath: "/opt/hostedtoolcache"
|
||||
# Valid only when dockerdWithinRunnerContainer=false
|
||||
- name: docker
|
||||
# PV-backed runner work dir
|
||||
volumeMounts:
|
||||
- name: work
|
||||
mountPath: /runner/_work
|
||||
# Cache docker image layers, in case dockerdWithinRunnerContainer=false
|
||||
- name: var-lib-docker
|
||||
mountPath: /var/lib/docker
|
||||
# image: mumoshu/actions-runner-dind:dev
|
||||
|
||||
# For buildx cache
|
||||
- name: cache
|
||||
mountPath: "/home/runner/.cache"
|
||||
# Comment out the ephemeral work volume if you're going to test the kubernetes container mode
|
||||
# volumes:
|
||||
# - name: work
|
||||
# ephemeral:
|
||||
# volumeClaimTemplate:
|
||||
# spec:
|
||||
# accessModes:
|
||||
# - ReadWriteOnce
|
||||
# storageClassName: "${NAME}-runner-work-dir"
|
||||
# resources:
|
||||
# requests:
|
||||
# storage: 10Gi
|
||||
volumeClaimTemplates:
|
||||
- metadata:
|
||||
name: vol1
|
||||
spec:
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
resources:
|
||||
requests:
|
||||
storage: 10Mi
|
||||
storageClassName: ${NAME}
|
||||
## Dunno which provider supports auto-provisioning with selector.
|
||||
## At least the rancher local path provider stopped with:
|
||||
## waiting for a volume to be created, either by external provisioner "rancher.io/local-path" or manually created by system administrator
|
||||
# selector:
|
||||
# matchLabels:
|
||||
# runnerset-volume-id: ${NAME}-vol1
|
||||
- metadata:
|
||||
name: vol2
|
||||
spec:
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
resources:
|
||||
requests:
|
||||
storage: 10Mi
|
||||
storageClassName: ${NAME}
|
||||
# selector:
|
||||
# matchLabels:
|
||||
# runnerset-volume-id: ${NAME}-vol2
|
||||
- metadata:
|
||||
name: var-lib-docker
|
||||
spec:
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
resources:
|
||||
requests:
|
||||
storage: 10Mi
|
||||
storageClassName: ${NAME}-var-lib-docker
|
||||
- metadata:
|
||||
name: cache
|
||||
spec:
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
resources:
|
||||
requests:
|
||||
storage: 10Mi
|
||||
storageClassName: ${NAME}-cache
|
||||
- metadata:
|
||||
name: runner-tool-cache
|
||||
# It turns out labels don't distinguish PVs across PVCs, and the
# end result is that PVs get reused by the wrong PVCs.
|
||||
# The correct way seems to be to differentiate storage class per pvc template.
|
||||
# labels:
|
||||
# id: runner-tool-cache
|
||||
spec:
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
resources:
|
||||
requests:
|
||||
storage: 10Mi
|
||||
storageClassName: ${NAME}-runner-tool-cache
|
||||
---
|
||||
apiVersion: actions.summerwind.dev/v1alpha1
|
||||
kind: HorizontalRunnerAutoscaler
|
||||
@@ -74,9 +247,17 @@ spec:
|
||||
kind: RunnerSet
|
||||
name: ${NAME}
|
||||
scaleUpTriggers:
|
||||
- githubEvent: {}
|
||||
- githubEvent:
|
||||
workflowJob: {}
|
||||
amount: 1
|
||||
duration: "10m"
|
||||
minReplicas: ${RUNNER_MIN_REPLICAS}
|
||||
maxReplicas: 10
|
||||
scaleDownDelaySecondsAfterScaleOut: ${RUNNER_SCALE_DOWN_DELAY_SECONDS_AFTER_SCALE_OUT}
|
||||
# Comment out the whole metrics if you'd like to solely test webhook-based scaling
|
||||
metrics:
|
||||
- type: PercentageRunnersBusy
|
||||
scaleUpThreshold: '0.75'
|
||||
scaleDownThreshold: '0.25'
|
||||
scaleUpFactor: '2'
|
||||
scaleDownFactor: '0.5'
|
||||
|
||||
@@ -1,6 +1,18 @@
|
||||
# Set actions-runner-controller settings for testing
|
||||
logLevel: "-4"
|
||||
imagePullSecrets:
|
||||
- name:
|
||||
image:
|
||||
actionsRunnerImagePullSecrets:
|
||||
- name:
|
||||
runner:
|
||||
statusUpdateHook:
|
||||
enabled: true
|
||||
rbac:
|
||||
allowGrantingKubernetesContainerModePermissions: true
|
||||
githubWebhookServer:
|
||||
imagePullSecrets:
|
||||
- name:
|
||||
logLevel: "-4"
|
||||
enabled: true
|
||||
labels: {}
|
||||
|
||||
@@ -60,6 +60,9 @@ type HorizontalRunnerAutoscalerSpec struct {
|
||||
// The earlier a scheduled override is, the higher it is prioritized.
|
||||
// +optional
|
||||
ScheduledOverrides []ScheduledOverride `json:"scheduledOverrides,omitempty"`
|
||||
|
||||
// +optional
|
||||
GitHubAPICredentialsFrom *GitHubAPICredentialsFrom `json:"githubAPICredentialsFrom,omitempty"`
|
||||
}
|
||||
|
||||
type ScaleUpTrigger struct {
|
||||
|
||||
@@ -18,8 +18,10 @@ package v1alpha1
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
"k8s.io/apimachinery/pkg/util/validation/field"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
@@ -71,6 +73,19 @@ type RunnerConfig struct {
|
||||
VolumeSizeLimit *resource.Quantity `json:"volumeSizeLimit,omitempty"`
|
||||
// +optional
|
||||
VolumeStorageMedium *string `json:"volumeStorageMedium,omitempty"`
|
||||
|
||||
// +optional
|
||||
ContainerMode string `json:"containerMode,omitempty"`
|
||||
|
||||
GitHubAPICredentialsFrom *GitHubAPICredentialsFrom `json:"githubAPICredentialsFrom,omitempty"`
|
||||
}
|
||||
|
||||
type GitHubAPICredentialsFrom struct {
|
||||
SecretRef SecretReference `json:"secretRef,omitempty"`
|
||||
}
|
||||
|
||||
type SecretReference struct {
|
||||
Name string `json:"name"`
|
||||
}
|
||||
|
||||
// RunnerPodSpec defines the desired pod spec fields of the runner pod
|
||||
@@ -135,6 +150,9 @@ type RunnerPodSpec struct {
|
||||
// +optional
|
||||
Tolerations []corev1.Toleration `json:"tolerations,omitempty"`
|
||||
|
||||
// +optional
|
||||
PriorityClassName string `json:"priorityClassName,omitempty"`
|
||||
|
||||
// +optional
|
||||
TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty"`
|
||||
|
||||
@@ -154,10 +172,32 @@ type RunnerPodSpec struct {
|
||||
|
||||
// +optional
|
||||
DnsConfig *corev1.PodDNSConfig `json:"dnsConfig,omitempty"`
|
||||
|
||||
// +optional
|
||||
WorkVolumeClaimTemplate *WorkVolumeClaimTemplate `json:"workVolumeClaimTemplate,omitempty"`
|
||||
}
|
||||
|
||||
func (rs *RunnerSpec) Validate(rootPath *field.Path) field.ErrorList {
|
||||
var (
|
||||
errList field.ErrorList
|
||||
err error
|
||||
)
|
||||
|
||||
err = rs.validateRepository()
|
||||
if err != nil {
|
||||
errList = append(errList, field.Invalid(rootPath.Child("repository"), rs.Repository, err.Error()))
|
||||
}
|
||||
|
||||
err = rs.validateWorkVolumeClaimTemplate()
|
||||
if err != nil {
|
||||
errList = append(errList, field.Invalid(rootPath.Child("workVolumeClaimTemplate"), rs.WorkVolumeClaimTemplate, err.Error()))
|
||||
}
|
||||
|
||||
return errList
|
||||
}
|
||||
|
||||
// ValidateRepository validates repository field.
|
||||
func (rs *RunnerSpec) ValidateRepository() error {
|
||||
func (rs *RunnerSpec) validateRepository() error {
|
||||
// Enterprise, Organization, and Repository are mutually exclusive.
|
||||
foundCount := 0
|
||||
if len(rs.Organization) > 0 {
|
||||
@@ -179,6 +219,18 @@ func (rs *RunnerSpec) ValidateRepository() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (rs *RunnerSpec) validateWorkVolumeClaimTemplate() error {
|
||||
if rs.ContainerMode != "kubernetes" {
|
||||
return nil
|
||||
}
|
||||
|
||||
if rs.WorkVolumeClaimTemplate == nil {
|
||||
return errors.New("Spec.ContainerMode: kubernetes must have workVolumeClaimTemplate field specified")
|
||||
}
|
||||
|
||||
return rs.WorkVolumeClaimTemplate.validate()
|
||||
}
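For illustration, here is a minimal sketch of how this spec-level validation surfaces a missing workVolumeClaimTemplate when containerMode is kubernetes. It assumes RunnerSpec embeds RunnerConfig and RunnerPodSpec as elsewhere in this API package; the repository name is illustrative only.

package main

import (
	"fmt"

	"github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
	"k8s.io/apimachinery/pkg/util/validation/field"
)

func main() {
	// A spec that opts into the kubernetes container mode but does not say
	// where the per-job work volume should come from.
	spec := v1alpha1.RunnerSpec{
		RunnerConfig: v1alpha1.RunnerConfig{
			Repository:    "example-org/example-repo", // illustrative
			ContainerMode: "kubernetes",
		},
		// RunnerPodSpec.WorkVolumeClaimTemplate is intentionally left nil.
	}

	// Validate aggregates field errors; with the spec above it should report
	// one error under spec.workVolumeClaimTemplate.
	for _, e := range spec.Validate(field.NewPath("spec")) {
		fmt.Println(e.Error())
	}
}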
|
||||
|
||||
// RunnerStatus defines the observed state of Runner
|
||||
type RunnerStatus struct {
|
||||
// Turns true only if the runner pod is ready.
|
||||
@@ -207,13 +259,60 @@ type RunnerStatusRegistration struct {
|
||||
ExpiresAt metav1.Time `json:"expiresAt"`
|
||||
}
|
||||
|
||||
type WorkVolumeClaimTemplate struct {
|
||||
StorageClassName string `json:"storageClassName"`
|
||||
AccessModes []corev1.PersistentVolumeAccessMode `json:"accessModes"`
|
||||
Resources corev1.ResourceRequirements `json:"resources"`
|
||||
}
|
||||
|
||||
func (w *WorkVolumeClaimTemplate) validate() error {
|
||||
if w.AccessModes == nil || len(w.AccessModes) == 0 {
|
||||
return errors.New("Access mode should have at least one mode specified")
|
||||
}
|
||||
|
||||
for _, accessMode := range w.AccessModes {
|
||||
switch accessMode {
|
||||
case corev1.ReadWriteOnce, corev1.ReadWriteMany:
|
||||
default:
|
||||
return fmt.Errorf("Access mode %v is not supported", accessMode)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *WorkVolumeClaimTemplate) V1Volume() corev1.Volume {
|
||||
return corev1.Volume{
|
||||
Name: "work",
|
||||
VolumeSource: corev1.VolumeSource{
|
||||
Ephemeral: &corev1.EphemeralVolumeSource{
|
||||
VolumeClaimTemplate: &corev1.PersistentVolumeClaimTemplate{
|
||||
Spec: corev1.PersistentVolumeClaimSpec{
|
||||
AccessModes: w.AccessModes,
|
||||
StorageClassName: &w.StorageClassName,
|
||||
Resources: w.Resources,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (w *WorkVolumeClaimTemplate) V1VolumeMount(mountPath string) corev1.VolumeMount {
|
||||
return corev1.VolumeMount{
|
||||
MountPath: mountPath,
|
||||
Name: "work",
|
||||
}
|
||||
}
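As a usage note, the two helpers above are what turn a workVolumeClaimTemplate into the generic ephemeral "work" volume and its mount on a pod. A minimal sketch, assuming only this API package and k8s.io/api/core/v1; the storage class, image, and mount path below are illustrative:

package main

import (
	"fmt"

	"github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	tmpl := v1alpha1.WorkVolumeClaimTemplate{
		StorageClassName: "org-runnerdeploy-runner-work-dir",
		AccessModes:      []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
		Resources: corev1.ResourceRequirements{
			Requests: corev1.ResourceList{
				corev1.ResourceStorage: resource.MustParse("10Gi"),
			},
		},
	}

	// V1Volume and V1VolumeMount both use the name "work", so the mount
	// binds to the ephemeral volume by construction.
	pod := corev1.PodSpec{
		Containers: []corev1.Container{{
			Name:         "runner",
			Image:        "summerwind/actions-runner:latest", // illustrative
			VolumeMounts: []corev1.VolumeMount{tmpl.V1VolumeMount("/runner/_work")},
		}},
		Volumes: []corev1.Volume{tmpl.V1Volume()},
	}

	fmt.Println(pod.Volumes[0].Name, pod.Containers[0].VolumeMounts[0].MountPath)
}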
|
||||
|
||||
// +kubebuilder:object:root=true
|
||||
// +kubebuilder:subresource:status
|
||||
// +kubebuilder:printcolumn:JSONPath=".spec.enterprise",name=Enterprise,type=string
|
||||
// +kubebuilder:printcolumn:JSONPath=".spec.organization",name=Organization,type=string
|
||||
// +kubebuilder:printcolumn:JSONPath=".spec.repository",name=Repository,type=string
|
||||
// +kubebuilder:printcolumn:JSONPath=".spec.group",name=Group,type=string
|
||||
// +kubebuilder:printcolumn:JSONPath=".spec.labels",name=Labels,type=string
|
||||
// +kubebuilder:printcolumn:JSONPath=".status.phase",name=Status,type=string
|
||||
// +kubebuilder:printcolumn:JSONPath=".status.message",name=Message,type=string
|
||||
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"
|
||||
|
||||
// Runner is the Schema for the runners API
|
||||
|
||||
@@ -66,15 +66,7 @@ func (r *Runner) ValidateDelete() error {
|
||||
|
||||
// Validate validates resource spec.
|
||||
func (r *Runner) Validate() error {
|
||||
var (
|
||||
errList field.ErrorList
|
||||
err error
|
||||
)
|
||||
|
||||
err = r.Spec.ValidateRepository()
|
||||
if err != nil {
|
||||
errList = append(errList, field.Invalid(field.NewPath("spec", "repository"), r.Spec.Repository, err.Error()))
|
||||
}
|
||||
errList := r.Spec.Validate(field.NewPath("spec"))
|
||||
|
||||
if len(errList) > 0 {
|
||||
return apierrors.NewInvalid(r.GroupVersionKind().GroupKind(), r.Name, errList)
|
||||
|
||||
@@ -66,15 +66,7 @@ func (r *RunnerDeployment) ValidateDelete() error {
|
||||
|
||||
// Validate validates resource spec.
|
||||
func (r *RunnerDeployment) Validate() error {
|
||||
var (
|
||||
errList field.ErrorList
|
||||
err error
|
||||
)
|
||||
|
||||
err = r.Spec.Template.Spec.ValidateRepository()
|
||||
if err != nil {
|
||||
errList = append(errList, field.Invalid(field.NewPath("spec", "template", "spec", "repository"), r.Spec.Template.Spec.Repository, err.Error()))
|
||||
}
|
||||
errList := r.Spec.Template.Spec.Validate(field.NewPath("spec", "template", "spec"))
|
||||
|
||||
if len(errList) > 0 {
|
||||
return apierrors.NewInvalid(r.GroupVersionKind().GroupKind(), r.Name, errList)
|
||||
|
||||
@@ -66,15 +66,7 @@ func (r *RunnerReplicaSet) ValidateDelete() error {
|
||||
|
||||
// Validate validates resource spec.
|
||||
func (r *RunnerReplicaSet) Validate() error {
|
||||
var (
|
||||
errList field.ErrorList
|
||||
err error
|
||||
)
|
||||
|
||||
err = r.Spec.Template.Spec.ValidateRepository()
|
||||
if err != nil {
|
||||
errList = append(errList, field.Invalid(field.NewPath("spec", "template", "spec", "repository"), r.Spec.Template.Spec.Repository, err.Error()))
|
||||
}
|
||||
errList := r.Spec.Template.Spec.Validate(field.NewPath("spec", "template", "spec"))
|
||||
|
||||
if len(errList) > 0 {
|
||||
return apierrors.NewInvalid(r.GroupVersionKind().GroupKind(), r.Name, errList)
|
||||
|
||||
@@ -33,6 +33,12 @@ type RunnerSetSpec struct {
|
||||
// +nullable
|
||||
EffectiveTime *metav1.Time `json:"effectiveTime,omitempty"`
|
||||
|
||||
// +optional
|
||||
ServiceAccountName string `json:"serviceAccountName,omitempty"`
|
||||
|
||||
// +optional
|
||||
WorkVolumeClaimTemplate *WorkVolumeClaimTemplate `json:"workVolumeClaimTemplate,omitempty"`
|
||||
|
||||
appsv1.StatefulSetSpec `json:",inline"`
|
||||
}
|
||||
|
||||
|
||||
@@ -90,6 +90,22 @@ func (in *CheckRunSpec) DeepCopy() *CheckRunSpec {
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *GitHubAPICredentialsFrom) DeepCopyInto(out *GitHubAPICredentialsFrom) {
|
||||
*out = *in
|
||||
out.SecretRef = in.SecretRef
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitHubAPICredentialsFrom.
|
||||
func (in *GitHubAPICredentialsFrom) DeepCopy() *GitHubAPICredentialsFrom {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(GitHubAPICredentialsFrom)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *GitHubEventScaleUpTriggerSpec) DeepCopyInto(out *GitHubEventScaleUpTriggerSpec) {
|
||||
*out = *in
|
||||
@@ -231,6 +247,11 @@ func (in *HorizontalRunnerAutoscalerSpec) DeepCopyInto(out *HorizontalRunnerAuto
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
if in.GitHubAPICredentialsFrom != nil {
|
||||
in, out := &in.GitHubAPICredentialsFrom, &out.GitHubAPICredentialsFrom
|
||||
*out = new(GitHubAPICredentialsFrom)
|
||||
**out = **in
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalRunnerAutoscalerSpec.
|
||||
@@ -425,6 +446,11 @@ func (in *RunnerConfig) DeepCopyInto(out *RunnerConfig) {
|
||||
*out = new(string)
|
||||
**out = **in
|
||||
}
|
||||
if in.GitHubAPICredentialsFrom != nil {
|
||||
in, out := &in.GitHubAPICredentialsFrom, &out.GitHubAPICredentialsFrom
|
||||
*out = new(GitHubAPICredentialsFrom)
|
||||
**out = **in
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunnerConfig.
|
||||
@@ -741,6 +767,11 @@ func (in *RunnerPodSpec) DeepCopyInto(out *RunnerPodSpec) {
|
||||
*out = new(v1.PodDNSConfig)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.WorkVolumeClaimTemplate != nil {
|
||||
in, out := &in.WorkVolumeClaimTemplate, &out.WorkVolumeClaimTemplate
|
||||
*out = new(WorkVolumeClaimTemplate)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunnerPodSpec.
|
||||
@@ -939,6 +970,11 @@ func (in *RunnerSetSpec) DeepCopyInto(out *RunnerSetSpec) {
|
||||
in, out := &in.EffectiveTime, &out.EffectiveTime
|
||||
*out = (*in).DeepCopy()
|
||||
}
|
||||
if in.WorkVolumeClaimTemplate != nil {
|
||||
in, out := &in.WorkVolumeClaimTemplate, &out.WorkVolumeClaimTemplate
|
||||
*out = new(WorkVolumeClaimTemplate)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
in.StatefulSetSpec.DeepCopyInto(&out.StatefulSetSpec)
|
||||
}
|
||||
|
||||
@@ -1126,6 +1162,42 @@ func (in *ScheduledOverride) DeepCopy() *ScheduledOverride {
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *SecretReference) DeepCopyInto(out *SecretReference) {
|
||||
*out = *in
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretReference.
|
||||
func (in *SecretReference) DeepCopy() *SecretReference {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(SecretReference)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *WorkVolumeClaimTemplate) DeepCopyInto(out *WorkVolumeClaimTemplate) {
|
||||
*out = *in
|
||||
if in.AccessModes != nil {
|
||||
in, out := &in.AccessModes, &out.AccessModes
|
||||
*out = make([]v1.PersistentVolumeAccessMode, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
in.Resources.DeepCopyInto(&out.Resources)
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkVolumeClaimTemplate.
|
||||
func (in *WorkVolumeClaimTemplate) DeepCopy() *WorkVolumeClaimTemplate {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(WorkVolumeClaimTemplate)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *WorkflowJobSpec) DeepCopyInto(out *WorkflowJobSpec) {
|
||||
*out = *in
|
||||
|
||||
@@ -15,10 +15,10 @@ type: application
|
||||
# This is the chart version. This version number should be incremented each time you make changes
|
||||
# to the chart and its templates, including the app version.
|
||||
# Versions are expected to follow Semantic Versioning (https://semver.org/)
|
||||
version: 0.17.3
|
||||
version: 0.20.2
|
||||
|
||||
# Used as the default manager tag value when no tag property is provided in the values.yaml
|
||||
appVersion: 0.22.3
|
||||
appVersion: 0.25.2
|
||||
|
||||
home: https://github.com/actions-runner-controller/actions-runner-controller
|
||||
|
||||
|
||||
@@ -12,6 +12,7 @@ All additional docs are kept in the `docs/` folder, this README is solely for do
|
||||
|----------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------|
|
||||
| `labels` | Set labels to apply to all resources in the chart | |
|
||||
| `replicaCount` | Set the number of controller pods | 1 |
|
||||
| `webhookPort` | Set the containerPort for the webhook Pod | 9443 |
|
||||
| `syncPeriod` | Set the period in which the controller reconciles the desired runner count | 10m |
|
||||
| `enableLeaderElection` | Enable election configuration | true |
|
||||
| `leaderElectionId` | Set the election ID for the controller group | |
|
||||
@@ -72,11 +73,11 @@ All additional docs are kept in the `docs/` folder, this README is solely for do
|
||||
| `scope.watchNamespace` | Tells the controller and the github webhook server which namespace to watch if `scope.singleNamespace` is true | `Release.Namespace` (the default namespace of the helm chart). |
|
||||
| `scope.singleNamespace` | Limit the controller to watch a single namespace | false |
|
||||
| `certManagerEnabled` | Enable cert-manager. If disabled you must set admissionWebHooks.caBundle and create TLS secrets manually | true |
|
||||
| `runner.statusUpdateHook.enabled` | Use custom RBAC for runners (role, role binding and service account); this enables reporting runner statuses | false |
|
||||
| `admissionWebHooks.caBundle` | Base64-encoded PEM bundle containing the CA that signed the webhook's serving certificate | |
|
||||
| `githubWebhookServer.logLevel` | Set the log level of the githubWebhookServer container | |
|
||||
| `githubWebhookServer.replicaCount` | Set the number of webhook server pods | 1 |
|
||||
| `githubWebhookServer.useRunnerGroupsVisibility` | Enable supporting runner groups with custom visibility. This will incur extra API calls and may exhaust your API rate-limit budget. Currently, you also need to set `githubWebhookServer.secret.enabled` to enable this feature. | false |
|
||||
| `githubWebhookServer.syncPeriod` | Set the period in which the controller reconciles the resources | 10m |
|
||||
| `githubWebhookServer.enabled` | Deploy the webhook server pod | false |
|
||||
| `githubWebhookServer.secret.enabled` | Passes the webhook hook secret to the github-webhook-server | false |
|
||||
| `githubWebhookServer.secret.create` | Deploy the webhook hook secret | false |
|
||||
|
||||
@@ -61,6 +61,16 @@ spec:
|
||||
type: integer
|
||||
type: object
|
||||
type: array
|
||||
githubAPICredentialsFrom:
|
||||
properties:
|
||||
secretRef:
|
||||
properties:
|
||||
name:
|
||||
type: string
|
||||
required:
|
||||
- name
|
||||
type: object
|
||||
type: object
|
||||
maxReplicas:
|
||||
description: MaxReplicas is the maximum number of replicas the deployment is allowed to scale
|
||||
type: integer
|
||||
|
||||
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -44,6 +44,7 @@ spec:
|
||||
{{- if .Values.leaderElectionId }}
|
||||
- "--leader-election-id={{ .Values.leaderElectionId }}"
|
||||
{{- end }}
|
||||
- "--port={{ .Values.webhookPort }}"
|
||||
- "--sync-period={{ .Values.syncPeriod }}"
|
||||
- "--default-scale-down-delay={{ .Values.defaultScaleDownDelay }}"
|
||||
- "--docker-image={{ .Values.image.dindSidecarRepositoryAndTag }}"
|
||||
@@ -57,15 +58,15 @@ spec:
|
||||
{{- if .Values.scope.singleNamespace }}
|
||||
- "--watch-namespace={{ default .Release.Namespace .Values.scope.watchNamespace }}"
|
||||
{{- end }}
|
||||
{{- if .Values.githubAPICacheDuration }}
|
||||
- "--github-api-cache-duration={{ .Values.githubAPICacheDuration }}"
|
||||
{{- end }}
|
||||
{{- if .Values.logLevel }}
|
||||
- "--log-level={{ .Values.logLevel }}"
|
||||
{{- end }}
|
||||
{{- if .Values.runnerGithubURL }}
|
||||
- "--runner-github-url={{ .Values.runnerGithubURL }}"
|
||||
{{- end }}
|
||||
{{- if .Values.runner.statusUpdateHook.enabled }}
|
||||
- "--runner-status-update-hook"
|
||||
{{- end }}
|
||||
command:
|
||||
- "/manager"
|
||||
env:
|
||||
@@ -117,15 +118,19 @@ spec:
|
||||
name: {{ include "actions-runner-controller.secretName" . }}
|
||||
optional: true
|
||||
{{- end }}
|
||||
{{- if kindIs "slice" .Values.env }}
|
||||
{{- toYaml .Values.env | nindent 8 }}
|
||||
{{- else }}
|
||||
{{- range $key, $val := .Values.env }}
|
||||
- name: {{ $key }}
|
||||
value: {{ $val | quote }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default (cat "v" .Chart.AppVersion | replace " " "") }}"
|
||||
name: manager
|
||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||
ports:
|
||||
- containerPort: 9443
|
||||
- containerPort: {{ .Values.webhookPort }}
|
||||
name: webhook-server
|
||||
protocol: TCP
|
||||
{{- if not .Values.metrics.proxy.enabled }}
|
||||
|
||||
@@ -39,7 +39,6 @@ spec:
|
||||
{{- $metricsHost := .Values.metrics.proxy.enabled | ternary "127.0.0.1" "0.0.0.0" }}
|
||||
{{- $metricsPort := .Values.metrics.proxy.enabled | ternary "8080" .Values.metrics.port }}
|
||||
- "--metrics-addr={{ $metricsHost }}:{{ $metricsPort }}"
|
||||
- "--sync-period={{ .Values.githubWebhookServer.syncPeriod }}"
|
||||
{{- if .Values.githubWebhookServer.logLevel }}
|
||||
- "--log-level={{ .Values.githubWebhookServer.logLevel }}"
|
||||
{{- end }}
|
||||
|
||||
@@ -1,16 +1,17 @@
|
||||
{{- if .Values.githubWebhookServer.ingress.enabled -}}
|
||||
{{- $fullName := include "actions-runner-controller-github-webhook-server.fullname" . -}}
|
||||
{{- $svcPort := (index .Values.githubWebhookServer.service.ports 0).port -}}
|
||||
{{- if .Capabilities.APIVersions.Has "networking.k8s.io/v1" }}
|
||||
{{- if .Capabilities.APIVersions.Has "networking.k8s.io/v1/Ingress" }}
|
||||
apiVersion: networking.k8s.io/v1
|
||||
{{- else if .Capabilities.APIVersions.Has "networking.k8s.io/v1beta1" }}
|
||||
{{- else if .Capabilities.APIVersions.Has "networking.k8s.io/v1beta1/Ingress" }}
|
||||
apiVersion: networking.k8s.io/v1beta1
|
||||
{{- else if .Capabilities.APIVersions.Has "extensions/v1beta1" }}
|
||||
{{- else if .Capabilities.APIVersions.Has "extensions/v1beta1/Ingress" }}
|
||||
apiVersion: extensions/v1beta1
|
||||
{{- end }}
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: {{ $fullName }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
labels:
|
||||
{{- include "actions-runner-controller.labels" . | nindent 4 }}
|
||||
{{- with .Values.githubWebhookServer.ingress.annotations }}
|
||||
@@ -36,13 +37,16 @@ spec:
|
||||
- host: {{ .host | quote }}
|
||||
http:
|
||||
paths:
|
||||
{{- if .extraPaths }}
|
||||
{{- toYaml .extraPaths | nindent 10 }}
|
||||
{{- end }}
|
||||
{{- range .paths }}
|
||||
- path: {{ .path }}
|
||||
{{- if $.Capabilities.APIVersions.Has "networking.k8s.io/v1" }}
|
||||
{{- if $.Capabilities.APIVersions.Has "networking.k8s.io/v1/Ingress" }}
|
||||
pathType: {{ .pathType }}
|
||||
{{- end }}
|
||||
backend:
|
||||
{{- if $.Capabilities.APIVersions.Has "networking.k8s.io/v1" }}
|
||||
{{- if $.Capabilities.APIVersions.Has "networking.k8s.io/v1/Ingress" }}
|
||||
service:
|
||||
name: {{ $fullName }}
|
||||
port:
|
||||
|
||||
@@ -12,5 +12,17 @@ data:
|
||||
{{- if .Values.githubWebhookServer.secret.github_webhook_secret_token }}
|
||||
github_webhook_secret_token: {{ .Values.githubWebhookServer.secret.github_webhook_secret_token | toString | b64enc }}
|
||||
{{- end }}
|
||||
{{- if .Values.githubWebhookServer.secret.github_app_id }}
|
||||
github_app_id: {{ .Values.githubWebhookServer.secret.github_app_id | toString | b64enc }}
|
||||
{{- end }}
|
||||
{{- if .Values.githubWebhookServer.secret.github_app_installation_id }}
|
||||
github_app_installation_id: {{ .Values.githubWebhookServer.secret.github_app_installation_id | toString | b64enc }}
|
||||
{{- end }}
|
||||
{{- if .Values.githubWebhookServer.secret.github_app_private_key }}
|
||||
github_app_private_key: {{ .Values.githubWebhookServer.secret.github_app_private_key | toString | b64enc }}
|
||||
{{- end }}
|
||||
{{- if .Values.githubWebhookServer.secret.github_token }}
|
||||
github_token: {{ .Values.githubWebhookServer.secret.github_token | toString | b64enc }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
@@ -195,6 +195,28 @@ rules:
|
||||
verbs:
|
||||
- create
|
||||
- patch
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- persistentvolumeclaims
|
||||
verbs:
|
||||
- delete
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- persistentvolumes
|
||||
verbs:
|
||||
- delete
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- coordination.k8s.io
|
||||
resources:
|
||||
@@ -228,3 +250,72 @@ rules:
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- secrets
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
{{- if .Values.runner.statusUpdateHook.enabled }}
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- serviceaccounts
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- apiGroups:
|
||||
- rbac.authorization.k8s.io
|
||||
resources:
|
||||
- rolebindings
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- apiGroups:
|
||||
- rbac.authorization.k8s.io
|
||||
resources:
|
||||
- roles
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
{{- end }}
|
||||
{{- if .Values.rbac.allowGrantingKubernetesContainerModePermissions }}
|
||||
{{/* These permissions are required by ARC to create RBAC resources for the runner pod to use the kubernetes container mode. */}}
|
||||
{{/* See https://github.com/actions-runner-controller/actions-runner-controller/pull/1268/files#r917331632 */}}
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- pods/exec
|
||||
verbs:
|
||||
- create
|
||||
- get
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- pods/log
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- "batch"
|
||||
resources:
|
||||
- jobs
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- create
|
||||
- delete
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- secrets
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
{{- end }}
|
||||
|
||||
@@ -13,7 +13,7 @@ spec:
|
||||
type: {{ .Values.service.type }}
|
||||
ports:
|
||||
- port: 443
|
||||
targetPort: 9443
|
||||
targetPort: {{ .Values.webhookPort }}
|
||||
protocol: TCP
|
||||
name: https
|
||||
selector:
|
||||
|
||||
@@ -6,6 +6,7 @@ labels: {}
|
||||
|
||||
replicaCount: 1
|
||||
|
||||
webhookPort: 9443
|
||||
syncPeriod: 1m
|
||||
defaultScaleDownDelay: 10m
|
||||
|
||||
@@ -14,12 +15,6 @@ enableLeaderElection: true
|
||||
# Must be unique if more than one controller installed onto the same namespace.
|
||||
#leaderElectionId: "actions-runner-controller"
|
||||
|
||||
# DEPRECATED: This has been removed as unnecessary in #1192
|
||||
# The controller tries its best not to repeat the duplicate GitHub API call
|
||||
# within this duration.
|
||||
# Defaults to syncPeriod - 10s.
|
||||
#githubAPICacheDuration: 30s
|
||||
|
||||
# The URL of your GitHub Enterprise server, if you're using one.
|
||||
#githubEnterpriseServerURL: https://github.example.com
|
||||
|
||||
@@ -66,6 +61,18 @@ imagePullSecrets: []
|
||||
nameOverride: ""
|
||||
fullnameOverride: ""
|
||||
|
||||
runner:
|
||||
statusUpdateHook:
|
||||
enabled: false
|
||||
|
||||
rbac:
|
||||
{}
|
||||
# # This allows ARC to dynamically create a ServiceAccount and a Role for each Runner pod that uses "kubernetes" container mode,
|
||||
# # by extending ARC's manager role to have the same permissions required by the pod that runs the runner agent in "kubernetes" container mode.
# # Without this, Kubernetes blocks ARC from creating the role, to prevent privilege escalation.
|
||||
# # See https://github.com/actions-runner-controller/actions-runner-controller/pull/1268/files#r917327010
|
||||
# allowGrantingKubernetesContainerModePermissions: true
|
||||
|
||||
serviceAccount:
|
||||
# Specifies whether a service account should be created
|
||||
create: true
|
||||
@@ -108,7 +115,7 @@ metrics:
|
||||
enabled: true
|
||||
image:
|
||||
repository: quay.io/brancz/kube-rbac-proxy
|
||||
tag: v0.11.0
|
||||
tag: v0.13.0
|
||||
|
||||
resources:
|
||||
{}
|
||||
@@ -142,10 +149,20 @@ priorityClassName: ""
|
||||
|
||||
env:
|
||||
{}
|
||||
# specify additional environment variables for the controller pod.
|
||||
# It's possible to specify either key-value pairs, e.g.:
|
||||
# http_proxy: "proxy.com:8080"
|
||||
# https_proxy: "proxy.com:8080"
|
||||
# no_proxy: ""
|
||||
|
||||
# or a list of complete environment variable definitions e.g.:
|
||||
# - name: GITHUB_APP_INSTALLATION_ID
|
||||
# valueFrom:
|
||||
# secretKeyRef:
|
||||
# key: some_key_in_the_secret
|
||||
# name: some-secret-name
|
||||
# optional: true
|
||||
|
||||
## specify additional volumes to mount in the manager container, this can be used
|
||||
## to specify additional storage of material or to inject files from ConfigMaps
|
||||
## into the running container
|
||||
@@ -174,7 +191,6 @@ admissionWebHooks:
|
||||
githubWebhookServer:
|
||||
enabled: false
|
||||
replicaCount: 1
|
||||
syncPeriod: 10m
|
||||
useRunnerGroupsVisibility: false
|
||||
secret:
|
||||
enabled: false
|
||||
@@ -182,6 +198,13 @@ githubWebhookServer:
|
||||
name: "github-webhook-server"
|
||||
### GitHub Webhook Configuration
|
||||
github_webhook_secret_token: ""
|
||||
### GitHub Apps Configuration
|
||||
## NOTE: IDs MUST be strings, use quotes
|
||||
#github_app_id: ""
|
||||
#github_app_installation_id: ""
|
||||
#github_app_private_key: |
|
||||
### GitHub PAT Configuration
|
||||
#github_token: ""
|
||||
imagePullSecrets: []
|
||||
nameOverride: ""
|
||||
fullnameOverride: ""
|
||||
@@ -223,6 +246,20 @@ githubWebhookServer:
|
||||
paths: []
|
||||
# - path: /*
|
||||
# pathType: ImplementationSpecific
|
||||
# Extra paths that are not automatically connected to the server. This is useful when working with annotation based services.
|
||||
extraPaths: []
|
||||
# - path: /*
|
||||
# backend:
|
||||
# serviceName: ssl-redirect
|
||||
# servicePort: use-annotation
|
||||
## for Kubernetes >=1.19 (when "networking.k8s.io/v1" is used)
|
||||
# - path: /*
|
||||
# pathType: Prefix
|
||||
# backend:
|
||||
# service:
|
||||
# name: ssl-redirect
|
||||
# port:
|
||||
# name: use-annotation
|
||||
tls: []
|
||||
# - secretName: chart-example-tls
|
||||
# hosts:
|
||||
|
||||
@@ -69,9 +69,8 @@ func main() {
|
||||
|
||||
watchNamespace string
|
||||
|
||||
enableLeaderElection bool
|
||||
syncPeriod time.Duration
|
||||
logLevel string
|
||||
logLevel string
|
||||
queueLimit int
|
||||
|
||||
ghClient *github.Client
|
||||
)
|
||||
@@ -88,10 +87,8 @@ func main() {
|
||||
flag.StringVar(&webhookAddr, "webhook-addr", ":8000", "The address the metric endpoint binds to.")
|
||||
flag.StringVar(&metricsAddr, "metrics-addr", ":8080", "The address the metric endpoint binds to.")
|
||||
flag.StringVar(&watchNamespace, "watch-namespace", "", "The namespace to watch for HorizontalRunnerAutoscaler's to scale on Webhook. Set to empty for letting it watch for all namespaces.")
|
||||
flag.BoolVar(&enableLeaderElection, "enable-leader-election", false,
|
||||
"Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager.")
|
||||
flag.DurationVar(&syncPeriod, "sync-period", 10*time.Minute, "Determines the minimum frequency at which K8s resources managed by this controller are reconciled. When you use autoscaling, set to a lower value like 10 minute, because this corresponds to the minimum time to react on demand change")
|
||||
flag.StringVar(&logLevel, "log-level", logging.LogLevelDebug, `The verbosity of the logging. Valid values are "debug", "info", "warn", "error". Defaults to "debug".`)
|
||||
flag.IntVar(&queueLimit, "queue-limit", controllers.DefaultQueueLimit, `The maximum length of the scale operation queue. A scale operation is enqueued for every matching webhook event, and the server returns a 500 HTTP status when the queue is already full at enqueue time.`)
|
||||
flag.StringVar(&webhookSecretToken, "github-webhook-secret-token", "", "The secret token used to validate GitHub webhook payloads.")
|
||||
flag.StringVar(&c.Token, "github-token", c.Token, "The personal access token of GitHub.")
|
||||
flag.Int64Var(&c.AppID, "github-app-id", c.AppID, "The application ID of GitHub App.")
|
||||
@@ -142,10 +139,10 @@ func main() {
|
||||
setupLog.Info("GitHub client is not initialized. Runner groups with custom visibility are not supported. If needed, please provide GitHub authentication. This will incur extra GitHub API calls")
|
||||
}
|
||||
|
||||
syncPeriod := 10 * time.Minute
|
||||
mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
|
||||
Scheme: scheme,
|
||||
SyncPeriod: &syncPeriod,
|
||||
LeaderElection: enableLeaderElection,
|
||||
Namespace: watchNamespace,
|
||||
MetricsBindAddress: metricsAddr,
|
||||
Port: 9443,
|
||||
@@ -164,6 +161,7 @@ func main() {
|
||||
SecretKeyBytes: []byte(webhookSecretToken),
|
||||
Namespace: watchNamespace,
|
||||
GitHubClient: ghClient,
|
||||
QueueLimit: queueLimit,
|
||||
}
|
||||
|
||||
if err = hraGitHubWebhook.SetupWithManager(mgr); err != nil {
|
||||
|
||||
@@ -61,6 +61,16 @@ spec:
|
||||
type: integer
|
||||
type: object
|
||||
type: array
|
||||
githubAPICredentialsFrom:
|
||||
properties:
|
||||
secretRef:
|
||||
properties:
|
||||
name:
|
||||
type: string
|
||||
required:
|
||||
- name
|
||||
type: object
|
||||
type: object
|
||||
maxReplicas:
|
||||
description: MaxReplicas is the maximum number of replicas the deployment is allowed to scale
|
||||
type: integer
|
||||
|
||||
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -22,8 +22,6 @@ bases:
|
||||
- ../certmanager
|
||||
# [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'.
|
||||
#- ../prometheus
|
||||
# [GH_WEBHOOK_SERVER] To enable the GitHub webhook server, uncomment all sections with 'GH_WEBHOOK_SERVER'.
|
||||
#- ../github-webhook-server
|
||||
|
||||
patchesStrategicMerge:
|
||||
# Protect the /metrics endpoint by putting it behind auth.
|
||||
@@ -46,10 +44,6 @@ patchesStrategicMerge:
|
||||
# 'CERTMANAGER' needs to be enabled to use ca injection
|
||||
- webhookcainjection_patch.yaml
|
||||
|
||||
# [GH_WEBHOOK_SERVER] To enable the GitHub webhook server, uncomment all sections with 'GH_WEBHOOK_SERVER'.
|
||||
# Protect the GitHub webhook server metrics endpoint by putting it behind auth.
|
||||
# - gh-webhook-server-auth-proxy-patch.yaml
|
||||
|
||||
# the following config is for teaching kustomize how to do var substitution
|
||||
vars:
|
||||
# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER' prefix.
|
||||
|
||||
@@ -2,11 +2,14 @@ apiVersion: kustomize.config.k8s.io/v1beta1
|
||||
kind: Kustomization
|
||||
|
||||
images:
|
||||
- name: controller
|
||||
newName: summerwind/actions-runner-controller
|
||||
newTag: latest
|
||||
- name: controller
|
||||
newName: summerwind/actions-runner-controller
|
||||
newTag: latest
|
||||
|
||||
resources:
|
||||
- deployment.yaml
|
||||
- rbac.yaml
|
||||
- service.yaml
|
||||
- deployment.yaml
|
||||
- rbac.yaml
|
||||
- service.yaml
|
||||
|
||||
patchesStrategicMerge:
|
||||
- gh-webhook-server-auth-proxy-patch.yaml
|
||||
|
||||
@@ -202,6 +202,29 @@ rules:
|
||||
verbs:
|
||||
- create
|
||||
- patch
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- persistentvolumeclaims
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- persistentvolumes
|
||||
verbs:
|
||||
- delete
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
@@ -226,3 +249,36 @@ rules:
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- secrets
|
||||
verbs:
|
||||
- delete
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- serviceaccounts
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- apiGroups:
|
||||
- rbac.authorization.k8s.io
|
||||
resources:
|
||||
- rolebindings
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- apiGroups:
|
||||
- rbac.authorization.k8s.io
|
||||
resources:
|
||||
- roles
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
|
||||
@@ -9,7 +9,10 @@ import (
|
||||
"strings"
|
||||
|
||||
"github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
|
||||
"github.com/google/go-github/v39/github"
|
||||
arcgithub "github.com/actions-runner-controller/actions-runner-controller/github"
|
||||
"github.com/google/go-github/v45/github"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -19,7 +22,7 @@ const (
|
||||
defaultScaleDownFactor = 0.7
|
||||
)
|
||||
|
||||
func (r *HorizontalRunnerAutoscalerReconciler) suggestDesiredReplicas(st scaleTarget, hra v1alpha1.HorizontalRunnerAutoscaler) (*int, error) {
|
||||
func (r *HorizontalRunnerAutoscalerReconciler) suggestDesiredReplicas(ghc *arcgithub.Client, st scaleTarget, hra v1alpha1.HorizontalRunnerAutoscaler) (*int, error) {
|
||||
if hra.Spec.MinReplicas == nil {
|
||||
return nil, fmt.Errorf("horizontalrunnerautoscaler %s/%s is missing minReplicas", hra.Namespace, hra.Name)
|
||||
} else if hra.Spec.MaxReplicas == nil {
|
||||
@@ -46,9 +49,9 @@ func (r *HorizontalRunnerAutoscalerReconciler) suggestDesiredReplicas(st scaleTa
|
||||
|
||||
switch primaryMetricType {
|
||||
case v1alpha1.AutoscalingMetricTypeTotalNumberOfQueuedAndInProgressWorkflowRuns:
|
||||
suggested, err = r.suggestReplicasByQueuedAndInProgressWorkflowRuns(st, hra, &primaryMetric)
|
||||
suggested, err = r.suggestReplicasByQueuedAndInProgressWorkflowRuns(ghc, st, hra, &primaryMetric)
|
||||
case v1alpha1.AutoscalingMetricTypePercentageRunnersBusy:
|
||||
suggested, err = r.suggestReplicasByPercentageRunnersBusy(st, hra, primaryMetric)
|
||||
suggested, err = r.suggestReplicasByPercentageRunnersBusy(ghc, st, hra, primaryMetric)
|
||||
default:
|
||||
return nil, fmt.Errorf("validating autoscaling metrics: unsupported metric type %q", primaryMetric)
|
||||
}
|
||||
@@ -81,11 +84,10 @@ func (r *HorizontalRunnerAutoscalerReconciler) suggestDesiredReplicas(st scaleTa
|
||||
)
|
||||
}
|
||||
|
||||
return r.suggestReplicasByQueuedAndInProgressWorkflowRuns(st, hra, &fallbackMetric)
|
||||
return r.suggestReplicasByQueuedAndInProgressWorkflowRuns(ghc, st, hra, &fallbackMetric)
|
||||
}
|
||||
|
||||
func (r *HorizontalRunnerAutoscalerReconciler) suggestReplicasByQueuedAndInProgressWorkflowRuns(st scaleTarget, hra v1alpha1.HorizontalRunnerAutoscaler, metrics *v1alpha1.MetricSpec) (*int, error) {
|
||||
|
||||
func (r *HorizontalRunnerAutoscalerReconciler) suggestReplicasByQueuedAndInProgressWorkflowRuns(ghc *arcgithub.Client, st scaleTarget, hra v1alpha1.HorizontalRunnerAutoscaler, metrics *v1alpha1.MetricSpec) (*int, error) {
|
||||
var repos [][]string
|
||||
repoID := st.repo
|
||||
if repoID == "" {
|
||||
@@ -124,7 +126,7 @@ func (r *HorizontalRunnerAutoscalerReconciler) suggestReplicasByQueuedAndInProgr
|
||||
opt := github.ListWorkflowJobsOptions{ListOptions: github.ListOptions{PerPage: 50}}
|
||||
var allJobs []*github.WorkflowJob
|
||||
for {
|
||||
jobs, resp, err := r.GitHubClient.Actions.ListWorkflowJobs(context.TODO(), user, repoName, runID, &opt)
|
||||
jobs, resp, err := ghc.Actions.ListWorkflowJobs(context.TODO(), user, repoName, runID, &opt)
|
||||
if err != nil {
|
||||
r.Log.Error(err, "Error listing workflow jobs")
|
||||
return //err
|
||||
@@ -182,7 +184,7 @@ func (r *HorizontalRunnerAutoscalerReconciler) suggestReplicasByQueuedAndInProgr
|
||||
|
||||
for _, repo := range repos {
|
||||
user, repoName := repo[0], repo[1]
|
||||
workflowRuns, err := r.GitHubClient.ListRepositoryWorkflowRuns(context.TODO(), user, repoName)
|
||||
workflowRuns, err := ghc.ListRepositoryWorkflowRuns(context.TODO(), user, repoName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -224,7 +226,7 @@ func (r *HorizontalRunnerAutoscalerReconciler) suggestReplicasByQueuedAndInProgr
|
||||
return &necessaryReplicas, nil
|
||||
}
|
||||
|
||||
func (r *HorizontalRunnerAutoscalerReconciler) suggestReplicasByPercentageRunnersBusy(st scaleTarget, hra v1alpha1.HorizontalRunnerAutoscaler, metrics v1alpha1.MetricSpec) (*int, error) {
|
||||
func (r *HorizontalRunnerAutoscalerReconciler) suggestReplicasByPercentageRunnersBusy(ghc *arcgithub.Client, st scaleTarget, hra v1alpha1.HorizontalRunnerAutoscaler, metrics v1alpha1.MetricSpec) (*int, error) {
|
||||
ctx := context.Background()
|
||||
scaleUpThreshold := defaultScaleUpThreshold
|
||||
scaleDownThreshold := defaultScaleDownThreshold
|
||||
@@ -293,7 +295,7 @@ func (r *HorizontalRunnerAutoscalerReconciler) suggestReplicasByPercentageRunner
|
||||
)
|
||||
|
||||
// ListRunners will return all runners managed by GitHub - not restricted to ns
|
||||
runners, err := r.GitHubClient.ListRunners(
|
||||
runners, err := ghc.ListRunners(
|
||||
ctx,
|
||||
enterprise,
|
||||
organization,
|
||||
@@ -314,22 +316,52 @@ func (r *HorizontalRunnerAutoscalerReconciler) suggestReplicasByPercentageRunner
|
||||
numRunners int
|
||||
numRunnersRegistered int
|
||||
numRunnersBusy int
|
||||
numTerminatingBusy int
|
||||
)
|
||||
|
||||
numRunners = len(runnerMap)
|
||||
|
||||
busyTerminatingRunnerPods := map[string]struct{}{}
|
||||
|
||||
kindLabel := LabelKeyRunnerDeploymentName
|
||||
if hra.Spec.ScaleTargetRef.Kind == "RunnerSet" {
|
||||
kindLabel = LabelKeyRunnerSetName
|
||||
}
|
||||
|
||||
var runnerPodList corev1.PodList
|
||||
if err := r.Client.List(ctx, &runnerPodList, client.InNamespace(hra.Namespace), client.MatchingLabels(map[string]string{
|
||||
kindLabel: hra.Spec.ScaleTargetRef.Name,
|
||||
})); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, p := range runnerPodList.Items {
|
||||
if p.Annotations[AnnotationKeyUnregistrationFailureMessage] != "" {
|
||||
busyTerminatingRunnerPods[p.Name] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
for _, runner := range runners {
|
||||
if _, ok := runnerMap[*runner.Name]; ok {
|
||||
numRunnersRegistered++
|
||||
|
||||
if runner.GetBusy() {
|
||||
numRunnersBusy++
|
||||
} else if _, ok := busyTerminatingRunnerPods[*runner.Name]; ok {
|
||||
numTerminatingBusy++
|
||||
}
|
||||
|
||||
delete(busyTerminatingRunnerPods, *runner.Name)
|
||||
}
|
||||
}
|
||||
|
||||
// Remaining busyTerminatingRunnerPods are runners that were not on the ListRunners API response yet
|
||||
for range busyTerminatingRunnerPods {
|
||||
numTerminatingBusy++
|
||||
}
|
||||
|
||||
var desiredReplicas int
|
||||
fractionBusy := float64(numRunnersBusy) / float64(desiredReplicasBefore)
|
||||
fractionBusy := float64(numRunnersBusy+numTerminatingBusy) / float64(desiredReplicasBefore)
|
||||
if fractionBusy >= scaleUpThreshold {
|
||||
if scaleUpAdjustment > 0 {
|
||||
desiredReplicas = desiredReplicasBefore + scaleUpAdjustment
|
||||
@@ -358,6 +390,7 @@ func (r *HorizontalRunnerAutoscalerReconciler) suggestReplicasByPercentageRunner
|
||||
"num_runners", numRunners,
|
||||
"num_runners_registered", numRunnersRegistered,
|
||||
"num_runners_busy", numRunnersBusy,
|
||||
"num_terminating_busy", numTerminatingBusy,
|
||||
"namespace", hra.Namespace,
|
||||
"kind", st.kind,
|
||||
"name", st.st,
|
||||
|
||||
@@ -330,7 +330,6 @@ func TestDetermineDesiredReplicas_RepositoryRunner(t *testing.T) {
|
||||
|
||||
h := &HorizontalRunnerAutoscalerReconciler{
|
||||
Log: log,
|
||||
GitHubClient: client,
|
||||
Scheme: scheme,
|
||||
DefaultScaleDownDelay: DefaultScaleDownDelay,
|
||||
}
|
||||
@@ -379,7 +378,7 @@ func TestDetermineDesiredReplicas_RepositoryRunner(t *testing.T) {
|
||||
|
||||
st := h.scaleTargetFromRD(context.Background(), rd)
|
||||
|
||||
got, err := h.computeReplicasWithCache(log, metav1Now.Time, st, hra, minReplicas)
|
||||
got, err := h.computeReplicasWithCache(client, log, metav1Now.Time, st, hra, minReplicas)
|
||||
if err != nil {
|
||||
if tc.err == "" {
|
||||
t.Fatalf("unexpected error: expected none, got %v", err)
|
||||
@@ -720,7 +719,6 @@ func TestDetermineDesiredReplicas_OrganizationalRunner(t *testing.T) {
|
||||
h := &HorizontalRunnerAutoscalerReconciler{
|
||||
Log: log,
|
||||
Scheme: scheme,
|
||||
GitHubClient: client,
|
||||
DefaultScaleDownDelay: DefaultScaleDownDelay,
|
||||
}
|
||||
|
||||
@@ -781,7 +779,7 @@ func TestDetermineDesiredReplicas_OrganizationalRunner(t *testing.T) {
|
||||
|
||||
st := h.scaleTargetFromRD(context.Background(), rd)
|
||||
|
||||
got, err := h.computeReplicasWithCache(log, metav1Now.Time, st, hra, minReplicas)
|
||||
got, err := h.computeReplicasWithCache(client, log, metav1Now.Time, st, hra, minReplicas)
|
||||
if err != nil {
|
||||
if tc.err == "" {
|
||||
t.Fatalf("unexpected error: expected none, got %v", err)
|
||||
|
||||
@@ -4,17 +4,22 @@ import "time"
|
||||
|
||||
const (
|
||||
LabelKeyRunnerSetName = "runnerset-name"
|
||||
LabelKeyRunner = "actions-runner"
|
||||
)
|
||||
|
||||
const (
|
||||
// These names require at least one slash to work.
|
||||
// See https://github.com/google/knative-gcp/issues/378
|
||||
runnerPodFinalizerName = "actions.summerwind.dev/runner-pod"
|
||||
runnerPodFinalizerName = "actions.summerwind.dev/runner-pod"
|
||||
runnerLinkedResourcesFinalizerName = "actions.summerwind.dev/linked-resources"
|
||||
|
||||
annotationKeyPrefix = "actions-runner/"
|
||||
|
||||
AnnotationKeyLastRegistrationCheckTime = "actions-runner-controller/last-registration-check-time"
|
||||
|
||||
// AnnotationKeyUnregistrationFailureMessage is the annotation that is added onto the pod once it failed to be unregistered from GitHub due to e.g. 422 error
|
||||
AnnotationKeyUnregistrationFailureMessage = annotationKeyPrefix + "unregistration-failure-message"
|
||||
|
||||
// AnnotationKeyUnregistrationCompleteTimestamp is the annotation that is added onto the pod once the previously started unregistration process has been completed.
|
||||
AnnotationKeyUnregistrationCompleteTimestamp = annotationKeyPrefix + "unregistration-complete-timestamp"
|
||||
|
||||
@@ -47,8 +52,6 @@ const (
|
||||
// A pod that is timed out can be terminated if needed.
|
||||
registrationTimeout = 10 * time.Minute
|
||||
|
||||
defaultRegistrationCheckInterval = time.Minute
|
||||
|
||||
// DefaultRunnerPodRecreationDelayAfterWebhookScale is the delay until syncing the runners with the desired replicas
|
||||
// after a webhook-based scale up.
|
||||
// This is used to prevent ARC from recreating completed runner pods that are deleted soon without being used at all.
|
||||
@@ -63,4 +66,7 @@ const (
|
||||
|
||||
EnvVarRunnerName = "RUNNER_NAME"
|
||||
EnvVarRunnerToken = "RUNNER_TOKEN"
|
||||
|
||||
// defaultRunnerHookPath is the path to the hook script used when "containerMode: kubernetes" is specified
|
||||
defaultRunnerHookPath = "/runner/k8s/index.js"
|
||||
)
|
||||
|
||||
207
controllers/horizontal_runner_autoscaler_batch_scale.go
Normal file
@@ -0,0 +1,207 @@
|
||||
package controllers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
|
||||
"github.com/go-logr/logr"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
)
|
||||
|
||||
type batchScaler struct {
|
||||
Ctx context.Context
|
||||
Client client.Client
|
||||
Log logr.Logger
|
||||
interval time.Duration
|
||||
|
||||
queue chan *ScaleTarget
|
||||
workerStart sync.Once
|
||||
}
|
||||
|
||||
func newBatchScaler(ctx context.Context, client client.Client, log logr.Logger) *batchScaler {
|
||||
return &batchScaler{
|
||||
Ctx: ctx,
|
||||
Client: client,
|
||||
Log: log,
|
||||
interval: 3 * time.Second,
|
||||
}
|
||||
}
|
||||
|
||||
type batchScaleOperation struct {
|
||||
namespacedName types.NamespacedName
|
||||
scaleOps []scaleOperation
|
||||
}
|
||||
|
||||
type scaleOperation struct {
|
||||
trigger v1alpha1.ScaleUpTrigger
|
||||
log logr.Logger
|
||||
}
|
||||
|
||||
// Add adds the scale target to the unbounded queue, blocking until the target is successfully enqueued.
|
||||
// All the targets in the queue are dequeued every 3 seconds, grouped by the HRA, and applied.
|
||||
// In the happy path, batchScaler updates each HRA only once, even if the HRA had two or more associated webhook events within the 3-second interval,
|
||||
// which results in fewer K8s API calls and fewer HRA update conflicts when your ARC installation receives a lot of webhook events.
|
||||
func (s *batchScaler) Add(st *ScaleTarget) {
|
||||
if st == nil {
|
||||
return
|
||||
}
|
||||
|
||||
s.workerStart.Do(func() {
|
||||
var expBackoff = []time.Duration{time.Second, 2 * time.Second, 4 * time.Second, 8 * time.Second, 16 * time.Second}
|
||||
|
||||
s.queue = make(chan *ScaleTarget)
|
||||
|
||||
log := s.Log
|
||||
|
||||
go func() {
|
||||
log.Info("Starting batch worker")
|
||||
defer log.Info("Stopped batch worker")
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-s.Ctx.Done():
|
||||
return
|
||||
default:
|
||||
}
|
||||
|
||||
log.V(2).Info("Batch worker is dequeueing operations")
|
||||
|
||||
batches := map[types.NamespacedName]batchScaleOperation{}
|
||||
after := time.After(s.interval)
|
||||
var ops uint
|
||||
|
||||
batch:
|
||||
for {
|
||||
select {
|
||||
case <-after:
|
||||
after = nil
|
||||
break batch
|
||||
case st := <-s.queue:
|
||||
nsName := types.NamespacedName{
|
||||
Namespace: st.HorizontalRunnerAutoscaler.Namespace,
|
||||
Name: st.HorizontalRunnerAutoscaler.Name,
|
||||
}
|
||||
b, ok := batches[nsName]
|
||||
if !ok {
|
||||
b = batchScaleOperation{
|
||||
namespacedName: nsName,
|
||||
}
|
||||
}
|
||||
b.scaleOps = append(b.scaleOps, scaleOperation{
|
||||
log: *st.log,
|
||||
trigger: st.ScaleUpTrigger,
|
||||
})
|
||||
batches[nsName] = b
|
||||
ops++
|
||||
}
|
||||
}
|
||||
|
||||
log.V(2).Info("Batch worker dequeued operations", "ops", ops, "batches", len(batches))
|
||||
|
||||
retry:
|
||||
for i := 0; ; i++ {
|
||||
failed := map[types.NamespacedName]batchScaleOperation{}
|
||||
|
||||
for nsName, b := range batches {
|
||||
b := b
|
||||
if err := s.batchScale(context.Background(), b); err != nil {
|
||||
log.V(2).Info("Failed to scale due to error", "error", err)
|
||||
failed[nsName] = b
|
||||
} else {
|
||||
log.V(2).Info("Successfully ran batch scale", "hra", b.namespacedName)
|
||||
}
|
||||
}
|
||||
|
||||
if len(failed) == 0 {
|
||||
break retry
|
||||
}
|
||||
|
||||
batches = failed
|
||||
|
||||
delay := 16 * time.Second
|
||||
if i < len(expBackoff) {
|
||||
delay = expBackoff[i]
|
||||
}
|
||||
time.Sleep(delay)
|
||||
}
|
||||
}
|
||||
}()
|
||||
})
|
||||
|
||||
s.queue <- st
|
||||
}
|
||||
|
||||
func (s *batchScaler) batchScale(ctx context.Context, batch batchScaleOperation) error {
|
||||
var hra v1alpha1.HorizontalRunnerAutoscaler
|
||||
|
||||
if err := s.Client.Get(ctx, batch.namespacedName, &hra); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
copy := hra.DeepCopy()
|
||||
|
||||
copy.Spec.CapacityReservations = getValidCapacityReservations(copy)
|
||||
|
||||
var added, completed int
|
||||
|
||||
for _, scale := range batch.scaleOps {
|
||||
amount := 1
|
||||
|
||||
if scale.trigger.Amount != 0 {
|
||||
amount = scale.trigger.Amount
|
||||
}
|
||||
|
||||
scale.log.V(2).Info("Adding capacity reservation", "amount", amount)
|
||||
|
||||
if amount > 0 {
|
||||
now := time.Now()
|
||||
copy.Spec.CapacityReservations = append(copy.Spec.CapacityReservations, v1alpha1.CapacityReservation{
|
||||
EffectiveTime: metav1.Time{Time: now},
|
||||
ExpirationTime: metav1.Time{Time: now.Add(scale.trigger.Duration.Duration)},
|
||||
Replicas: amount,
|
||||
})
|
||||
|
||||
added += amount
|
||||
} else if amount < 0 {
|
||||
var reservations []v1alpha1.CapacityReservation
|
||||
|
||||
var found bool
|
||||
|
||||
for _, r := range copy.Spec.CapacityReservations {
|
||||
if !found && r.Replicas+amount == 0 {
|
||||
found = true
|
||||
} else {
|
||||
reservations = append(reservations, r)
|
||||
}
|
||||
}
|
||||
|
||||
copy.Spec.CapacityReservations = reservations
|
||||
|
||||
completed += amount
|
||||
}
|
||||
}
|
||||
|
||||
before := len(hra.Spec.CapacityReservations)
|
||||
expired := before - len(copy.Spec.CapacityReservations)
|
||||
after := len(copy.Spec.CapacityReservations)
|
||||
|
||||
s.Log.V(1).Info(
|
||||
fmt.Sprintf("Updating hra %s for capacityReservations update", hra.Name),
|
||||
"before", before,
|
||||
"expired", expired,
|
||||
"added", added,
|
||||
"completed", completed,
|
||||
"after", after,
|
||||
)
|
||||
|
||||
if err := s.Client.Update(ctx, copy); err != nil {
|
||||
return fmt.Errorf("updating horizontalrunnerautoscaler to add capacity reservation: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
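The doc comment on batchScaler.Add above describes the batching behaviour in prose. Below is a minimal, illustrative sketch of how the scaler is intended to be driven from the webhook side; it is not part of this diff, assumes it lives in the controllers package with imports for context, logr, the v1alpha1 API and sigs.k8s.io/controller-runtime/pkg/client, and enqueueScaleUp is a hypothetical helper name.

func enqueueScaleUp(ctx context.Context, c client.Client, log logr.Logger, hra v1alpha1.HorizontalRunnerAutoscaler, trigger v1alpha1.ScaleUpTrigger) {
	// Illustrative only: in practice a single batchScaler is created once and reused across events.
	scaler := newBatchScaler(ctx, c, log)
	// Targets queued within the same 3-second window for the same HRA are folded into a
	// single HorizontalRunnerAutoscaler update, reducing API calls and update conflicts.
	scaler.Add(&ScaleTarget{
		HorizontalRunnerAutoscaler: hra,
		ScaleUpTrigger:             trigger,
		log:                        &log,
	})
}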
@@ -23,14 +23,14 @@ import (
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
|
||||
"github.com/go-logr/logr"
|
||||
gogithub "github.com/google/go-github/v39/github"
|
||||
gogithub "github.com/google/go-github/v45/github"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/client-go/tools/record"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
@@ -46,6 +46,8 @@ const (
|
||||
|
||||
keyPrefixEnterprise = "enterprises/"
|
||||
keyRunnerGroup = "/group/"
|
||||
|
||||
DefaultQueueLimit = 100
|
||||
)
|
||||
|
||||
// HorizontalRunnerAutoscalerGitHubWebhook autoscales a HorizontalRunnerAutoscaler and the RunnerDeployment on each
|
||||
@@ -68,6 +70,15 @@ type HorizontalRunnerAutoscalerGitHubWebhook struct {
|
||||
// Set to empty for letting it watch for all namespaces.
|
||||
Namespace string
|
||||
Name string
|
||||
|
||||
// QueueLimit is the maximum length of the bounded queue of scale targets and their associated operations
|
||||
// A scale target is enqueued for each eligible webhook event received, so that it is processed asynchronously.
|
||||
QueueLimit int
|
||||
|
||||
worker *worker
|
||||
workerInit sync.Once
|
||||
workerStart sync.Once
|
||||
batchCh chan *ScaleTarget
|
||||
}
|
||||
|
||||
func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) Reconcile(_ context.Context, request reconcile.Request) (reconcile.Result, error) {
|
||||
@@ -312,9 +323,19 @@ func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) Handle(w http.Respons
|
||||
return
|
||||
}
|
||||
|
||||
if err := autoscaler.tryScale(context.TODO(), target); err != nil {
|
||||
log.Error(err, "could not scale up")
|
||||
autoscaler.workerInit.Do(func() {
|
||||
batchScaler := newBatchScaler(context.Background(), autoscaler.Client, autoscaler.Log)
|
||||
|
||||
queueLimit := autoscaler.QueueLimit
|
||||
if queueLimit == 0 {
|
||||
queueLimit = DefaultQueueLimit
|
||||
}
|
||||
autoscaler.worker = newWorker(context.Background(), queueLimit, batchScaler.Add)
|
||||
})
|
||||
|
||||
target.log = &log
|
||||
if ok := autoscaler.worker.Add(target); !ok {
|
||||
log.Error(err, "Could not scale up due to queue full")
|
||||
return
|
||||
}
|
||||
|
||||
@@ -383,6 +404,8 @@ func matchTriggerConditionAgainstEvent(types []string, eventAction *string) bool
|
||||
type ScaleTarget struct {
|
||||
v1alpha1.HorizontalRunnerAutoscaler
|
||||
v1alpha1.ScaleUpTrigger
|
||||
|
||||
log *logr.Logger
|
||||
}
|
||||
|
||||
func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) searchScaleTargets(hras []v1alpha1.HorizontalRunnerAutoscaler, f func(v1alpha1.ScaleUpTrigger) bool) []ScaleTarget {
|
||||
@@ -501,6 +524,7 @@ func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) getScaleUpTargetWithF
|
||||
if autoscaler.GitHubClient != nil {
|
||||
simu := &simulator.Simulator{
|
||||
Client: autoscaler.GitHubClient,
|
||||
Log: log,
|
||||
}
|
||||
// Get available organization runner groups and enterprise runner groups for a repository
|
||||
// These are the sum of runner groups with repository access = All repositories and runner groups
|
||||
@@ -770,63 +794,6 @@ HRA:
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) tryScale(ctx context.Context, target *ScaleTarget) error {
|
||||
if target == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
copy := target.HorizontalRunnerAutoscaler.DeepCopy()
|
||||
|
||||
amount := 1
|
||||
|
||||
if target.ScaleUpTrigger.Amount != 0 {
|
||||
amount = target.ScaleUpTrigger.Amount
|
||||
}
|
||||
|
||||
capacityReservations := getValidCapacityReservations(copy)
|
||||
|
||||
if amount > 0 {
|
||||
now := time.Now()
|
||||
copy.Spec.CapacityReservations = append(capacityReservations, v1alpha1.CapacityReservation{
|
||||
EffectiveTime: metav1.Time{Time: now},
|
||||
ExpirationTime: metav1.Time{Time: now.Add(target.ScaleUpTrigger.Duration.Duration)},
|
||||
Replicas: amount,
|
||||
})
|
||||
} else if amount < 0 {
|
||||
var reservations []v1alpha1.CapacityReservation
|
||||
|
||||
var found bool
|
||||
|
||||
for _, r := range capacityReservations {
|
||||
if !found && r.Replicas+amount == 0 {
|
||||
found = true
|
||||
} else {
|
||||
reservations = append(reservations, r)
|
||||
}
|
||||
}
|
||||
|
||||
copy.Spec.CapacityReservations = reservations
|
||||
}
|
||||
|
||||
before := len(target.HorizontalRunnerAutoscaler.Spec.CapacityReservations)
|
||||
expired := before - len(capacityReservations)
|
||||
after := len(copy.Spec.CapacityReservations)
|
||||
|
||||
autoscaler.Log.V(1).Info(
|
||||
fmt.Sprintf("Patching hra %s for capacityReservations update", target.HorizontalRunnerAutoscaler.Name),
|
||||
"before", before,
|
||||
"expired", expired,
|
||||
"amount", amount,
|
||||
"after", after,
|
||||
)
|
||||
|
||||
if err := autoscaler.Client.Patch(ctx, copy, client.MergeFrom(&target.HorizontalRunnerAutoscaler)); err != nil {
|
||||
return fmt.Errorf("patching horizontalrunnerautoscaler to add capacity reservation: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func getValidCapacityReservations(autoscaler *v1alpha1.HorizontalRunnerAutoscaler) []v1alpha1.CapacityReservation {
|
||||
var capacityReservations []v1alpha1.CapacityReservation
|
||||
|
||||
|
||||
@@ -3,7 +3,7 @@ package controllers
|
||||
import (
|
||||
"github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
|
||||
"github.com/actions-runner-controller/actions-runner-controller/pkg/actionsglob"
|
||||
"github.com/google/go-github/v39/github"
|
||||
"github.com/google/go-github/v45/github"
|
||||
)
|
||||
|
||||
func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) MatchCheckRunEvent(event *github.CheckRunEvent) func(scaleUpTrigger v1alpha1.ScaleUpTrigger) bool {
|
||||
|
||||
@@ -2,7 +2,7 @@ package controllers
|
||||
|
||||
import (
|
||||
"github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
|
||||
"github.com/google/go-github/v39/github"
|
||||
"github.com/google/go-github/v45/github"
|
||||
)
|
||||
|
||||
func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) MatchPullRequestEvent(event *github.PullRequestEvent) func(scaleUpTrigger v1alpha1.ScaleUpTrigger) bool {
|
||||
|
||||
@@ -2,7 +2,7 @@ package controllers
|
||||
|
||||
import (
|
||||
"github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
|
||||
"github.com/google/go-github/v39/github"
|
||||
"github.com/google/go-github/v45/github"
|
||||
)
|
||||
|
||||
func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) MatchPushEvent(event *github.PushEvent) func(scaleUpTrigger v1alpha1.ScaleUpTrigger) bool {
|
||||
@@ -15,10 +15,6 @@ func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) MatchPushEvent(event
|
||||
|
||||
push := g.Push
|
||||
|
||||
if push == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
return push != nil
|
||||
}
|
||||
}
|
||||
|
||||
@@ -15,7 +15,7 @@ import (
|
||||
|
||||
actionsv1alpha1 "github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
|
||||
"github.com/go-logr/logr"
|
||||
"github.com/google/go-github/v39/github"
|
||||
"github.com/google/go-github/v45/github"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
|
||||
|
||||
55
controllers/horizontal_runner_autoscaler_webhook_worker.go
Normal file
@@ -0,0 +1,55 @@
|
||||
package controllers
|
||||
|
||||
import (
|
||||
"context"
|
||||
)
|
||||
|
||||
// worker has a non-blocking bounded queue of scale targets; it dequeues scale targets and executes the scale operations one by one.
|
||||
type worker struct {
|
||||
scaleTargetQueue chan *ScaleTarget
|
||||
work func(*ScaleTarget)
|
||||
done chan struct{}
|
||||
}
|
||||
|
||||
func newWorker(ctx context.Context, queueLimit int, work func(*ScaleTarget)) *worker {
|
||||
w := &worker{
|
||||
scaleTargetQueue: make(chan *ScaleTarget, queueLimit),
|
||||
work: work,
|
||||
done: make(chan struct{}),
|
||||
}
|
||||
|
||||
go func() {
|
||||
defer close(w.done)
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case t := <-w.scaleTargetQueue:
|
||||
work(t)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
return w
|
||||
}
|
||||
|
||||
// Add adds the scale target to the bounded queue, returning true on successful enqueue and false otherwise.
|
||||
// When false is returned, the queue is already full and the enqueue operation must be retried later.
|
||||
// If the enqueue was triggered by an external source and there's no intermediate queue that we can use,
|
||||
// you must instruct the source to resend the original request later.
|
||||
// In case you're building a webhook server around this worker, this means that you must return an HTTP error response,
|
||||
// so that (hopefully) the sender can resend the webhook event later, or at least the human operator can notice or be notified about the
|
||||
// webhook delivery failure so that a manual retry can be done later.
|
||||
func (w *worker) Add(st *ScaleTarget) bool {
|
||||
select {
|
||||
case w.scaleTargetQueue <- st:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func (w *worker) Done() chan struct{} {
|
||||
return w.done
|
||||
}
|
||||
@@ -0,0 +1,36 @@
|
||||
package controllers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestWorker_Add(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
w := newWorker(ctx, 2, func(st *ScaleTarget) {})
|
||||
require.True(t, w.Add(&ScaleTarget{}))
|
||||
require.True(t, w.Add(&ScaleTarget{}))
|
||||
require.False(t, w.Add(&ScaleTarget{}))
|
||||
}
|
||||
|
||||
func TestWorker_Work(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
var count int
|
||||
|
||||
w := newWorker(ctx, 1, func(st *ScaleTarget) {
|
||||
count++
|
||||
cancel()
|
||||
})
|
||||
require.True(t, w.Add(&ScaleTarget{}))
|
||||
require.False(t, w.Add(&ScaleTarget{}))
|
||||
|
||||
<-w.Done()
|
||||
|
||||
require.Equal(t, count, 1)
|
||||
}
|
||||
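The doc comment on worker.Add above reasons about what a webhook server should do when the bounded queue is full. As a minimal sketch of that handling (not part of this diff; the handler name, wiring and chosen status code are placeholders for illustration, assuming net/http and the controllers package):

func handleScaleUp(w http.ResponseWriter, wk *worker, target *ScaleTarget) {
	if ok := wk.Add(target); !ok {
		// Queue full: answer with a retryable status instead of silently dropping the event,
		// so the sender (or a human operator) can redeliver the webhook later.
		http.Error(w, "scale target queue is full, retry later", http.StatusServiceUnavailable)
		return
	}
	w.WriteHeader(http.StatusOK)
}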
@@ -24,7 +24,6 @@ import (
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
|
||||
"github.com/actions-runner-controller/actions-runner-controller/github"
|
||||
"github.com/go-logr/logr"
|
||||
kerrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
@@ -38,6 +37,7 @@ import (
|
||||
|
||||
"github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
|
||||
"github.com/actions-runner-controller/actions-runner-controller/controllers/metrics"
|
||||
arcgithub "github.com/actions-runner-controller/actions-runner-controller/github"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -47,11 +47,10 @@ const (
|
||||
// HorizontalRunnerAutoscalerReconciler reconciles a HorizontalRunnerAutoscaler object
|
||||
type HorizontalRunnerAutoscalerReconciler struct {
|
||||
client.Client
|
||||
GitHubClient *github.Client
|
||||
GitHubClient *MultiGitHubClient
|
||||
Log logr.Logger
|
||||
Recorder record.EventRecorder
|
||||
Scheme *runtime.Scheme
|
||||
CacheDuration time.Duration
|
||||
DefaultScaleDownDelay time.Duration
|
||||
Name string
|
||||
}
|
||||
@@ -73,6 +72,8 @@ func (r *HorizontalRunnerAutoscalerReconciler) Reconcile(ctx context.Context, re
|
||||
}
|
||||
|
||||
if !hra.ObjectMeta.DeletionTimestamp.IsZero() {
|
||||
r.GitHubClient.DeinitForHRA(&hra)
|
||||
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
@@ -310,7 +311,12 @@ func (r *HorizontalRunnerAutoscalerReconciler) reconcile(ctx context.Context, re
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
newDesiredReplicas, err := r.computeReplicasWithCache(log, now, st, hra, minReplicas)
|
||||
ghc, err := r.GitHubClient.InitForHRA(context.Background(), &hra)
|
||||
if err != nil {
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
newDesiredReplicas, err := r.computeReplicasWithCache(ghc, log, now, st, hra, minReplicas)
|
||||
if err != nil {
|
||||
r.Recorder.Event(&hra, corev1.EventTypeNormal, "RunnerAutoscalingFailure", err.Error())
|
||||
|
||||
@@ -461,10 +467,10 @@ func (r *HorizontalRunnerAutoscalerReconciler) getMinReplicas(log logr.Logger, n
|
||||
return minReplicas, active, upcoming, nil
|
||||
}
|
||||
|
||||
func (r *HorizontalRunnerAutoscalerReconciler) computeReplicasWithCache(log logr.Logger, now time.Time, st scaleTarget, hra v1alpha1.HorizontalRunnerAutoscaler, minReplicas int) (int, error) {
|
||||
func (r *HorizontalRunnerAutoscalerReconciler) computeReplicasWithCache(ghc *arcgithub.Client, log logr.Logger, now time.Time, st scaleTarget, hra v1alpha1.HorizontalRunnerAutoscaler, minReplicas int) (int, error) {
|
||||
var suggestedReplicas int
|
||||
|
||||
v, err := r.suggestDesiredReplicas(st, hra)
|
||||
v, err := r.suggestDesiredReplicas(ghc, st, hra)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
@@ -8,7 +8,7 @@ import (
|
||||
"time"
|
||||
|
||||
github2 "github.com/actions-runner-controller/actions-runner-controller/github"
|
||||
"github.com/google/go-github/v39/github"
|
||||
"github.com/google/go-github/v45/github"
|
||||
|
||||
"github.com/actions-runner-controller/actions-runner-controller/github/fake"
|
||||
|
||||
@@ -99,12 +99,14 @@ func SetupIntegrationTest(ctx2 context.Context) *testEnvironment {
|
||||
return fmt.Sprintf("%s%s", ns.Name, name)
|
||||
}
|
||||
|
||||
multiClient := NewMultiGitHubClient(mgr.GetClient(), env.ghClient)
|
||||
|
||||
runnerController := &RunnerReconciler{
|
||||
Client: mgr.GetClient(),
|
||||
Scheme: scheme.Scheme,
|
||||
Log: logf.Log,
|
||||
Recorder: mgr.GetEventRecorderFor("runnerreplicaset-controller"),
|
||||
GitHubClient: env.ghClient,
|
||||
GitHubClient: multiClient,
|
||||
RunnerImage: "example/runner:test",
|
||||
DockerImage: "example/docker:test",
|
||||
Name: controllerName("runner"),
|
||||
@@ -116,12 +118,11 @@ func SetupIntegrationTest(ctx2 context.Context) *testEnvironment {
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to setup runner controller")
|
||||
|
||||
replicasetController := &RunnerReplicaSetReconciler{
|
||||
Client: mgr.GetClient(),
|
||||
Scheme: scheme.Scheme,
|
||||
Log: logf.Log,
|
||||
Recorder: mgr.GetEventRecorderFor("runnerreplicaset-controller"),
|
||||
GitHubClient: env.ghClient,
|
||||
Name: controllerName("runnerreplicaset"),
|
||||
Client: mgr.GetClient(),
|
||||
Scheme: scheme.Scheme,
|
||||
Log: logf.Log,
|
||||
Recorder: mgr.GetEventRecorderFor("runnerreplicaset-controller"),
|
||||
Name: controllerName("runnerreplicaset"),
|
||||
}
|
||||
err = replicasetController.SetupWithManager(mgr)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to setup runnerreplicaset controller")
|
||||
@@ -137,13 +138,12 @@ func SetupIntegrationTest(ctx2 context.Context) *testEnvironment {
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to setup runnerdeployment controller")
|
||||
|
||||
autoscalerController := &HorizontalRunnerAutoscalerReconciler{
|
||||
Client: mgr.GetClient(),
|
||||
Scheme: scheme.Scheme,
|
||||
Log: logf.Log,
|
||||
GitHubClient: env.ghClient,
|
||||
Recorder: mgr.GetEventRecorderFor("horizontalrunnerautoscaler-controller"),
|
||||
CacheDuration: 1 * time.Second,
|
||||
Name: controllerName("horizontalrunnerautoscaler"),
|
||||
Client: mgr.GetClient(),
|
||||
Scheme: scheme.Scheme,
|
||||
Log: logf.Log,
|
||||
GitHubClient: multiClient,
|
||||
Recorder: mgr.GetEventRecorderFor("horizontalrunnerautoscaler-controller"),
|
||||
Name: controllerName("horizontalrunnerautoscaler"),
|
||||
}
|
||||
err = autoscalerController.SetupWithManager(mgr)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to setup autoscaler controller")
|
||||
@@ -1367,7 +1367,7 @@ func (env *testEnvironment) ExpectRegisteredNumberCountEventuallyEquals(want int
|
||||
|
||||
return len(rs)
|
||||
},
|
||||
time.Second*5, time.Millisecond*500).Should(Equal(want), optionalDescriptions...)
|
||||
time.Second*10, time.Millisecond*500).Should(Equal(want), optionalDescriptions...)
|
||||
}
|
||||
|
||||
func (env *testEnvironment) SendOrgPullRequestEvent(org, repo, branch, action string) {
|
||||
|
||||
389
controllers/multi_githubclient.go
Normal file
@@ -0,0 +1,389 @@
|
||||
package controllers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/sha1"
|
||||
"encoding/base64"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"sort"
|
||||
"strconv"
|
||||
"sync"
|
||||
|
||||
"github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
|
||||
"github.com/actions-runner-controller/actions-runner-controller/github"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
)
|
||||
|
||||
const (
|
||||
// The API creds secret annotation is added by the runner controller or the runnerset controller according to runner.spec.githubAPICredentialsFrom.secretRef.name,
|
||||
// so that the runner pod controller can share the same GitHub API credentials and the instance of the GitHub API client with the upstream controllers.
|
||||
annotationKeyGitHubAPICredsSecret = annotationKeyPrefix + "github-api-creds-secret"
|
||||
)
|
||||
|
||||
type runnerOwnerRef struct {
|
||||
// kind is either StatefulSet or Runner, and populated via the owner reference in the runner pod controller or via the reconciliation target's kind in
|
||||
// runnerset and runner controllers.
|
||||
kind string
|
||||
ns, name string
|
||||
}
|
||||
|
||||
type secretRef struct {
|
||||
ns, name string
|
||||
}
|
||||
|
||||
// savedClient is a cache entry that contains the client for a specific set of credentials,
|
||||
// like a PAT or a pair of key and cert.
|
||||
// The `hash` is part of the savedClient, not the key, because we keep only the client for the latest creds
|
||||
// in case the operator updated the k8s secret containing the credentials.
|
||||
type savedClient struct {
|
||||
hash string
|
||||
|
||||
// refs is the map of all the objects that references this client, used for reference counting to gc
|
||||
// the client if unneeded.
|
||||
refs map[runnerOwnerRef]struct{}
|
||||
|
||||
*github.Client
|
||||
}
|
||||
|
||||
type resourceReader interface {
|
||||
Get(context.Context, types.NamespacedName, client.Object) error
|
||||
}
|
||||
|
||||
type MultiGitHubClient struct {
|
||||
mu sync.Mutex
|
||||
|
||||
client resourceReader
|
||||
|
||||
githubClient *github.Client
|
||||
|
||||
// The saved client is freed once all its dependents disappear, or the contents of the secret change.
|
||||
// We track dependents via a Go map embedded within the savedClient struct. Each dependent is checked in its respective Kubernetes finalizer,
|
||||
// so that we won't miss any dependent's termination.
|
||||
// A change in the secret is detected using the hash of its contents.
|
||||
clients map[secretRef]savedClient
|
||||
}
|
||||
|
||||
func NewMultiGitHubClient(client resourceReader, githubClient *github.Client) *MultiGitHubClient {
|
||||
return &MultiGitHubClient{
|
||||
client: client,
|
||||
githubClient: githubClient,
|
||||
clients: map[secretRef]savedClient{},
|
||||
}
|
||||
}
|
||||
|
||||
// InitForRunnerPod sets up and returns the *github.Client for the object.
|
||||
// In case the object (like RunnerDeployment) does not request a custom client, it returns the default client.
|
||||
func (c *MultiGitHubClient) InitForRunnerPod(ctx context.Context, pod *corev1.Pod) (*github.Client, error) {
|
||||
// These 3 default values are used only when the user created the pod directly, not via Runner, RunnerReplicaSet, RunnerDeployment, or RunnerSet resources.
|
||||
ref := refFromRunnerPod(pod)
|
||||
secretName := pod.Annotations[annotationKeyGitHubAPICredsSecret]
|
||||
|
||||
// kind can be any of Pod, Runner, RunnerReplicaSet, RunnerDeployment, or RunnerSet depending on which custom resource the user directly created.
|
||||
return c.initClientWithSecretName(ctx, pod.Namespace, secretName, ref)
|
||||
}
|
||||
|
||||
// InitForRunner sets up and returns the *github.Client for the object.
|
||||
// In case the object (like RunnerDeployment) does not request a custom client, it returns the default client.
|
||||
func (c *MultiGitHubClient) InitForRunner(ctx context.Context, r *v1alpha1.Runner) (*github.Client, error) {
|
||||
var secretName string
|
||||
if r.Spec.GitHubAPICredentialsFrom != nil {
|
||||
secretName = r.Spec.GitHubAPICredentialsFrom.SecretRef.Name
|
||||
}
|
||||
|
||||
// These 3 default values are used only when the user created the runner resource directly, not via RunnerReplicaSet, RunnerDeployment, or RunnerSet resources.
|
||||
ref := refFromRunner(r)
|
||||
if ref.ns != r.Namespace {
|
||||
return nil, fmt.Errorf("referencing github api creds secret from owner in another namespace is not supported yet")
|
||||
}
|
||||
|
||||
// kind can be any of Runner, RunnerReplicaSet, or RunnerDeployment depending on which custom resource the user directly created.
|
||||
return c.initClientWithSecretName(ctx, r.Namespace, secretName, ref)
|
||||
}
|
||||
|
||||
// InitForRunnerSet sets up and returns the *github.Client for the object.
|
||||
// In case the object (like RunnerDeployment) does not request a custom client, it returns the default client.
|
||||
func (c *MultiGitHubClient) InitForRunnerSet(ctx context.Context, rs *v1alpha1.RunnerSet) (*github.Client, error) {
|
||||
ref := refFromRunnerSet(rs)
|
||||
|
||||
var secretName string
|
||||
if rs.Spec.GitHubAPICredentialsFrom != nil {
|
||||
secretName = rs.Spec.GitHubAPICredentialsFrom.SecretRef.Name
|
||||
}
|
||||
|
||||
return c.initClientWithSecretName(ctx, rs.Namespace, secretName, ref)
|
||||
}
|
||||
|
||||
// InitForHRA sets up and returns the *github.Client for the object.
|
||||
// In case the object (like RunnerDeployment) does not request a custom client, it returns the default client.
|
||||
func (c *MultiGitHubClient) InitForHRA(ctx context.Context, hra *v1alpha1.HorizontalRunnerAutoscaler) (*github.Client, error) {
|
||||
ref := refFromHorizontalRunnerAutoscaler(hra)
|
||||
|
||||
var secretName string
|
||||
if hra.Spec.GitHubAPICredentialsFrom != nil {
|
||||
secretName = hra.Spec.GitHubAPICredentialsFrom.SecretRef.Name
|
||||
}
|
||||
|
||||
return c.initClientWithSecretName(ctx, hra.Namespace, secretName, ref)
|
||||
}
|
||||
|
||||
func (c *MultiGitHubClient) DeinitForRunnerPod(p *corev1.Pod) {
|
||||
secretName := p.Annotations[annotationKeyGitHubAPICredsSecret]
|
||||
c.derefClient(p.Namespace, secretName, refFromRunnerPod(p))
|
||||
}
|
||||
|
||||
func (c *MultiGitHubClient) DeinitForRunner(r *v1alpha1.Runner) {
|
||||
var secretName string
|
||||
if r.Spec.GitHubAPICredentialsFrom != nil {
|
||||
secretName = r.Spec.GitHubAPICredentialsFrom.SecretRef.Name
|
||||
}
|
||||
|
||||
c.derefClient(r.Namespace, secretName, refFromRunner(r))
|
||||
}
|
||||
|
||||
func (c *MultiGitHubClient) DeinitForRunnerSet(rs *v1alpha1.RunnerSet) {
|
||||
var secretName string
|
||||
if rs.Spec.GitHubAPICredentialsFrom != nil {
|
||||
secretName = rs.Spec.GitHubAPICredentialsFrom.SecretRef.Name
|
||||
}
|
||||
|
||||
c.derefClient(rs.Namespace, secretName, refFromRunnerSet(rs))
|
||||
}
|
||||
|
||||
func (c *MultiGitHubClient) deinitClientForRunnerReplicaSet(rs *v1alpha1.RunnerReplicaSet) {
|
||||
c.derefClient(rs.Namespace, rs.Spec.Template.Spec.GitHubAPICredentialsFrom.SecretRef.Name, refFromRunnerReplicaSet(rs))
|
||||
}
|
||||
|
||||
func (c *MultiGitHubClient) deinitClientForRunnerDeployment(rd *v1alpha1.RunnerDeployment) {
|
||||
c.derefClient(rd.Namespace, rd.Spec.Template.Spec.GitHubAPICredentialsFrom.SecretRef.Name, refFromRunnerDeployment(rd))
|
||||
}
|
||||
|
||||
func (c *MultiGitHubClient) DeinitForHRA(hra *v1alpha1.HorizontalRunnerAutoscaler) {
|
||||
var secretName string
|
||||
if hra.Spec.GitHubAPICredentialsFrom != nil {
|
||||
secretName = hra.Spec.GitHubAPICredentialsFrom.SecretRef.Name
|
||||
}
|
||||
|
||||
c.derefClient(hra.Namespace, secretName, refFromHorizontalRunnerAutoscaler(hra))
|
||||
}
|
||||
|
||||
func (c *MultiGitHubClient) initClientForSecret(secret *corev1.Secret, dependent *runnerOwnerRef) (*savedClient, error) {
|
||||
secRef := secretRef{
|
||||
ns: secret.Namespace,
|
||||
name: secret.Name,
|
||||
}
|
||||
|
||||
cliRef := c.clients[secRef]
|
||||
|
||||
var ks []string
|
||||
|
||||
for k := range secret.Data {
|
||||
ks = append(ks, k)
|
||||
}
|
||||
|
||||
sort.SliceStable(ks, func(i, j int) bool { return ks[i] < ks[j] })
|
||||
|
||||
hash := sha1.New()
|
||||
for _, k := range ks {
|
||||
hash.Write(secret.Data[k])
|
||||
}
|
||||
hashStr := hex.EncodeToString(hash.Sum(nil))
|
||||
|
||||
if cliRef.hash != hashStr {
|
||||
delete(c.clients, secRef)
|
||||
|
||||
conf, err := secretDataToGitHubClientConfig(secret.Data)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cli, err := conf.NewClient()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cliRef = savedClient{
|
||||
hash: hashStr,
|
||||
refs: map[runnerOwnerRef]struct{}{},
|
||||
Client: cli,
|
||||
}
|
||||
|
||||
c.clients[secRef] = cliRef
|
||||
}
|
||||
|
||||
if dependent != nil {
|
||||
c.clients[secRef].refs[*dependent] = struct{}{}
|
||||
}
|
||||
|
||||
return &cliRef, nil
|
||||
}
|
||||
|
||||
func (c *MultiGitHubClient) initClientWithSecretName(ctx context.Context, ns, secretName string, runRef *runnerOwnerRef) (*github.Client, error) {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
|
||||
if secretName == "" {
|
||||
return c.githubClient, nil
|
||||
}
|
||||
|
||||
secRef := secretRef{
|
||||
ns: ns,
|
||||
name: secretName,
|
||||
}
|
||||
|
||||
if _, ok := c.clients[secRef]; !ok {
|
||||
c.clients[secRef] = savedClient{}
|
||||
}
|
||||
|
||||
var sec corev1.Secret
|
||||
if err := c.client.Get(ctx, types.NamespacedName{Namespace: ns, Name: secretName}, &sec); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
savedClient, err := c.initClientForSecret(&sec, runRef)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return savedClient.Client, nil
|
||||
}
|
||||
|
||||
func (c *MultiGitHubClient) derefClient(ns, secretName string, dependent *runnerOwnerRef) {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
|
||||
secRef := secretRef{
|
||||
ns: ns,
|
||||
name: secretName,
|
||||
}
|
||||
|
||||
if dependent != nil {
|
||||
delete(c.clients[secRef].refs, *dependent)
|
||||
}
|
||||
|
||||
cliRef := c.clients[secRef]
|
||||
|
||||
if dependent == nil || len(cliRef.refs) == 0 {
|
||||
delete(c.clients, secRef)
|
||||
}
|
||||
}
|
||||
|
||||
func decodeBase64(s []byte) (string, error) {
|
||||
enc := base64.RawStdEncoding
|
||||
dbuf := make([]byte, enc.DecodedLen(len(s)))
|
||||
n, err := enc.Decode(dbuf, []byte(s))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return string(dbuf[:n]), nil
|
||||
}
|
||||
|
||||
func secretDataToGitHubClientConfig(data map[string][]byte) (*github.Config, error) {
|
||||
var (
|
||||
conf github.Config
|
||||
|
||||
err error
|
||||
)
|
||||
|
||||
conf.URL, err = decodeBase64(data["github_url"])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
conf.UploadURL, err = decodeBase64(data["github_upload_url"])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
conf.EnterpriseURL, err = decodeBase64(data["github_enterprise_url"])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
conf.RunnerGitHubURL, err = decodeBase64(data["github_runner_url"])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
conf.Token, err = decodeBase64(data["github_token"])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
appID, err := decodeBase64(data["github_app_id"])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
conf.AppID, err = strconv.ParseInt(appID, 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
instID, err := decodeBase64(data["github_app_installation_id"])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
conf.AppInstallationID, err = strconv.ParseInt(instID, 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
conf.AppPrivateKey, err = decodeBase64(data["github_app_private_key"])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &conf, nil
|
||||
}
|
||||
|
||||
func refFromRunnerDeployment(rd *v1alpha1.RunnerDeployment) *runnerOwnerRef {
|
||||
return &runnerOwnerRef{
|
||||
kind: rd.Kind,
|
||||
ns: rd.Namespace,
|
||||
name: rd.Name,
|
||||
}
|
||||
}
|
||||
|
||||
func refFromRunnerReplicaSet(rs *v1alpha1.RunnerReplicaSet) *runnerOwnerRef {
|
||||
return &runnerOwnerRef{
|
||||
kind: rs.Kind,
|
||||
ns: rs.Namespace,
|
||||
name: rs.Name,
|
||||
}
|
||||
}
|
||||
|
||||
func refFromRunner(r *v1alpha1.Runner) *runnerOwnerRef {
|
||||
return &runnerOwnerRef{
|
||||
kind: r.Kind,
|
||||
ns: r.Namespace,
|
||||
name: r.Name,
|
||||
}
|
||||
}
|
||||
|
||||
func refFromRunnerPod(po *corev1.Pod) *runnerOwnerRef {
|
||||
return &runnerOwnerRef{
|
||||
kind: po.Kind,
|
||||
ns: po.Namespace,
|
||||
name: po.Name,
|
||||
}
|
||||
}
|
||||
func refFromRunnerSet(rs *v1alpha1.RunnerSet) *runnerOwnerRef {
|
||||
return &runnerOwnerRef{
|
||||
kind: rs.Kind,
|
||||
ns: rs.Namespace,
|
||||
name: rs.Name,
|
||||
}
|
||||
}
|
||||
|
||||
func refFromHorizontalRunnerAutoscaler(hra *v1alpha1.HorizontalRunnerAutoscaler) *runnerOwnerRef {
|
||||
return &runnerOwnerRef{
|
||||
kind: hra.Kind,
|
||||
ns: hra.Namespace,
|
||||
name: hra.Name,
|
||||
}
|
||||
}
|
||||
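The comments on MultiGitHubClient above describe a reference-counted, per-secret client cache. The following is a minimal sketch of the intended Init/Deinit pairing inside a reconciler, mirroring how the runner controller uses it elsewhere in this changeset; it is not part of this diff, and reconcileRunnerSketch is a hypothetical helper name.

func reconcileRunnerSketch(ctx context.Context, r *RunnerReconciler, runner *v1alpha1.Runner) error {
	if !runner.ObjectMeta.DeletionTimestamp.IsZero() {
		// Finalization path: drop this runner's reference so the cached client can be
		// freed once its last dependent disappears.
		r.GitHubClient.DeinitForRunner(runner)
		return nil
	}

	// Reconciliation path: returns the shared default client unless the runner references
	// its own credentials secret via spec.githubAPICredentialsFrom.
	ghc, err := r.GitHubClient.InitForRunner(ctx, runner)
	if err != nil {
		return err
	}
	_ = ghc // used for registration-token and runner API calls
	return nil
}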
@@ -7,12 +7,45 @@ import (
|
||||
"github.com/actions-runner-controller/actions-runner-controller/github"
|
||||
"github.com/stretchr/testify/require"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
)
|
||||
|
||||
func newWorkGenericEphemeralVolume(t *testing.T, storageReq string) corev1.Volume {
|
||||
GBs, err := resource.ParseQuantity(storageReq)
|
||||
if err != nil {
|
||||
t.Fatalf("%v", err)
|
||||
}
|
||||
|
||||
return corev1.Volume{
|
||||
Name: "work",
|
||||
VolumeSource: corev1.VolumeSource{
|
||||
Ephemeral: &corev1.EphemeralVolumeSource{
|
||||
VolumeClaimTemplate: &corev1.PersistentVolumeClaimTemplate{
|
||||
Spec: corev1.PersistentVolumeClaimSpec{
|
||||
AccessModes: []corev1.PersistentVolumeAccessMode{
|
||||
corev1.ReadWriteOnce,
|
||||
},
|
||||
StorageClassName: strPtr("runner-work-dir"),
|
||||
Resources: corev1.ResourceRequirements{
|
||||
Requests: corev1.ResourceList{
|
||||
corev1.ResourceStorage: GBs,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewRunnerPod(t *testing.T) {
|
||||
workGenericEphemeralVolume := newWorkGenericEphemeralVolume(t, "10Gi")
|
||||
|
||||
type testcase struct {
|
||||
description string
|
||||
|
||||
@@ -25,7 +58,7 @@ func TestNewRunnerPod(t *testing.T) {
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: map[string]string{
|
||||
"actions-runner-controller/inject-registration-token": "true",
|
||||
"runnerset-name": "runner",
|
||||
"actions-runner": "",
|
||||
},
|
||||
},
|
||||
Spec: corev1.PodSpec{
|
||||
@@ -94,6 +127,10 @@ func TestNewRunnerPod(t *testing.T) {
|
||||
Name: "RUNNER_EPHEMERAL",
|
||||
Value: "true",
|
||||
},
|
||||
{
|
||||
Name: "RUNNER_STATUS_UPDATE_HOOK",
|
||||
Value: "false",
|
||||
},
|
||||
{
|
||||
Name: "DOCKER_HOST",
|
||||
Value: "tcp://localhost:2376",
|
||||
@@ -106,10 +143,6 @@ func TestNewRunnerPod(t *testing.T) {
|
||||
Name: "DOCKER_CERT_PATH",
|
||||
Value: "/certs/client",
|
||||
},
|
||||
{
|
||||
Name: "RUNNER_FEATURE_FLAG_EPHEMERAL",
|
||||
Value: "true",
|
||||
},
|
||||
},
|
||||
VolumeMounts: []corev1.VolumeMount{
|
||||
{
|
||||
@@ -159,7 +192,7 @@ func TestNewRunnerPod(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
RestartPolicy: corev1.RestartPolicyOnFailure,
|
||||
RestartPolicy: corev1.RestartPolicyNever,
|
||||
},
|
||||
}
|
||||
|
||||
@@ -171,7 +204,7 @@ func TestNewRunnerPod(t *testing.T) {
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: map[string]string{
|
||||
"actions-runner-controller/inject-registration-token": "true",
|
||||
"runnerset-name": "runner",
|
||||
"actions-runner": "",
|
||||
},
|
||||
},
|
||||
Spec: corev1.PodSpec{
|
||||
@@ -229,8 +262,8 @@ func TestNewRunnerPod(t *testing.T) {
|
||||
Value: "true",
|
||||
},
|
||||
{
|
||||
Name: "RUNNER_FEATURE_FLAG_EPHEMERAL",
|
||||
Value: "true",
|
||||
Name: "RUNNER_STATUS_UPDATE_HOOK",
|
||||
Value: "false",
|
||||
},
|
||||
},
|
||||
VolumeMounts: []corev1.VolumeMount{
|
||||
@@ -245,7 +278,7 @@ func TestNewRunnerPod(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
RestartPolicy: corev1.RestartPolicyOnFailure,
|
||||
RestartPolicy: corev1.RestartPolicyNever,
|
||||
},
|
||||
}
|
||||
|
||||
@@ -253,7 +286,7 @@ func TestNewRunnerPod(t *testing.T) {
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: map[string]string{
|
||||
"actions-runner-controller/inject-registration-token": "true",
|
||||
"runnerset-name": "runner",
|
||||
"actions-runner": "",
|
||||
},
|
||||
},
|
||||
Spec: corev1.PodSpec{
|
||||
@@ -311,8 +344,8 @@ func TestNewRunnerPod(t *testing.T) {
|
||||
Value: "true",
|
||||
},
|
||||
{
|
||||
Name: "RUNNER_FEATURE_FLAG_EPHEMERAL",
|
||||
Value: "true",
|
||||
Name: "RUNNER_STATUS_UPDATE_HOOK",
|
||||
Value: "false",
|
||||
},
|
||||
},
|
||||
VolumeMounts: []corev1.VolumeMount{
|
||||
@@ -327,7 +360,7 @@ func TestNewRunnerPod(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
RestartPolicy: corev1.RestartPolicyOnFailure,
|
||||
RestartPolicy: corev1.RestartPolicyNever,
|
||||
},
|
||||
}
|
||||
|
||||
@@ -400,8 +433,87 @@ func TestNewRunnerPod(t *testing.T) {
|
||||
DockerEnabled: boolPtr(false),
|
||||
},
|
||||
want: newTestPod(dockerDisabled, func(p *corev1.Pod) {
|
||||
// TODO
|
||||
// p.Spec.Containers[0].SecurityContext.Privileged = boolPtr(true)
|
||||
p.Spec.Containers[0].SecurityContext.Privileged = boolPtr(true)
|
||||
}),
|
||||
},
|
||||
{
|
||||
description: "Mount generic ephemeral volume onto work (with explicit volumeMount)",
|
||||
template: corev1.Pod{
|
||||
Spec: corev1.PodSpec{
|
||||
Containers: []corev1.Container{
|
||||
{
|
||||
Name: "runner",
|
||||
VolumeMounts: []corev1.VolumeMount{
|
||||
{
|
||||
Name: "work",
|
||||
MountPath: "/runner/_work",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Volumes: []corev1.Volume{
|
||||
workGenericEphemeralVolume,
|
||||
},
|
||||
},
|
||||
},
|
||||
want: newTestPod(base, func(p *corev1.Pod) {
|
||||
p.Spec.Volumes = []corev1.Volume{
|
||||
workGenericEphemeralVolume,
|
||||
{
|
||||
Name: "runner",
|
||||
VolumeSource: corev1.VolumeSource{
|
||||
EmptyDir: &corev1.EmptyDirVolumeSource{},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "certs-client",
|
||||
VolumeSource: corev1.VolumeSource{
|
||||
EmptyDir: &corev1.EmptyDirVolumeSource{},
|
||||
},
|
||||
},
|
||||
}
|
||||
p.Spec.Containers[0].VolumeMounts = []corev1.VolumeMount{
|
||||
{
|
||||
Name: "work",
|
||||
MountPath: "/runner/_work",
|
||||
},
|
||||
{
|
||||
Name: "runner",
|
||||
MountPath: "/runner",
|
||||
},
|
||||
{
|
||||
Name: "certs-client",
|
||||
MountPath: "/certs/client",
|
||||
ReadOnly: true,
|
||||
},
|
||||
}
|
||||
}),
|
||||
},
|
||||
{
|
||||
description: "Mount generic ephemeral volume onto work (without explicit volumeMount)",
|
||||
template: corev1.Pod{
|
||||
Spec: corev1.PodSpec{
|
||||
Volumes: []corev1.Volume{
|
||||
workGenericEphemeralVolume,
|
||||
},
|
||||
},
|
||||
},
|
||||
want: newTestPod(base, func(p *corev1.Pod) {
|
||||
p.Spec.Volumes = []corev1.Volume{
|
||||
workGenericEphemeralVolume,
|
||||
{
|
||||
Name: "runner",
|
||||
VolumeSource: corev1.VolumeSource{
|
||||
EmptyDir: &corev1.EmptyDirVolumeSource{},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "certs-client",
|
||||
VolumeSource: corev1.VolumeSource{
|
||||
EmptyDir: &corev1.EmptyDirVolumeSource{},
|
||||
},
|
||||
},
|
||||
}
|
||||
}),
|
||||
},
|
||||
}
|
||||
@@ -417,14 +529,20 @@ func TestNewRunnerPod(t *testing.T) {
|
||||
for i := range testcases {
|
||||
tc := testcases[i]
|
||||
t.Run(tc.description, func(t *testing.T) {
|
||||
got, err := newRunnerPod("runner", tc.template, tc.config, defaultRunnerImage, defaultRunnerImagePullSecrets, defaultDockerImage, defaultDockerRegistryMirror, githubBaseURL, false)
|
||||
got, err := newRunnerPod(tc.template, tc.config, defaultRunnerImage, defaultRunnerImagePullSecrets, defaultDockerImage, defaultDockerRegistryMirror, githubBaseURL, false)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, tc.want, got)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func strPtr(s string) *string {
|
||||
return &s
|
||||
}
|
||||
|
||||
func TestNewRunnerPodFromRunnerController(t *testing.T) {
|
||||
workGenericEphemeralVolume := newWorkGenericEphemeralVolume(t, "10Gi")
|
||||
|
||||
type testcase struct {
|
||||
description string
|
||||
|
||||
@@ -442,7 +560,7 @@ func TestNewRunnerPodFromRunnerController(t *testing.T) {
|
||||
Labels: map[string]string{
|
||||
"actions-runner-controller/inject-registration-token": "true",
|
||||
"pod-template-hash": "8857b86c7",
|
||||
"runnerset-name": "runner",
|
||||
"actions-runner": "",
|
||||
},
|
||||
OwnerReferences: []metav1.OwnerReference{
|
||||
{
|
||||
@@ -520,6 +638,10 @@ func TestNewRunnerPodFromRunnerController(t *testing.T) {
|
||||
Name: "RUNNER_EPHEMERAL",
|
||||
Value: "true",
|
||||
},
|
||||
{
|
||||
Name: "RUNNER_STATUS_UPDATE_HOOK",
|
||||
Value: "false",
|
||||
},
|
||||
{
|
||||
Name: "DOCKER_HOST",
|
||||
Value: "tcp://localhost:2376",
|
||||
@@ -532,10 +654,6 @@ func TestNewRunnerPodFromRunnerController(t *testing.T) {
|
||||
Name: "DOCKER_CERT_PATH",
|
||||
Value: "/certs/client",
|
||||
},
|
||||
{
|
||||
Name: "RUNNER_FEATURE_FLAG_EPHEMERAL",
|
||||
Value: "true",
|
||||
},
|
||||
{
|
||||
Name: "RUNNER_NAME",
|
||||
Value: "runner",
|
||||
@@ -593,7 +711,7 @@ func TestNewRunnerPodFromRunnerController(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
RestartPolicy: corev1.RestartPolicyOnFailure,
|
||||
RestartPolicy: corev1.RestartPolicyNever,
|
||||
},
|
||||
}
|
||||
|
||||
@@ -603,7 +721,7 @@ func TestNewRunnerPodFromRunnerController(t *testing.T) {
|
||||
Labels: map[string]string{
|
||||
"actions-runner-controller/inject-registration-token": "true",
|
||||
"pod-template-hash": "8857b86c7",
|
||||
"runnerset-name": "runner",
|
||||
"actions-runner": "",
|
||||
},
|
||||
OwnerReferences: []metav1.OwnerReference{
|
||||
{
|
||||
@@ -670,8 +788,8 @@ func TestNewRunnerPodFromRunnerController(t *testing.T) {
|
||||
Value: "true",
|
||||
},
|
||||
{
|
||||
Name: "RUNNER_FEATURE_FLAG_EPHEMERAL",
|
||||
Value: "true",
|
||||
Name: "RUNNER_STATUS_UPDATE_HOOK",
|
||||
Value: "false",
|
||||
},
|
||||
{
|
||||
Name: "RUNNER_NAME",
|
||||
@@ -694,7 +812,7 @@ func TestNewRunnerPodFromRunnerController(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
RestartPolicy: corev1.RestartPolicyOnFailure,
|
||||
RestartPolicy: corev1.RestartPolicyNever,
|
||||
},
|
||||
}
|
||||
|
||||
@@ -704,7 +822,7 @@ func TestNewRunnerPodFromRunnerController(t *testing.T) {
|
||||
Labels: map[string]string{
|
||||
"actions-runner-controller/inject-registration-token": "true",
|
||||
"pod-template-hash": "8857b86c7",
|
||||
"runnerset-name": "runner",
|
||||
"actions-runner": "",
|
||||
},
|
||||
OwnerReferences: []metav1.OwnerReference{
|
||||
{
|
||||
@@ -771,8 +889,8 @@ func TestNewRunnerPodFromRunnerController(t *testing.T) {
|
||||
Value: "true",
|
||||
},
|
||||
{
|
||||
Name: "RUNNER_FEATURE_FLAG_EPHEMERAL",
|
||||
Value: "true",
|
||||
Name: "RUNNER_STATUS_UPDATE_HOOK",
|
||||
Value: "false",
|
||||
},
|
||||
{
|
||||
Name: "RUNNER_NAME",
|
||||
@@ -795,7 +913,7 @@ func TestNewRunnerPodFromRunnerController(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
RestartPolicy: corev1.RestartPolicyOnFailure,
|
||||
RestartPolicy: corev1.RestartPolicyNever,
|
||||
},
|
||||
}
|
||||
|
||||
@@ -904,7 +1022,97 @@ func TestNewRunnerPodFromRunnerController(t *testing.T) {
|
||||
},
|
||||
|
||||
want: newTestPod(dockerDisabled, func(p *corev1.Pod) {
|
||||
// p.Spec.Containers[0].SecurityContext.Privileged = boolPtr(true)
|
||||
p.Spec.Containers[0].SecurityContext.Privileged = boolPtr(true)
|
||||
}),
|
||||
},
|
||||
{
|
||||
description: "Mount generic ephemeral volume onto work (with explicit volumeMount)",
|
||||
runner: arcv1alpha1.Runner{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "runner",
|
||||
},
|
||||
Spec: arcv1alpha1.RunnerSpec{
|
||||
RunnerPodSpec: arcv1alpha1.RunnerPodSpec{
|
||||
Containers: []corev1.Container{
|
||||
{
|
||||
Name: "runner",
|
||||
VolumeMounts: []corev1.VolumeMount{
|
||||
{
|
||||
Name: "work",
|
||||
MountPath: "/runner/_work",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Volumes: []corev1.Volume{
|
||||
workGenericEphemeralVolume,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
want: newTestPod(base, func(p *corev1.Pod) {
|
||||
p.Spec.Volumes = []corev1.Volume{
|
||||
{
|
||||
Name: "runner",
|
||||
VolumeSource: corev1.VolumeSource{
|
||||
EmptyDir: &corev1.EmptyDirVolumeSource{},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "certs-client",
|
||||
VolumeSource: corev1.VolumeSource{
|
||||
EmptyDir: &corev1.EmptyDirVolumeSource{},
|
||||
},
|
||||
},
|
||||
workGenericEphemeralVolume,
|
||||
}
|
||||
p.Spec.Containers[0].VolumeMounts = []corev1.VolumeMount{
|
||||
{
|
||||
Name: "work",
|
||||
MountPath: "/runner/_work",
|
||||
},
|
||||
{
|
||||
Name: "runner",
|
||||
MountPath: "/runner",
|
||||
},
|
||||
{
|
||||
Name: "certs-client",
|
||||
MountPath: "/certs/client",
|
||||
ReadOnly: true,
|
||||
},
|
||||
}
|
||||
}),
|
||||
},
|
||||
{
|
||||
description: "Mount generic ephemeral volume onto work (without explicit volumeMount)",
|
||||
runner: arcv1alpha1.Runner{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "runner",
|
||||
},
|
||||
Spec: arcv1alpha1.RunnerSpec{
|
||||
RunnerPodSpec: arcv1alpha1.RunnerPodSpec{
|
||||
Volumes: []corev1.Volume{
|
||||
workGenericEphemeralVolume,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
want: newTestPod(base, func(p *corev1.Pod) {
|
||||
p.Spec.Volumes = []corev1.Volume{
|
||||
{
|
||||
Name: "runner",
|
||||
VolumeSource: corev1.VolumeSource{
|
||||
EmptyDir: &corev1.EmptyDirVolumeSource{},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "certs-client",
|
||||
VolumeSource: corev1.VolumeSource{
|
||||
EmptyDir: &corev1.EmptyDirVolumeSource{},
|
||||
},
|
||||
},
|
||||
workGenericEphemeralVolume,
|
||||
}
|
||||
}),
|
||||
},
|
||||
}
|
||||
@@ -923,13 +1131,20 @@ func TestNewRunnerPodFromRunnerController(t *testing.T) {
|
||||
|
||||
for i := range testcases {
|
||||
tc := testcases[i]
|
||||
|
||||
rr := &testResourceReader{
|
||||
objects: map[types.NamespacedName]client.Object{},
|
||||
}
|
||||
|
||||
multiClient := NewMultiGitHubClient(rr, &github.Client{GithubBaseURL: githubBaseURL})
|
||||
|
||||
t.Run(tc.description, func(t *testing.T) {
|
||||
r := &RunnerReconciler{
|
||||
RunnerImage: defaultRunnerImage,
|
||||
RunnerImagePullSecrets: defaultRunnerImagePullSecrets,
|
||||
DockerImage: defaultDockerImage,
|
||||
DockerRegistryMirror: defaultDockerRegistryMirror,
|
||||
GitHubClient: &github.Client{GithubBaseURL: githubBaseURL},
|
||||
GitHubClient: multiClient,
|
||||
Scheme: scheme,
|
||||
}
|
||||
got, err := r.newPod(tc.runner)
|
||||
|
||||
74
controllers/persistent_volume_claim_controller.go
Normal file
@@ -0,0 +1,74 @@
|
||||
/*
|
||||
Copyright 2022 The actions-runner-controller authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package controllers
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/go-logr/logr"
|
||||
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/client-go/tools/record"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
)
|
||||
|
||||
// RunnerPersistentVolumeClaimReconciler reconciles a PersistentVolume object
|
||||
type RunnerPersistentVolumeClaimReconciler struct {
|
||||
client.Client
|
||||
Log logr.Logger
|
||||
Recorder record.EventRecorder
|
||||
Scheme *runtime.Scheme
|
||||
Name string
|
||||
}
|
||||
|
||||
// +kubebuilder:rbac:groups=core,resources=persistentvolumeclaims,verbs=get;list;watch;update;patch;delete
|
||||
// +kubebuilder:rbac:groups=core,resources=persistentvolumes,verbs=get;list;watch;update;patch;delete
|
||||
// +kubebuilder:rbac:groups=core,resources=events,verbs=create;patch
|
||||
|
||||
func (r *RunnerPersistentVolumeClaimReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
|
||||
log := r.Log.WithValues("pvc", req.NamespacedName)
|
||||
|
||||
var pvc corev1.PersistentVolumeClaim
|
||||
if err := r.Get(ctx, req.NamespacedName, &pvc); err != nil {
|
||||
return ctrl.Result{}, client.IgnoreNotFound(err)
|
||||
}
|
||||
|
||||
res, err := syncPVC(ctx, r.Client, log, req.Namespace, &pvc)
|
||||
|
||||
if res == nil {
|
||||
res = &ctrl.Result{}
|
||||
}
|
||||
|
||||
return *res, err
|
||||
}
|
||||
|
||||
func (r *RunnerPersistentVolumeClaimReconciler) SetupWithManager(mgr ctrl.Manager) error {
|
||||
name := "runnerpersistentvolumeclaim-controller"
|
||||
if r.Name != "" {
|
||||
name = r.Name
|
||||
}
|
||||
|
||||
r.Recorder = mgr.GetEventRecorderFor(name)
|
||||
|
||||
return ctrl.NewControllerManagedBy(mgr).
|
||||
For(&corev1.PersistentVolumeClaim{}).
|
||||
Named(name).
|
||||
Complete(r)
|
||||
}
|
||||
72
controllers/persistent_volume_controller.go
Normal file
@@ -0,0 +1,72 @@
|
||||
/*
|
||||
Copyright 2022 The actions-runner-controller authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package controllers
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/go-logr/logr"
|
||||
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/client-go/tools/record"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
)
|
||||
|
||||
// RunnerPersistentVolumeReconciler reconciles a PersistentVolume object
|
||||
type RunnerPersistentVolumeReconciler struct {
|
||||
client.Client
|
||||
Log logr.Logger
|
||||
Recorder record.EventRecorder
|
||||
Scheme *runtime.Scheme
|
||||
Name string
|
||||
}
|
||||
|
||||
// +kubebuilder:rbac:groups=core,resources=persistentvolumes,verbs=get;list;watch;update;patch;delete
|
||||
// +kubebuilder:rbac:groups=core,resources=events,verbs=create;patch
|
||||
|
||||
func (r *RunnerPersistentVolumeReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
|
||||
log := r.Log.WithValues("pv", req.NamespacedName)
|
||||
|
||||
var pv corev1.PersistentVolume
|
||||
if err := r.Get(ctx, req.NamespacedName, &pv); err != nil {
|
||||
return ctrl.Result{}, client.IgnoreNotFound(err)
|
||||
}
|
||||
|
||||
res, err := syncPV(ctx, r.Client, log, req.Namespace, &pv)
|
||||
if res == nil {
|
||||
res = &ctrl.Result{}
|
||||
}
|
||||
|
||||
return *res, err
|
||||
}
|
||||
|
||||
func (r *RunnerPersistentVolumeReconciler) SetupWithManager(mgr ctrl.Manager) error {
|
||||
name := "runnerpersistentvolume-controller"
|
||||
if r.Name != "" {
|
||||
name = r.Name
|
||||
}
|
||||
|
||||
r.Recorder = mgr.GetEventRecorderFor(name)
|
||||
|
||||
return ctrl.NewControllerManagedBy(mgr).
|
||||
For(&corev1.PersistentVolume{}).
|
||||
Named(name).
|
||||
Complete(r)
|
||||
}
|
||||
@@ -6,7 +6,6 @@ import (
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/actions-runner-controller/actions-runner-controller/github"
|
||||
"github.com/go-logr/logr"
|
||||
"gomodules.xyz/jsonpatch/v2"
|
||||
admissionv1 "k8s.io/api/admission/v1"
|
||||
@@ -29,7 +28,7 @@ type PodRunnerTokenInjector struct {
|
||||
Name string
|
||||
Log logr.Logger
|
||||
Recorder record.EventRecorder
|
||||
GitHubClient *github.Client
|
||||
GitHubClient *MultiGitHubClient
|
||||
decoder *admission.Decoder
|
||||
}
|
||||
|
||||
@@ -66,7 +65,12 @@ func (t *PodRunnerTokenInjector) Handle(ctx context.Context, req admission.Reque
|
||||
return newEmptyResponse()
|
||||
}
|
||||
|
||||
rt, err := t.GitHubClient.GetRegistrationToken(context.Background(), enterprise, org, repo, pod.Name)
|
||||
ghc, err := t.GitHubClient.InitForRunnerPod(ctx, &pod)
|
||||
if err != nil {
|
||||
return admission.Errored(http.StatusInternalServerError, err)
|
||||
}
|
||||
|
||||
rt, err := ghc.GetRegistrationToken(context.Background(), enterprise, org, repo, pod.Name)
|
||||
if err != nil {
|
||||
t.Log.Error(err, "Failed to get new registration token")
|
||||
return admission.Errored(http.StatusInternalServerError, err)
|
||||
@@ -78,9 +82,7 @@ func (t *PodRunnerTokenInjector) Handle(ctx context.Context, req admission.Reque
|
||||
|
||||
updated.Annotations[AnnotationKeyTokenExpirationDate] = ts
|
||||
|
||||
if pod.Spec.RestartPolicy != corev1.RestartPolicyOnFailure {
|
||||
updated.Spec.RestartPolicy = corev1.RestartPolicyOnFailure
|
||||
}
|
||||
forceRunnerPodRestartPolicyNever(updated)
|
||||
|
||||
buf, err := json.Marshal(updated)
|
||||
if err != nil {
|
||||
|
||||
@@ -18,7 +18,10 @@ package controllers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
@@ -33,10 +36,10 @@ import (
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
rbacv1 "k8s.io/api/rbac/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
"github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
|
||||
"github.com/actions-runner-controller/actions-runner-controller/github"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -47,15 +50,13 @@ const (
|
||||
|
||||
retryDelayOnGitHubAPIRateLimitError = 30 * time.Second
|
||||
|
||||
// This is an annotation internal to actions-runner-controller and can change in backward-incompatible ways
|
||||
annotationKeyRegistrationOnly = "actions-runner-controller/registration-only"
|
||||
|
||||
EnvVarOrg = "RUNNER_ORG"
|
||||
EnvVarRepo = "RUNNER_REPO"
|
||||
EnvVarEnterprise = "RUNNER_ENTERPRISE"
|
||||
EnvVarEphemeral = "RUNNER_EPHEMERAL"
|
||||
EnvVarRunnerFeatureFlagEphemeral = "RUNNER_FEATURE_FLAG_EPHEMERAL"
|
||||
EnvVarTrue = "true"
|
||||
EnvVarOrg = "RUNNER_ORG"
|
||||
EnvVarRepo = "RUNNER_REPO"
|
||||
EnvVarGroup = "RUNNER_GROUP"
|
||||
EnvVarLabels = "RUNNER_LABELS"
|
||||
EnvVarEnterprise = "RUNNER_ENTERPRISE"
|
||||
EnvVarEphemeral = "RUNNER_EPHEMERAL"
|
||||
EnvVarTrue = "true"
|
||||
)
|
||||
|
||||
// RunnerReconciler reconciles a Runner object
|
||||
@@ -64,7 +65,7 @@ type RunnerReconciler struct {
|
||||
Log logr.Logger
|
||||
Recorder record.EventRecorder
|
||||
Scheme *runtime.Scheme
|
||||
GitHubClient *github.Client
|
||||
GitHubClient *MultiGitHubClient
|
||||
RunnerImage string
|
||||
RunnerImagePullSecrets []string
|
||||
DockerImage string
|
||||
@@ -72,16 +73,20 @@ type RunnerReconciler struct {
|
||||
Name string
|
||||
RegistrationRecheckInterval time.Duration
|
||||
RegistrationRecheckJitter time.Duration
|
||||
|
||||
UnregistrationRetryDelay time.Duration
|
||||
UseRunnerStatusUpdateHook bool
|
||||
UnregistrationRetryDelay time.Duration
|
||||
}
|
||||
|
||||
// +kubebuilder:rbac:groups=actions.summerwind.dev,resources=runners,verbs=get;list;watch;create;update;patch;delete
|
||||
// +kubebuilder:rbac:groups=actions.summerwind.dev,resources=runners/finalizers,verbs=get;list;watch;create;update;patch;delete
|
||||
// +kubebuilder:rbac:groups=actions.summerwind.dev,resources=runners/status,verbs=get;update;patch
|
||||
// +kubebuilder:rbac:groups=core,resources=pods,verbs=get;list;watch;create;update;patch;delete
|
||||
// +kubebuilder:rbac:groups=core,resources=secrets,verbs=get;list;watch;delete
|
||||
// +kubebuilder:rbac:groups=core,resources=pods/finalizers,verbs=get;list;watch;create;update;patch;delete
|
||||
// +kubebuilder:rbac:groups=core,resources=events,verbs=create;patch
|
||||
// +kubebuilder:rbac:groups=core,resources=serviceaccounts,verbs=create;delete;get
|
||||
// +kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=roles,verbs=create;delete;get
|
||||
// +kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=rolebindings,verbs=create;delete;get
|
||||
|
||||
func (r *RunnerReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
|
||||
log := r.Log.WithValues("runner", req.NamespacedName)
|
||||
@@ -116,6 +121,9 @@ func (r *RunnerReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr
|
||||
// Pod was not found
|
||||
return r.processRunnerDeletion(runner, ctx, log, nil)
|
||||
}
|
||||
|
||||
r.GitHubClient.DeinitForRunner(&runner)
|
||||
|
||||
return r.processRunnerDeletion(runner, ctx, log, &pod)
|
||||
}
|
||||
|
||||
@@ -135,7 +143,7 @@ func (r *RunnerReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr
|
||||
|
||||
ready := runnerPodReady(&pod)
|
||||
|
||||
if runner.Status.Phase != phase || runner.Status.Ready != ready {
|
||||
if (runner.Status.Phase != phase || runner.Status.Ready != ready) && !r.UseRunnerStatusUpdateHook || runner.Status.Phase == "" && r.UseRunnerStatusUpdateHook {
|
||||
if pod.Status.Phase == corev1.PodRunning {
|
||||
// Seeing this message, you can expect the runner to become `Running` soon.
|
||||
log.V(1).Info(
|
||||
@@ -207,6 +215,24 @@ func runnerPodOrContainerIsStopped(pod *corev1.Pod) bool {
|
||||
return stopped
|
||||
}
|
||||
|
||||
func ephemeralRunnerContainerStatus(pod *corev1.Pod) *corev1.ContainerStatus {
|
||||
if getRunnerEnv(pod, "RUNNER_EPHEMERAL") != "true" {
|
||||
return nil
|
||||
}
|
||||
|
||||
for _, status := range pod.Status.ContainerStatuses {
|
||||
if status.Name != containerName {
|
||||
continue
|
||||
}
|
||||
|
||||
status := status
|
||||
|
||||
return &status
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *RunnerReconciler) processRunnerDeletion(runner v1alpha1.Runner, ctx context.Context, log logr.Logger, pod *corev1.Pod) (reconcile.Result, error) {
|
||||
finalizers, removed := removeFinalizer(runner.ObjectMeta.Finalizers, finalizerName)
|
||||
|
||||
@@ -238,6 +264,96 @@ func (r *RunnerReconciler) processRunnerCreation(ctx context.Context, runner v1a
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
needsServiceAccount := runner.Spec.ServiceAccountName == "" && (r.UseRunnerStatusUpdateHook || runner.Spec.ContainerMode == "kubernetes")
|
||||
if needsServiceAccount {
|
||||
serviceAccount := &corev1.ServiceAccount{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: runner.ObjectMeta.Name,
|
||||
Namespace: runner.ObjectMeta.Namespace,
|
||||
},
|
||||
}
|
||||
if res := r.createObject(ctx, serviceAccount, serviceAccount.ObjectMeta, &runner, log); res != nil {
|
||||
return *res, nil
|
||||
}
|
||||
|
||||
rules := []rbacv1.PolicyRule{}
|
||||
|
||||
if r.UseRunnerStatusUpdateHook {
|
||||
rules = append(rules, []rbacv1.PolicyRule{
|
||||
{
|
||||
APIGroups: []string{"actions.summerwind.dev"},
|
||||
Resources: []string{"runners/status"},
|
||||
Verbs: []string{"get", "update", "patch"},
|
||||
ResourceNames: []string{runner.ObjectMeta.Name},
|
||||
},
|
||||
}...)
|
||||
}
|
||||
|
||||
if runner.Spec.ContainerMode == "kubernetes" {
|
||||
// Permissions based on https://github.com/actions/runner-container-hooks/blob/main/packages/k8s/README.md
|
||||
rules = append(rules, []rbacv1.PolicyRule{
|
||||
{
|
||||
APIGroups: []string{""},
|
||||
Resources: []string{"pods"},
|
||||
Verbs: []string{"get", "list", "create", "delete"},
|
||||
},
|
||||
{
|
||||
APIGroups: []string{""},
|
||||
Resources: []string{"pods/exec"},
|
||||
Verbs: []string{"get", "create"},
|
||||
},
|
||||
{
|
||||
APIGroups: []string{""},
|
||||
Resources: []string{"pods/log"},
|
||||
Verbs: []string{"get", "list", "watch"},
|
||||
},
|
||||
{
|
||||
APIGroups: []string{"batch"},
|
||||
Resources: []string{"jobs"},
|
||||
Verbs: []string{"get", "list", "create", "delete"},
|
||||
},
|
||||
{
|
||||
APIGroups: []string{""},
|
||||
Resources: []string{"secrets"},
|
||||
Verbs: []string{"get", "list", "create", "delete"},
|
||||
},
|
||||
}...)
|
||||
}
|
||||
|
||||
role := &rbacv1.Role{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: runner.ObjectMeta.Name,
|
||||
Namespace: runner.ObjectMeta.Namespace,
|
||||
},
|
||||
Rules: rules,
|
||||
}
|
||||
if res := r.createObject(ctx, role, role.ObjectMeta, &runner, log); res != nil {
|
||||
return *res, nil
|
||||
}
|
||||
|
||||
roleBinding := &rbacv1.RoleBinding{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: runner.ObjectMeta.Name,
|
||||
Namespace: runner.ObjectMeta.Namespace,
|
||||
},
|
||||
RoleRef: rbacv1.RoleRef{
|
||||
APIGroup: "rbac.authorization.k8s.io",
|
||||
Kind: "Role",
|
||||
Name: runner.ObjectMeta.Name,
|
||||
},
|
||||
Subjects: []rbacv1.Subject{
|
||||
{
|
||||
Kind: "ServiceAccount",
|
||||
Name: runner.ObjectMeta.Name,
|
||||
Namespace: runner.ObjectMeta.Namespace,
|
||||
},
|
||||
},
|
||||
}
|
||||
if res := r.createObject(ctx, roleBinding, roleBinding.ObjectMeta, &runner, log); res != nil {
|
||||
return *res, nil
|
||||
}
|
||||
}
|
||||
|
||||
if err := r.Create(ctx, &newPod); err != nil {
|
||||
if kerrors.IsAlreadyExists(err) {
|
||||
// Gracefully handle pod-already-exists errors due to informer cache delay.
|
||||
@@ -260,6 +376,27 @@ func (r *RunnerReconciler) processRunnerCreation(ctx context.Context, runner v1a
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
func (r *RunnerReconciler) createObject(ctx context.Context, obj client.Object, meta metav1.ObjectMeta, runner *v1alpha1.Runner, log logr.Logger) *ctrl.Result {
|
||||
kind := strings.Split(reflect.TypeOf(obj).String(), ".")[1]
|
||||
if err := ctrl.SetControllerReference(runner, obj, r.Scheme); err != nil {
|
||||
log.Error(err, fmt.Sprintf("Could not add owner reference to %s %s. %s", kind, meta.Name, err.Error()))
|
||||
return &ctrl.Result{Requeue: true}
|
||||
}
|
||||
if err := r.Create(ctx, obj); err != nil {
|
||||
if kerrors.IsAlreadyExists(err) {
|
||||
log.Info(fmt.Sprintf("Failed to create %s %s as it already exists. Reusing existing %s", kind, meta.Name, kind))
|
||||
r.Recorder.Event(runner, corev1.EventTypeNormal, fmt.Sprintf("%sReused", kind), fmt.Sprintf("Reused %s '%s'", kind, meta.Name))
|
||||
return nil
|
||||
}
|
||||
|
||||
log.Error(err, fmt.Sprintf("Retrying as failed to create %s %s resource", kind, meta.Name))
|
||||
return &ctrl.Result{Requeue: true}
|
||||
}
|
||||
r.Recorder.Event(runner, corev1.EventTypeNormal, fmt.Sprintf("%sCreated", kind), fmt.Sprintf("Created %s '%s'", kind, meta.Name))
|
||||
log.Info(fmt.Sprintf("Created %s", kind), "name", meta.Name)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *RunnerReconciler) updateRegistrationToken(ctx context.Context, runner v1alpha1.Runner) (bool, error) {
|
||||
if runner.IsRegisterable() {
|
||||
return false, nil
|
||||
@@ -267,7 +404,12 @@ func (r *RunnerReconciler) updateRegistrationToken(ctx context.Context, runner v
|
||||
|
||||
log := r.Log.WithValues("runner", runner.Name)
|
||||
|
||||
rt, err := r.GitHubClient.GetRegistrationToken(ctx, runner.Spec.Enterprise, runner.Spec.Organization, runner.Spec.Repository, runner.Name)
|
||||
ghc, err := r.GitHubClient.InitForRunner(ctx, &runner)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
rt, err := ghc.GetRegistrationToken(ctx, runner.Spec.Enterprise, runner.Spec.Organization, runner.Spec.Repository, runner.Name)
|
||||
if err != nil {
|
||||
// An error can be a permanent, permission issue like the below:
|
||||
// POST https://api.github.com/enterprises/YOUR_ENTERPRISE/actions/runners/registration-token: 403 Resource not accessible by integration []
|
||||
@@ -307,6 +449,11 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) {
|
||||
labels[k] = v
|
||||
}
|
||||
|
||||
ghc, err := r.GitHubClient.InitForRunner(context.Background(), &runner)
|
||||
if err != nil {
|
||||
return corev1.Pod{}, err
|
||||
}
|
||||
|
||||
// This implies that...
|
||||
//
|
||||
// (1) We recreate the runner pod whenever the runner has changes in:
|
||||
@@ -330,7 +477,7 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) {
|
||||
filterLabels(runner.ObjectMeta.Labels, LabelKeyRunnerTemplateHash),
|
||||
runner.ObjectMeta.Annotations,
|
||||
runner.Spec,
|
||||
r.GitHubClient.GithubBaseURL,
|
||||
ghc.GithubBaseURL,
|
||||
// Token change should trigger replacement.
|
||||
// We need to include this explicitly here because
|
||||
// runner.Spec does not contain the possibly updated token stored in the
|
||||
@@ -349,31 +496,66 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) {
|
||||
|
||||
if len(runner.Spec.Containers) == 0 {
|
||||
template.Spec.Containers = append(template.Spec.Containers, corev1.Container{
|
||||
Name: "runner",
|
||||
ImagePullPolicy: runner.Spec.ImagePullPolicy,
|
||||
EnvFrom: runner.Spec.EnvFrom,
|
||||
Env: runner.Spec.Env,
|
||||
Resources: runner.Spec.Resources,
|
||||
Name: "runner",
|
||||
})
|
||||
|
||||
if (runner.Spec.DockerEnabled == nil || *runner.Spec.DockerEnabled) && (runner.Spec.DockerdWithinRunnerContainer == nil || !*runner.Spec.DockerdWithinRunnerContainer) {
|
||||
template.Spec.Containers = append(template.Spec.Containers, corev1.Container{
|
||||
Name: "docker",
|
||||
VolumeMounts: runner.Spec.DockerVolumeMounts,
|
||||
Resources: runner.Spec.DockerdContainerResources,
|
||||
Env: runner.Spec.DockerEnv,
|
||||
Name: "docker",
|
||||
})
|
||||
}
|
||||
} else {
|
||||
template.Spec.Containers = runner.Spec.Containers
|
||||
}
|
||||
|
||||
for i, c := range template.Spec.Containers {
|
||||
switch c.Name {
|
||||
case "runner":
|
||||
if c.ImagePullPolicy == "" {
|
||||
template.Spec.Containers[i].ImagePullPolicy = runner.Spec.ImagePullPolicy
|
||||
}
|
||||
if len(c.EnvFrom) == 0 {
|
||||
template.Spec.Containers[i].EnvFrom = runner.Spec.EnvFrom
|
||||
}
|
||||
if len(c.Env) == 0 {
|
||||
template.Spec.Containers[i].Env = runner.Spec.Env
|
||||
}
|
||||
if len(c.Resources.Requests) == 0 {
|
||||
template.Spec.Containers[i].Resources.Requests = runner.Spec.Resources.Requests
|
||||
}
|
||||
if len(c.Resources.Limits) == 0 {
|
||||
template.Spec.Containers[i].Resources.Limits = runner.Spec.Resources.Limits
|
||||
}
|
||||
case "docker":
|
||||
if len(c.VolumeMounts) == 0 {
|
||||
template.Spec.Containers[i].VolumeMounts = runner.Spec.DockerVolumeMounts
|
||||
}
|
||||
if len(c.Resources.Limits) == 0 {
|
||||
template.Spec.Containers[i].Resources.Limits = runner.Spec.DockerdContainerResources.Limits
|
||||
}
|
||||
if len(c.Resources.Requests) == 0 {
|
||||
template.Spec.Containers[i].Resources.Requests = runner.Spec.DockerdContainerResources.Requests
|
||||
}
|
||||
if len(c.Env) == 0 {
|
||||
template.Spec.Containers[i].Env = runner.Spec.DockerEnv
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
template.Spec.SecurityContext = runner.Spec.SecurityContext
|
||||
template.Spec.EnableServiceLinks = runner.Spec.EnableServiceLinks
|
||||
|
||||
registrationOnly := metav1.HasAnnotation(runner.ObjectMeta, annotationKeyRegistrationOnly)
|
||||
if runner.Spec.ContainerMode == "kubernetes" {
|
||||
workDir := runner.Spec.WorkDir
|
||||
if workDir == "" {
|
||||
workDir = "/runner/_work"
|
||||
}
|
||||
if err := applyWorkVolumeClaimTemplateToPod(&template, runner.Spec.WorkVolumeClaimTemplate, workDir); err != nil {
|
||||
return corev1.Pod{}, err
|
||||
}
|
||||
}
|
||||
|
||||
pod, err := newRunnerPod(runner.Name, template, runner.Spec.RunnerConfig, r.RunnerImage, r.RunnerImagePullSecrets, r.DockerImage, r.DockerRegistryMirror, r.GitHubClient.GithubBaseURL, registrationOnly)
|
||||
pod, err := newRunnerPodWithContainerMode(runner.Spec.ContainerMode, template, runner.Spec.RunnerConfig, r.RunnerImage, r.RunnerImagePullSecrets, r.DockerImage, r.DockerRegistryMirror, ghc.GithubBaseURL, r.UseRunnerStatusUpdateHook)
|
||||
if err != nil {
|
||||
return pod, err
|
||||
}
|
||||
@@ -385,6 +567,9 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) {
|
||||
// if the operator provides a work volume mount, use that
|
||||
isPresent, _ := workVolumeMountPresent(runnerSpec.VolumeMounts)
|
||||
if isPresent {
|
||||
if runnerSpec.ContainerMode == "kubernetes" {
|
||||
return pod, errors.New("volume mount \"work\" should be specified by workVolumeClaimTemplate in container mode kubernetes")
|
||||
}
|
||||
// remove work volume since it will be provided from runnerSpec.Volumes
|
||||
// if we don't remove it here we would get a duplicate key error, i.e. two volumes named work
|
||||
_, index := workVolumeMountPresent(pod.Spec.Containers[0].VolumeMounts)
|
||||
@@ -398,6 +583,9 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) {
|
||||
// if the operator provides a work volume, use that
|
||||
isPresent, _ := workVolumePresent(runnerSpec.Volumes)
|
||||
if isPresent {
|
||||
if runnerSpec.ContainerMode == "kubernetes" {
|
||||
return pod, errors.New("volume \"work\" should be specified by workVolumeClaimTemplate in container mode kubernetes")
|
||||
}
|
||||
_, index := workVolumePresent(pod.Spec.Volumes)
|
||||
|
||||
// remove work volume since it will be provided from runnerSpec.Volumes
|
||||
@@ -407,6 +595,7 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) {
|
||||
|
||||
pod.Spec.Volumes = append(pod.Spec.Volumes, runnerSpec.Volumes...)
|
||||
}
|
||||
|
||||
if len(runnerSpec.InitContainers) != 0 {
|
||||
pod.Spec.InitContainers = append(pod.Spec.InitContainers, runnerSpec.InitContainers...)
|
||||
}
|
||||
@@ -414,9 +603,13 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) {
|
||||
if runnerSpec.NodeSelector != nil {
|
||||
pod.Spec.NodeSelector = runnerSpec.NodeSelector
|
||||
}
|
||||
|
||||
if runnerSpec.ServiceAccountName != "" {
|
||||
pod.Spec.ServiceAccountName = runnerSpec.ServiceAccountName
|
||||
} else if r.UseRunnerStatusUpdateHook || runner.Spec.ContainerMode == "kubernetes" {
|
||||
pod.Spec.ServiceAccountName = runner.ObjectMeta.Name
|
||||
}
|
||||
|
||||
if runnerSpec.AutomountServiceAccountToken != nil {
|
||||
pod.Spec.AutomountServiceAccountToken = runnerSpec.AutomountServiceAccountToken
|
||||
}
|
||||
@@ -437,6 +630,10 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) {
|
||||
pod.Spec.Tolerations = runnerSpec.Tolerations
|
||||
}
|
||||
|
||||
if runnerSpec.PriorityClassName != "" {
|
||||
pod.Spec.PriorityClassName = runnerSpec.PriorityClassName
|
||||
}
|
||||
|
||||
if len(runnerSpec.TopologySpreadConstraints) != 0 {
|
||||
pod.Spec.TopologySpreadConstraints = runnerSpec.TopologySpreadConstraints
|
||||
}
|
||||
@@ -487,7 +684,45 @@ func mutatePod(pod *corev1.Pod, token string) *corev1.Pod {
|
||||
return updated
|
||||
}
|
||||
|
||||
func newRunnerPod(runnerName string, template corev1.Pod, runnerSpec v1alpha1.RunnerConfig, defaultRunnerImage string, defaultRunnerImagePullSecrets []string, defaultDockerImage, defaultDockerRegistryMirror string, githubBaseURL string, registrationOnly bool) (corev1.Pod, error) {
|
||||
func runnerHookEnvs(pod *corev1.Pod) ([]corev1.EnvVar, error) {
|
||||
isRequireSameNode, err := isRequireSameNode(pod)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return []corev1.EnvVar{
|
||||
{
|
||||
Name: "ACTIONS_RUNNER_CONTAINER_HOOKS",
|
||||
Value: defaultRunnerHookPath,
|
||||
},
|
||||
{
|
||||
Name: "ACTIONS_RUNNER_REQUIRE_JOB_CONTAINER",
|
||||
Value: "true",
|
||||
},
|
||||
{
|
||||
Name: "ACTIONS_RUNNER_POD_NAME",
|
||||
ValueFrom: &corev1.EnvVarSource{
|
||||
FieldRef: &corev1.ObjectFieldSelector{
|
||||
FieldPath: "metadata.name",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "ACTIONS_RUNNER_JOB_NAMESPACE",
|
||||
ValueFrom: &corev1.EnvVarSource{
|
||||
FieldRef: &corev1.ObjectFieldSelector{
|
||||
FieldPath: "metadata.namespace",
|
||||
},
|
||||
},
|
||||
},
|
||||
corev1.EnvVar{
|
||||
Name: "ACTIONS_RUNNER_REQUIRE_SAME_NODE",
|
||||
Value: strconv.FormatBool(isRequireSameNode),
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
func newRunnerPodWithContainerMode(containerMode string, template corev1.Pod, runnerSpec v1alpha1.RunnerConfig, defaultRunnerImage string, defaultRunnerImagePullSecrets []string, defaultDockerImage, defaultDockerRegistryMirror string, githubBaseURL string, useRunnerStatusUpdateHook bool) (corev1.Pod, error) {
|
||||
var (
|
||||
privileged bool = true
|
||||
dockerdInRunner bool = runnerSpec.DockerdWithinRunnerContainer != nil && *runnerSpec.DockerdWithinRunnerContainer
|
||||
@@ -496,11 +731,20 @@ func newRunnerPod(runnerName string, template corev1.Pod, runnerSpec v1alpha1.Ru
|
||||
dockerdInRunnerPrivileged bool = dockerdInRunner
|
||||
)
|
||||
|
||||
if containerMode == "kubernetes" {
|
||||
dockerdInRunner = false
|
||||
dockerEnabled = false
|
||||
dockerdInRunnerPrivileged = false
|
||||
}
|
||||
|
||||
template = *template.DeepCopy()
|
||||
|
||||
// This label selector is used by default when rd.Spec.Selector is empty.
|
||||
template.ObjectMeta.Labels = CloneAndAddLabel(template.ObjectMeta.Labels, LabelKeyRunnerSetName, runnerName)
|
||||
template.ObjectMeta.Labels = CloneAndAddLabel(template.ObjectMeta.Labels, LabelKeyRunner, "")
|
||||
template.ObjectMeta.Labels = CloneAndAddLabel(template.ObjectMeta.Labels, LabelKeyPodMutation, LabelValuePodMutation)
|
||||
if runnerSpec.GitHubAPICredentialsFrom != nil {
|
||||
template.ObjectMeta.Annotations = CloneAndAddLabel(template.ObjectMeta.Annotations, annotationKeyGitHubAPICredsSecret, runnerSpec.GitHubAPICredentialsFrom.SecretRef.Name)
|
||||
}
|
||||
|
||||
workDir := runnerSpec.WorkDir
|
||||
if workDir == "" {
|
||||
@@ -530,11 +774,11 @@ func newRunnerPod(runnerName string, template corev1.Pod, runnerSpec v1alpha1.Ru
|
||||
Value: runnerSpec.Enterprise,
|
||||
},
|
||||
{
|
||||
Name: "RUNNER_LABELS",
|
||||
Name: EnvVarLabels,
|
||||
Value: strings.Join(runnerSpec.Labels, ","),
|
||||
},
|
||||
{
|
||||
Name: "RUNNER_GROUP",
|
||||
Name: EnvVarGroup,
|
||||
Value: runnerSpec.Group,
|
||||
},
|
||||
{
|
||||
@@ -557,14 +801,10 @@ func newRunnerPod(runnerName string, template corev1.Pod, runnerSpec v1alpha1.Ru
|
||||
Name: EnvVarEphemeral,
|
||||
Value: fmt.Sprintf("%v", ephemeral),
|
||||
},
|
||||
}
|
||||
|
||||
if registrationOnly {
|
||||
env = append(env, corev1.EnvVar{
|
||||
Name: "RUNNER_REGISTRATION_ONLY",
|
||||
Value: "true",
|
||||
{
|
||||
Name: "RUNNER_STATUS_UPDATE_HOOK",
|
||||
Value: fmt.Sprintf("%v", useRunnerStatusUpdateHook),
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
var seLinuxOptions *corev1.SELinuxOptions
|
||||
@@ -590,6 +830,17 @@ func newRunnerPod(runnerName string, template corev1.Pod, runnerSpec v1alpha1.Ru
|
||||
}
|
||||
}
|
||||
|
||||
if containerMode == "kubernetes" {
|
||||
if dockerdContainer != nil {
|
||||
template.Spec.Containers = append(template.Spec.Containers[:dockerdContainerIndex], template.Spec.Containers[dockerdContainerIndex+1:]...)
|
||||
}
|
||||
if runnerContainerIndex < runnerContainerIndex {
|
||||
runnerContainerIndex--
|
||||
}
|
||||
dockerdContainer = nil
|
||||
dockerdContainerIndex = -1
|
||||
}
|
||||
|
||||
if runnerContainer == nil {
|
||||
runnerContainerIndex = -1
|
||||
runnerContainer = &corev1.Container{
|
||||
@@ -620,18 +871,26 @@ func newRunnerPod(runnerName string, template corev1.Pod, runnerSpec v1alpha1.Ru
|
||||
}
|
||||
|
||||
runnerContainer.Env = append(runnerContainer.Env, env...)
|
||||
if containerMode == "kubernetes" {
|
||||
hookEnvs, err := runnerHookEnvs(&template)
|
||||
if err != nil {
|
||||
return corev1.Pod{}, err
|
||||
}
|
||||
runnerContainer.Env = append(runnerContainer.Env, hookEnvs...)
|
||||
}
|
||||
|
||||
if runnerContainer.SecurityContext == nil {
|
||||
runnerContainer.SecurityContext = &corev1.SecurityContext{}
|
||||
}
|
||||
// Runner needs to run privileged if it contains DinD
|
||||
runnerContainer.SecurityContext.Privileged = &dockerdInRunnerPrivileged
|
||||
|
||||
if runnerContainer.SecurityContext.Privileged == nil {
|
||||
// Runner needs to run privileged if it contains DinD
|
||||
runnerContainer.SecurityContext.Privileged = &dockerdInRunnerPrivileged
|
||||
}
|
||||
|
||||
pod := template.DeepCopy()
|
||||
|
||||
if pod.Spec.RestartPolicy == "" {
|
||||
pod.Spec.RestartPolicy = "OnFailure"
|
||||
}
|
||||
forceRunnerPodRestartPolicyNever(pod)
|
||||
|
||||
if mtu := runnerSpec.DockerMTU; mtu != nil && dockerdInRunner {
|
||||
runnerContainer.Env = append(runnerContainer.Env, []corev1.EnvVar{
|
||||
@@ -709,13 +968,18 @@ func newRunnerPod(runnerName string, template corev1.Pod, runnerSpec v1alpha1.Ru
|
||||
)
|
||||
}
|
||||
|
||||
pod.Spec.Volumes = append(pod.Spec.Volumes,
|
||||
corev1.Volume{
|
||||
Name: "work",
|
||||
VolumeSource: corev1.VolumeSource{
|
||||
EmptyDir: &corev1.EmptyDirVolumeSource{},
|
||||
if ok, _ := workVolumePresent(pod.Spec.Volumes); !ok {
|
||||
pod.Spec.Volumes = append(pod.Spec.Volumes,
|
||||
corev1.Volume{
|
||||
Name: "work",
|
||||
VolumeSource: corev1.VolumeSource{
|
||||
EmptyDir: &corev1.EmptyDirVolumeSource{},
|
||||
},
|
||||
},
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
pod.Spec.Volumes = append(pod.Spec.Volumes,
|
||||
corev1.Volume{
|
||||
Name: "certs-client",
|
||||
VolumeSource: corev1.VolumeSource{
|
||||
@@ -724,11 +988,16 @@ func newRunnerPod(runnerName string, template corev1.Pod, runnerSpec v1alpha1.Ru
|
||||
},
|
||||
)
|
||||
|
||||
if ok, _ := workVolumeMountPresent(runnerContainer.VolumeMounts); !ok {
|
||||
runnerContainer.VolumeMounts = append(runnerContainer.VolumeMounts,
|
||||
corev1.VolumeMount{
|
||||
Name: "work",
|
||||
MountPath: workDir,
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
runnerContainer.VolumeMounts = append(runnerContainer.VolumeMounts,
|
||||
corev1.VolumeMount{
|
||||
Name: "work",
|
||||
MountPath: workDir,
|
||||
},
|
||||
corev1.VolumeMount{
|
||||
Name: "certs-client",
|
||||
MountPath: "/certs/client",
|
||||
@@ -830,15 +1099,13 @@ func newRunnerPod(runnerName string, template corev1.Pod, runnerSpec v1alpha1.Ru
|
||||
}
|
||||
}
|
||||
|
||||
// TODO Remove this once we remove RUNNER_FEATURE_FLAG_EPHEMERAL from runner's entrypoint.sh
|
||||
// and make --ephemeral the default option.
|
||||
if getRunnerEnv(pod, EnvVarRunnerFeatureFlagEphemeral) == "" {
|
||||
setRunnerEnv(pod, EnvVarRunnerFeatureFlagEphemeral, EnvVarTrue)
|
||||
}
|
||||
|
||||
return *pod, nil
|
||||
}
|
||||
|
||||
func newRunnerPod(template corev1.Pod, runnerSpec v1alpha1.RunnerConfig, defaultRunnerImage string, defaultRunnerImagePullSecrets []string, defaultDockerImage, defaultDockerRegistryMirror string, githubBaseURL string, useRunnerStatusUpdateHookEphemeralRole bool) (corev1.Pod, error) {
|
||||
return newRunnerPodWithContainerMode("", template, runnerSpec, defaultRunnerImage, defaultRunnerImagePullSecrets, defaultDockerImage, defaultDockerRegistryMirror, githubBaseURL, useRunnerStatusUpdateHookEphemeralRole)
|
||||
}
|
||||
|
||||
func (r *RunnerReconciler) SetupWithManager(mgr ctrl.Manager) error {
|
||||
name := "runner-controller"
|
||||
if r.Name != "" {
|
||||
@@ -901,3 +1168,71 @@ func workVolumeMountPresent(items []corev1.VolumeMount) (bool, int) {
|
||||
}
|
||||
return false, 0
|
||||
}
|
||||
|
||||
func applyWorkVolumeClaimTemplateToPod(pod *corev1.Pod, workVolumeClaimTemplate *v1alpha1.WorkVolumeClaimTemplate, workDir string) error {
|
||||
if workVolumeClaimTemplate == nil {
|
||||
return errors.New("work volume claim template must be specified in container mode kubernetes")
|
||||
}
|
||||
for i := range pod.Spec.Volumes {
|
||||
if pod.Spec.Volumes[i].Name == "work" {
|
||||
return fmt.Errorf("Work volume should not be specified in container mode kubernetes. workVolumeClaimTemplate field should be used instead.")
|
||||
}
|
||||
}
|
||||
pod.Spec.Volumes = append(pod.Spec.Volumes, workVolumeClaimTemplate.V1Volume())
|
||||
|
||||
var runnerContainer *corev1.Container
|
||||
for i := range pod.Spec.Containers {
|
||||
if pod.Spec.Containers[i].Name == "runner" {
|
||||
runnerContainer = &pod.Spec.Containers[i]
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if runnerContainer == nil {
|
||||
return fmt.Errorf("runner container is not present when applying work volume claim template")
|
||||
}
|
||||
|
||||
if isPresent, _ := workVolumeMountPresent(runnerContainer.VolumeMounts); isPresent {
|
||||
return fmt.Errorf("volume mount \"work\" should not be present on the runner container in container mode kubernetes")
|
||||
}
|
||||
|
||||
runnerContainer.VolumeMounts = append(runnerContainer.VolumeMounts, workVolumeClaimTemplate.V1VolumeMount(workDir))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// isRequireSameNode specifies, for the runner in kubernetes mode, whether it should
|
||||
// schedule jobs to the same node where the runner is
|
||||
//
|
||||
// This function should only be called in containerMode: kubernetes
|
||||
func isRequireSameNode(pod *corev1.Pod) (bool, error) {
|
||||
isPresent, index := workVolumePresent(pod.Spec.Volumes)
|
||||
if !isPresent {
|
||||
return true, errors.New("internal error: work volume mount must exist in containerMode: kubernetes")
|
||||
}
|
||||
|
||||
if pod.Spec.Volumes[index].Ephemeral == nil || pod.Spec.Volumes[index].Ephemeral.VolumeClaimTemplate == nil {
|
||||
return true, errors.New("containerMode: kubernetes should have pod.Spec.Volumes[].Ephemeral.VolumeClaimTemplate set")
|
||||
}
|
||||
|
||||
for _, accessMode := range pod.Spec.Volumes[index].Ephemeral.VolumeClaimTemplate.Spec.AccessModes {
|
||||
switch accessMode {
|
||||
case corev1.ReadWriteOnce:
|
||||
return true, nil
|
||||
case corev1.ReadWriteMany:
|
||||
default:
|
||||
return true, errors.New("actions-runner-controller supports ReadWriteOnce and ReadWriteMany modes only")
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
}
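
For context on the access-mode check above, here is a minimal sketch (assumed to live in the same controllers package; the function name and the claim values are illustrative, not part of this change) showing how a "work" volume declared via an ephemeral volume claim template is expected to map to the same-node requirement:

// exampleRequireSameNode is a hypothetical helper illustrating the mapping above.
func exampleRequireSameNode() (bool, error) {
	pod := &corev1.Pod{
		Spec: corev1.PodSpec{
			Volumes: []corev1.Volume{
				{
					Name: "work",
					VolumeSource: corev1.VolumeSource{
						Ephemeral: &corev1.EphemeralVolumeSource{
							VolumeClaimTemplate: &corev1.PersistentVolumeClaimTemplate{
								Spec: corev1.PersistentVolumeClaimSpec{
									// ReadWriteOnce -> jobs must be scheduled onto the runner's node (true).
									// ReadWriteMany -> jobs may run on any node (false).
									AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
								},
							},
						},
					},
				},
			},
		},
	}

	return isRequireSameNode(pod)
}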
|
||||
|
||||
func overwriteRunnerEnv(runner *v1alpha1.Runner, key string, value string) {
|
||||
for i := range runner.Spec.Env {
|
||||
if runner.Spec.Env[i].Name == key {
|
||||
runner.Spec.Env[i].Value = value
|
||||
return
|
||||
}
|
||||
}
|
||||
runner.Spec.Env = append(runner.Spec.Env, corev1.EnvVar{Name: key, Value: value})
|
||||
}
|
||||
|
||||
@@ -9,7 +9,7 @@ import (
|
||||
|
||||
"github.com/actions-runner-controller/actions-runner-controller/github"
|
||||
"github.com/go-logr/logr"
|
||||
gogithub "github.com/google/go-github/v39/github"
|
||||
gogithub "github.com/google/go-github/v45/github"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
@@ -113,10 +113,28 @@ func ensureRunnerUnregistration(ctx context.Context, retryDelay time.Duration, l
|
||||
// Happens e.g. when dind is in runner and run completes
|
||||
log.Info("Runner pod has been stopped with a successful status.")
|
||||
} else if pod != nil && pod.Annotations[AnnotationKeyRunnerCompletionWaitStartTimestamp] != "" {
|
||||
log.Info("Runner pod is annotated to wait for completion")
|
||||
ct := ephemeralRunnerContainerStatus(pod)
|
||||
if ct == nil {
|
||||
log.Info("Runner pod is annotated to wait for completion, and the runner container is not ephemeral")
|
||||
|
||||
return &ctrl.Result{RequeueAfter: retryDelay}, nil
|
||||
} else if ok, err := unregisterRunner(ctx, ghClient, enterprise, organization, repository, runner, *runnerID); err != nil {
|
||||
return &ctrl.Result{RequeueAfter: retryDelay}, nil
|
||||
}
|
||||
|
||||
lts := ct.LastTerminationState.Terminated
|
||||
if lts == nil {
|
||||
log.Info("Runner pod is annotated to wait for completion, and the runner container is not restarting")
|
||||
|
||||
return &ctrl.Result{RequeueAfter: retryDelay}, nil
|
||||
}
|
||||
|
||||
// Prevent the runner pod from getting stuck in Terminating.
|
||||
// See https://github.com/actions-runner-controller/actions-runner-controller/issues/1369
|
||||
log.Info("Deleting runner pod anyway because it has stopped prematurely. This may leave a dangling runner resource in GitHub Actions",
|
||||
"lastState.exitCode", lts.ExitCode,
|
||||
"lastState.message", lts.Message,
|
||||
"pod.phase", pod.Status.Phase,
|
||||
)
|
||||
} else if ok, err := unregisterRunner(ctx, ghClient, enterprise, organization, repository, *runnerID); err != nil {
|
||||
if errors.Is(err, &gogithub.RateLimitError{}) {
|
||||
// We log the underlying error when we failed to call the GitHub API to list or unregister runners,
|
||||
// or the runner is still busy.
|
||||
@@ -133,7 +151,10 @@ func ensureRunnerUnregistration(ctx context.Context, retryDelay time.Duration, l
|
||||
|
||||
log.V(1).Info("Failed to unregister runner before deleting the pod.", "error", err)
|
||||
|
||||
var runnerBusy bool
|
||||
var (
|
||||
runnerBusy bool
|
||||
runnerUnregistrationFailureMessage string
|
||||
)
|
||||
|
||||
errRes := &gogithub.ErrorResponse{}
|
||||
if errors.As(err, &errRes) {
|
||||
@@ -155,6 +176,7 @@ func ensureRunnerUnregistration(ctx context.Context, retryDelay time.Duration, l
|
||||
}
|
||||
|
||||
runnerBusy = errRes.Response.StatusCode == 422
|
||||
runnerUnregistrationFailureMessage = errRes.Message
|
||||
|
||||
if runnerBusy && code != nil {
|
||||
log.V(2).Info("Runner container has already stopped but the unregistration attempt failed. "+
|
||||
@@ -169,13 +191,18 @@ func ensureRunnerUnregistration(ctx context.Context, retryDelay time.Duration, l
|
||||
}
|
||||
|
||||
if runnerBusy {
|
||||
_, err := annotatePodOnce(ctx, c, log, pod, AnnotationKeyUnregistrationFailureMessage, runnerUnregistrationFailureMessage)
|
||||
if err != nil {
|
||||
return &ctrl.Result{}, err
|
||||
}
|
||||
|
||||
// We want to prevent spamming the deletion attempts but returning ctrl.Result with RequeueAfter doesn't
// work as the reconciliation can happen earlier due to a pod status update.
|
||||
// For ephemeral runners, we can expect it to stop and unregister itself on completion.
|
||||
// So we can just wait for the completion without actively retrying unregistration.
|
||||
ephemeral := getRunnerEnv(pod, EnvVarEphemeral)
|
||||
if ephemeral == "true" {
|
||||
pod, err = annotatePodOnce(ctx, c, log, pod, AnnotationKeyRunnerCompletionWaitStartTimestamp, time.Now().Format(time.RFC3339))
|
||||
_, err = annotatePodOnce(ctx, c, log, pod, AnnotationKeyRunnerCompletionWaitStartTimestamp, time.Now().Format(time.RFC3339))
|
||||
if err != nil {
|
||||
return &ctrl.Result{}, err
|
||||
}
|
||||
@@ -352,7 +379,7 @@ func setRunnerEnv(pod *corev1.Pod, key, value string) {
|
||||
// There isn't a single right grace period that works for everyone.
|
||||
// The longer the grace period is, the earlier a cluster resource shortage can occur due to throttled runner pod deletions,
|
||||
// while the shorter the grace period is, the more likely you may encounter the race issue.
|
||||
func unregisterRunner(ctx context.Context, client *github.Client, enterprise, org, repo, name string, id int64) (bool, error) {
|
||||
func unregisterRunner(ctx context.Context, client *github.Client, enterprise, org, repo string, id int64) (bool, error) {
|
||||
// For the record, historically ARC did not try to call RemoveRunner on a busy runner, but it's no longer true.
|
||||
// The reason ARC did so was to let a runner running a job to not stop prematurely.
|
||||
//
|
||||
|
||||
22
controllers/runner_pod.go
Normal file
@@ -0,0 +1,22 @@
package controllers

import corev1 "k8s.io/api/core/v1"

// Force the runner pod managed by either RunnerDeployment or RunnerSet to have restartPolicy=Never.
// See https://github.com/actions-runner-controller/actions-runner-controller/issues/1369 for more context.
//
// This is to prevent runner pods from getting stuck in Terminating when a K8s node disappeared along with the runner pod and the runner container within it.
//
// Previously we used a restartPolicy of OnFailure; that turned out to be wrong, and therefore we now set Never.
//
// When the restartPolicy is OnFailure and the node disappeared, runner pods on the node seem to get stuck with state.terminated==nil, state.waiting!=nil, and state.lastTerminationState!=nil,
// and will never become Running.
// It's probably because the node onto which the pods have been scheduled will never come back, hence the container restart attempts will never succeed,
// and the pods stay stuck waiting for successful restarts forever.
//
// By forcing runner pods to never restart, we hope there will be no chance of pods being stuck waiting.
func forceRunnerPodRestartPolicyNever(pod *corev1.Pod) {
if pod.Spec.RestartPolicy != corev1.RestartPolicyNever {
pod.Spec.RestartPolicy = corev1.RestartPolicyNever
}
}
@@ -20,6 +20,7 @@ import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/go-logr/logr"
|
||||
@@ -31,8 +32,6 @@ import (
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
|
||||
"github.com/actions-runner-controller/actions-runner-controller/github"
|
||||
)
|
||||
|
||||
// RunnerPodReconciler reconciles a Runner object
|
||||
@@ -41,7 +40,7 @@ type RunnerPodReconciler struct {
|
||||
Log logr.Logger
|
||||
Recorder record.EventRecorder
|
||||
Scheme *runtime.Scheme
|
||||
GitHubClient *github.Client
|
||||
GitHubClient *MultiGitHubClient
|
||||
Name string
|
||||
RegistrationRecheckInterval time.Duration
|
||||
RegistrationRecheckJitter time.Duration
|
||||
@@ -50,6 +49,7 @@ type RunnerPodReconciler struct {
|
||||
}
|
||||
|
||||
// +kubebuilder:rbac:groups=core,resources=pods,verbs=get;list;watch;update;patch;delete
|
||||
// +kubebuilder:rbac:groups=core,resources=secrets,verbs=get;list;watch
|
||||
// +kubebuilder:rbac:groups=core,resources=events,verbs=create;patch
|
||||
|
||||
func (r *RunnerPodReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
|
||||
@@ -60,8 +60,11 @@ func (r *RunnerPodReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
|
||||
return ctrl.Result{}, client.IgnoreNotFound(err)
|
||||
}
|
||||
|
||||
_, isRunnerPod := runnerPod.Labels[LabelKeyRunnerSetName]
|
||||
if !isRunnerPod {
|
||||
_, isRunnerPod := runnerPod.Labels[LabelKeyRunner]
|
||||
_, isRunnerSetPod := runnerPod.Labels[LabelKeyRunnerSetName]
|
||||
_, isRunnerDeploymentPod := runnerPod.Labels[LabelKeyRunnerDeploymentName]
|
||||
|
||||
if !isRunnerPod && !isRunnerSetPod && !isRunnerDeploymentPod {
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
@@ -77,6 +80,7 @@ func (r *RunnerPodReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
|
||||
}
|
||||
|
||||
var enterprise, org, repo string
|
||||
var isContainerMode bool
|
||||
|
||||
for _, e := range envvars {
|
||||
switch e.Name {
|
||||
@@ -86,13 +90,25 @@ func (r *RunnerPodReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
|
||||
org = e.Value
|
||||
case EnvVarRepo:
|
||||
repo = e.Value
|
||||
case "ACTIONS_RUNNER_CONTAINER_HOOKS":
|
||||
isContainerMode = true
|
||||
}
|
||||
}
|
||||
|
||||
ghc, err := r.GitHubClient.InitForRunnerPod(ctx, &runnerPod)
|
||||
if err != nil {
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
if runnerPod.ObjectMeta.DeletionTimestamp.IsZero() {
|
||||
finalizers, added := addFinalizer(runnerPod.ObjectMeta.Finalizers, runnerPodFinalizerName)
|
||||
|
||||
if added {
|
||||
var cleanupFinalizersAdded bool
|
||||
if isContainerMode {
|
||||
finalizers, cleanupFinalizersAdded = addFinalizer(finalizers, runnerLinkedResourcesFinalizerName)
|
||||
}
|
||||
|
||||
if added || cleanupFinalizersAdded {
|
||||
newRunner := runnerPod.DeepCopy()
|
||||
newRunner.ObjectMeta.Finalizers = finalizers
|
||||
|
||||
@@ -108,13 +124,34 @@ func (r *RunnerPodReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
|
||||
} else {
|
||||
log.V(2).Info("Seen deletion-timestamp is already set")
|
||||
|
||||
if finalizers, removed := removeFinalizer(runnerPod.ObjectMeta.Finalizers, runnerLinkedResourcesFinalizerName); removed {
|
||||
if err := r.cleanupRunnerLinkedPods(ctx, &runnerPod, log); err != nil {
|
||||
log.Info("Runner-linked pods clean up that has failed due to an error. If this persists, please manually remove the runner-linked pods to unblock ARC", "err", err.Error())
|
||||
return ctrl.Result{Requeue: true, RequeueAfter: 30 * time.Second}, nil
|
||||
}
|
||||
if err := r.cleanupRunnerLinkedSecrets(ctx, &runnerPod, log); err != nil {
|
||||
log.Info("Runner-linked secrets clean up that has failed due to an error. If this persists, please manually remove the runner-linked secrets to unblock ARC", "err", err.Error())
|
||||
return ctrl.Result{Requeue: true, RequeueAfter: 30 * time.Second}, nil
|
||||
}
|
||||
patchedPod := runnerPod.DeepCopy()
|
||||
patchedPod.ObjectMeta.Finalizers = finalizers
|
||||
|
||||
if err := r.Patch(ctx, patchedPod, client.MergeFrom(&runnerPod)); err != nil {
|
||||
log.Error(err, "Failed to update runner for finalizer linked resources removal")
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
// Otherwise the subsequent patch request can revive the removed finalizer and trigger an unnecessary reconciliation
|
||||
runnerPod = *patchedPod
|
||||
}
|
||||
|
||||
finalizers, removed := removeFinalizer(runnerPod.ObjectMeta.Finalizers, runnerPodFinalizerName)
|
||||
|
||||
if removed {
|
||||
// In a standard scenario, the upstream controller, like runnerset-controller, ensures this runner is gracefully stopped before the deletion timestamp is set.
// But in case the user manually deleted it for whatever reason,
// we have to ensure it gracefully stops now.
|
||||
updatedPod, res, err := tickRunnerGracefulStop(ctx, r.unregistrationRetryDelay(), log, r.GitHubClient, r.Client, enterprise, org, repo, runnerPod.Name, &runnerPod)
|
||||
updatedPod, res, err := tickRunnerGracefulStop(ctx, r.unregistrationRetryDelay(), log, ghc, r.Client, enterprise, org, repo, runnerPod.Name, &runnerPod)
|
||||
if res != nil {
|
||||
return *res, err
|
||||
}
|
||||
@@ -130,6 +167,8 @@ func (r *RunnerPodReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
|
||||
|
||||
log.V(2).Info("Removed finalizer")
|
||||
|
||||
r.GitHubClient.DeinitForRunnerPod(updatedPod)
|
||||
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
@@ -168,7 +207,7 @@ func (r *RunnerPodReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
po, res, err := ensureRunnerPodRegistered(ctx, log, r.GitHubClient, r.Client, enterprise, org, repo, runnerPod.Name, &runnerPod)
|
||||
po, res, err := ensureRunnerPodRegistered(ctx, log, ghc, r.Client, enterprise, org, repo, runnerPod.Name, &runnerPod)
|
||||
if res != nil {
|
||||
return *res, err
|
||||
}
|
||||
@@ -182,7 +221,7 @@ func (r *RunnerPodReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
|
||||
//
|
||||
// In a standard scenario, ARC starts the unregistration process before marking the pod for deletion at all,
|
||||
// so that it isn't subject to terminationGracePeriod and can safely take hours to finish its work.
|
||||
_, res, err := tickRunnerGracefulStop(ctx, r.unregistrationRetryDelay(), log, r.GitHubClient, r.Client, enterprise, org, repo, runnerPod.Name, &runnerPod)
|
||||
_, res, err := tickRunnerGracefulStop(ctx, r.unregistrationRetryDelay(), log, ghc, r.Client, enterprise, org, repo, runnerPod.Name, &runnerPod)
|
||||
if res != nil {
|
||||
return *res, err
|
||||
}
|
||||
@@ -222,3 +261,93 @@ func (r *RunnerPodReconciler) SetupWithManager(mgr ctrl.Manager) error {
|
||||
Named(name).
|
||||
Complete(r)
|
||||
}
|
||||
|
||||
func (r *RunnerPodReconciler) cleanupRunnerLinkedPods(ctx context.Context, pod *corev1.Pod, log logr.Logger) error {
|
||||
var runnerLinkedPodList corev1.PodList
|
||||
if err := r.List(ctx, &runnerLinkedPodList, client.InNamespace(pod.Namespace), client.MatchingLabels(
|
||||
map[string]string{
|
||||
"runner-pod": pod.ObjectMeta.Name,
|
||||
},
|
||||
)); err != nil {
|
||||
return fmt.Errorf("failed to list runner-linked pods: %w", err)
|
||||
}
|
||||
|
||||
var (
|
||||
wg sync.WaitGroup
|
||||
errs []error
|
||||
)
|
||||
for _, p := range runnerLinkedPodList.Items {
|
||||
if !p.ObjectMeta.DeletionTimestamp.IsZero() {
|
||||
continue
|
||||
}
|
||||
|
||||
p := p
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
if err := r.Delete(ctx, &p); err != nil {
|
||||
if kerrors.IsNotFound(err) || kerrors.IsGone(err) {
|
||||
return
|
||||
}
|
||||
errs = append(errs, fmt.Errorf("delete pod %q error: %v", p.ObjectMeta.Name, err))
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
|
||||
if len(errs) > 0 {
|
||||
for _, err := range errs {
|
||||
log.Error(err, "failed to remove runner-linked pod")
|
||||
}
|
||||
return errors.New("failed to remove some runner linked pods")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *RunnerPodReconciler) cleanupRunnerLinkedSecrets(ctx context.Context, pod *corev1.Pod, log logr.Logger) error {
|
||||
log.V(2).Info("Listing runner-linked secrets to be deleted", "ns", pod.Namespace)
|
||||
|
||||
var runnerLinkedSecretList corev1.SecretList
|
||||
if err := r.List(ctx, &runnerLinkedSecretList, client.InNamespace(pod.Namespace), client.MatchingLabels(
|
||||
map[string]string{
|
||||
"runner-pod": pod.ObjectMeta.Name,
|
||||
},
|
||||
)); err != nil {
|
||||
return fmt.Errorf("failed to list runner-linked secrets: %w", err)
|
||||
}
|
||||
|
||||
var (
|
||||
wg sync.WaitGroup
|
||||
errs []error
|
||||
)
|
||||
for _, s := range runnerLinkedSecretList.Items {
|
||||
if !s.ObjectMeta.DeletionTimestamp.IsZero() {
|
||||
continue
|
||||
}
|
||||
|
||||
s := s
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
if err := r.Delete(ctx, &s); err != nil {
|
||||
if kerrors.IsNotFound(err) || kerrors.IsGone(err) {
|
||||
return
|
||||
}
|
||||
errs = append(errs, fmt.Errorf("delete secret %q error: %v", s.ObjectMeta.Name, err))
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
|
||||
if len(errs) > 0 {
|
||||
for _, err := range errs {
|
||||
log.Error(err, "failed to remove runner-linked secret")
|
||||
}
|
||||
return errors.New("failed to remove some runner linked secrets")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -179,7 +179,10 @@ func (r *RunnerDeploymentReconciler) Reconcile(ctx context.Context, req ctrl.Req
|
||||
newDesiredReplicas := getIntOrDefault(desiredRS.Spec.Replicas, defaultReplicas)
|
||||
|
||||
// Please add more conditions under which we can in-place update the newest runnerreplicaset without disruption
|
||||
if currentDesiredReplicas != newDesiredReplicas {
|
||||
//
|
||||
// If we missed taking the EffectiveTime diff into account, you might end up experiencing scale-ups being delayed after a scale-down.
|
||||
// See https://github.com/actions-runner-controller/actions-runner-controller/pull/1477#issuecomment-1164154496
|
||||
if currentDesiredReplicas != newDesiredReplicas || newestSet.Spec.EffectiveTime != rd.Spec.EffectiveTime {
|
||||
newestSet.Spec.Replicas = &newDesiredReplicas
|
||||
newestSet.Spec.EffectiveTime = rd.Spec.EffectiveTime
|
||||
|
||||
@@ -421,9 +424,7 @@ func getSelector(rd *v1alpha1.RunnerDeployment) *metav1.LabelSelector {
|
||||
func newRunnerReplicaSet(rd *v1alpha1.RunnerDeployment, commonRunnerLabels []string, scheme *runtime.Scheme) (*v1alpha1.RunnerReplicaSet, error) {
|
||||
newRSTemplate := *rd.Spec.Template.DeepCopy()
|
||||
|
||||
for _, l := range commonRunnerLabels {
|
||||
newRSTemplate.Spec.Labels = append(newRSTemplate.Spec.Labels, l)
|
||||
}
|
||||
newRSTemplate.Spec.Labels = append(newRSTemplate.Spec.Labels, commonRunnerLabels...)
|
||||
|
||||
templateHash := ComputeHash(&newRSTemplate)
|
||||
|
||||
|
||||
@@ -32,17 +32,15 @@ import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
"github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
|
||||
"github.com/actions-runner-controller/actions-runner-controller/github"
|
||||
)
|
||||
|
||||
// RunnerReplicaSetReconciler reconciles a Runner object
|
||||
type RunnerReplicaSetReconciler struct {
|
||||
client.Client
|
||||
Log logr.Logger
|
||||
Recorder record.EventRecorder
|
||||
Scheme *runtime.Scheme
|
||||
GitHubClient *github.Client
|
||||
Name string
|
||||
Log logr.Logger
|
||||
Recorder record.EventRecorder
|
||||
Scheme *runtime.Scheme
|
||||
Name string
|
||||
}
|
||||
|
||||
const (
|
||||
@@ -205,7 +203,3 @@ func (r *RunnerReplicaSetReconciler) SetupWithManager(mgr ctrl.Manager) error {
|
||||
Named(name).
|
||||
Complete(r)
|
||||
}
|
||||
|
||||
func registrationOnlyRunnerNameFor(rsName string) string {
|
||||
return rsName + "-registration-only"
|
||||
}
|
||||
|
||||
@@ -52,15 +52,13 @@ func SetupTest(ctx2 context.Context) *corev1.Namespace {
|
||||
|
||||
runnersList = fake.NewRunnersList()
|
||||
server = runnersList.GetServer()
|
||||
ghClient := newGithubClient(server)
|
||||
|
||||
controller := &RunnerReplicaSetReconciler{
|
||||
Client: mgr.GetClient(),
|
||||
Scheme: scheme.Scheme,
|
||||
Log: logf.Log,
|
||||
Recorder: mgr.GetEventRecorderFor("runnerreplicaset-controller"),
|
||||
GitHubClient: ghClient,
|
||||
Name: "runnerreplicaset-" + ns.Name,
|
||||
Client: mgr.GetClient(),
|
||||
Scheme: scheme.Scheme,
|
||||
Log: logf.Log,
|
||||
Recorder: mgr.GetEventRecorderFor("runnerreplicaset-controller"),
|
||||
Name: "runnerreplicaset-" + ns.Name,
|
||||
}
|
||||
err = controller.SetupWithManager(mgr)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to setup controller")
|
||||
|
||||
@@ -45,12 +45,13 @@ type RunnerSetReconciler struct {
|
||||
Recorder record.EventRecorder
|
||||
Scheme *runtime.Scheme
|
||||
|
||||
CommonRunnerLabels []string
|
||||
GitHubBaseURL string
|
||||
RunnerImage string
|
||||
RunnerImagePullSecrets []string
|
||||
DockerImage string
|
||||
DockerRegistryMirror string
|
||||
CommonRunnerLabels []string
|
||||
GitHubClient *MultiGitHubClient
|
||||
RunnerImage string
|
||||
RunnerImagePullSecrets []string
|
||||
DockerImage string
|
||||
DockerRegistryMirror string
|
||||
UseRunnerStatusUpdateHook bool
|
||||
}
|
||||
|
||||
// +kubebuilder:rbac:groups=actions.summerwind.dev,resources=runnersets,verbs=get;list;watch;create;update;patch;delete
|
||||
@@ -58,6 +59,7 @@ type RunnerSetReconciler struct {
|
||||
// +kubebuilder:rbac:groups=actions.summerwind.dev,resources=runnersets/status,verbs=get;update;patch
|
||||
// +kubebuilder:rbac:groups=apps,resources=statefulsets,verbs=get;list;watch;create;update;patch;delete
|
||||
// +kubebuilder:rbac:groups=apps,resources=statefulsets/status,verbs=get;update;patch
|
||||
// +kubebuilder:rbac:groups=core,resources=persistentvolumeclaims,verbs=get;list;watch;create;update;patch;delete
|
||||
// +kubebuilder:rbac:groups=core,resources=events,verbs=create;patch
|
||||
// +kubebuilder:rbac:groups=coordination.k8s.io,resources=leases,verbs=get;list;create;update
|
||||
|
||||
@@ -79,6 +81,8 @@ func (r *RunnerSetReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
|
||||
}
|
||||
|
||||
if !runnerSet.ObjectMeta.DeletionTimestamp.IsZero() {
|
||||
r.GitHubClient.DeinitForRunnerSet(runnerSet)
|
||||
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
@@ -96,7 +100,7 @@ func (r *RunnerSetReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
desiredStatefulSet, err := r.newStatefulSet(runnerSet)
|
||||
desiredStatefulSet, err := r.newStatefulSet(ctx, runnerSet)
|
||||
if err != nil {
|
||||
r.Recorder.Event(runnerSet, corev1.EventTypeNormal, "RunnerAutoscalingFailure", err.Error())
|
||||
|
||||
@@ -129,6 +133,12 @@ func (r *RunnerSetReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
|
||||
owners = append(owners, &ss)
|
||||
}
|
||||
|
||||
if res, err := syncVolumes(ctx, r.Client, log, req.Namespace, runnerSet, statefulsets); err != nil {
|
||||
return ctrl.Result{}, err
|
||||
} else if res != nil {
|
||||
return *res, nil
|
||||
}
|
||||
|
||||
res, err := syncRunnerPodsOwners(ctx, r.Client, log, effectiveTime, newDesiredReplicas, func() client.Object { return create.DeepCopy() }, ephemeral, owners)
|
||||
if err != nil || res == nil {
|
||||
return ctrl.Result{}, err
|
||||
@@ -178,19 +188,50 @@ func getRunnerSetSelector(runnerSet *v1alpha1.RunnerSet) *metav1.LabelSelector {
|
||||
var LabelKeyPodMutation = "actions-runner-controller/inject-registration-token"
|
||||
var LabelValuePodMutation = "true"
|
||||
|
||||
func (r *RunnerSetReconciler) newStatefulSet(runnerSet *v1alpha1.RunnerSet) (*appsv1.StatefulSet, error) {
|
||||
func (r *RunnerSetReconciler) newStatefulSet(ctx context.Context, runnerSet *v1alpha1.RunnerSet) (*appsv1.StatefulSet, error) {
|
||||
runnerSetWithOverrides := *runnerSet.Spec.DeepCopy()
|
||||
|
||||
for _, l := range r.CommonRunnerLabels {
|
||||
runnerSetWithOverrides.Labels = append(runnerSetWithOverrides.Labels, l)
|
||||
}
|
||||
runnerSetWithOverrides.Labels = append(runnerSetWithOverrides.Labels, r.CommonRunnerLabels...)
|
||||
|
||||
template := corev1.Pod{
|
||||
ObjectMeta: runnerSetWithOverrides.StatefulSetSpec.Template.ObjectMeta,
|
||||
Spec: runnerSetWithOverrides.StatefulSetSpec.Template.Spec,
|
||||
}
|
||||
|
||||
pod, err := newRunnerPod(runnerSet.Name, template, runnerSet.Spec.RunnerConfig, r.RunnerImage, r.RunnerImagePullSecrets, r.DockerImage, r.DockerRegistryMirror, r.GitHubBaseURL, false)
|
||||
if runnerSet.Spec.RunnerConfig.ContainerMode == "kubernetes" {
|
||||
found := false
|
||||
for i := range template.Spec.Containers {
|
||||
if template.Spec.Containers[i].Name == containerName {
|
||||
found = true
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
template.Spec.Containers = append(template.Spec.Containers, corev1.Container{
|
||||
Name: "runner",
|
||||
})
|
||||
}
|
||||
|
||||
workDir := runnerSet.Spec.RunnerConfig.WorkDir
|
||||
if workDir == "" {
|
||||
workDir = "/runner/_work"
|
||||
}
|
||||
if err := applyWorkVolumeClaimTemplateToPod(&template, runnerSet.Spec.WorkVolumeClaimTemplate, workDir); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
template.Spec.ServiceAccountName = runnerSet.Spec.ServiceAccountName
|
||||
}
|
||||
|
||||
template.ObjectMeta.Labels = CloneAndAddLabel(template.ObjectMeta.Labels, LabelKeyRunnerSetName, runnerSet.Name)
|
||||
|
||||
ghc, err := r.GitHubClient.InitForRunnerSet(ctx, runnerSet)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
githubBaseURL := ghc.GithubBaseURL
|
||||
|
||||
pod, err := newRunnerPodWithContainerMode(runnerSet.Spec.RunnerConfig.ContainerMode, template, runnerSet.Spec.RunnerConfig, r.RunnerImage, r.RunnerImagePullSecrets, r.DockerImage, r.DockerRegistryMirror, githubBaseURL, r.UseRunnerStatusUpdateHook)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -605,3 +605,13 @@ func parseAndMatchRecurringPeriod(now time.Time, start, end, frequency, until st
|
||||
|
||||
return MatchSchedule(now, startTime, endTime, RecurrenceRule{Frequency: frequency, UntilTime: untilTime})
|
||||
}
|
||||
|
||||
func FuzzMatchSchedule(f *testing.F) {
|
||||
start := time.Now()
|
||||
end := time.Now()
|
||||
now := time.Now()
|
||||
f.Fuzz(func(t *testing.T, freq string) {
|
||||
// Verify that it never panics
|
||||
_, _, _ = MatchSchedule(now, start, end, RecurrenceRule{Frequency: freq})
|
||||
})
|
||||
}
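
The fuzz target above only asserts that MatchSchedule never panics. As a side note, a seeded variant could look like the following sketch (assumed to sit in the same test file and package; the seed strings are illustrative, not taken from this change):

// FuzzMatchScheduleSeeded is a hypothetical variant that pre-seeds the corpus via f.Add.
func FuzzMatchScheduleSeeded(f *testing.F) {
	start := time.Now()
	end := start.Add(time.Hour)
	now := start.Add(30 * time.Minute)

	// Seed the corpus with a few representative frequency strings (illustrative values).
	for _, seed := range []string{"", "Daily", "Weekly", "not-a-frequency"} {
		f.Add(seed)
	}

	f.Fuzz(func(t *testing.T, freq string) {
		// As in FuzzMatchSchedule above, only verify that it never panics.
		_, _, _ = MatchSchedule(now, start, end, RecurrenceRule{Frequency: freq})
	})
}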
|
||||
|
||||
181
controllers/sync_volumes.go
Normal file
@@ -0,0 +1,181 @@
|
||||
package controllers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
|
||||
"github.com/go-logr/logr"
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
kerrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
)
|
||||
|
||||
const (
|
||||
labelKeyCleanup = "pending-cleanup"
|
||||
labelKeyRunnerStatefulSetName = "runner-statefulset-name"
|
||||
)
|
||||
|
||||
func syncVolumes(ctx context.Context, c client.Client, log logr.Logger, ns string, runnerSet *v1alpha1.RunnerSet, statefulsets []appsv1.StatefulSet) (*ctrl.Result, error) {
|
||||
log = log.WithValues("ns", ns)
|
||||
|
||||
for _, t := range runnerSet.Spec.StatefulSetSpec.VolumeClaimTemplates {
|
||||
for _, sts := range statefulsets {
|
||||
pvcName := fmt.Sprintf("%s-%s-0", t.Name, sts.Name)
|
||||
|
||||
var pvc corev1.PersistentVolumeClaim
|
||||
if err := c.Get(ctx, types.NamespacedName{Namespace: ns, Name: pvcName}, &pvc); err != nil {
|
||||
if !kerrors.IsNotFound(err) {
|
||||
return nil, err
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// TODO move this to statefulset reconciler so that we spam this less,
|
||||
// by starting the loop only after the statefulset got deletionTimestamp set.
|
||||
// Perhaps you can just wrap this in a finalizer here.
|
||||
if pvc.Labels[labelKeyRunnerStatefulSetName] == "" {
|
||||
updated := pvc.DeepCopy()
|
||||
updated.Labels[labelKeyRunnerStatefulSetName] = sts.Name
|
||||
if err := c.Update(ctx, updated); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
log.V(1).Info("Added runner-statefulset-name label to PVC", "sts", sts.Name, "pvc", pvcName)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// PVs are not namespaced hence we don't need client.InNamespace(ns).
|
||||
// If we added that, c.List will silently return zero items.
|
||||
//
|
||||
// This `List` needs to be done in a dedicated reconciler that is registered to the manager via the `For` func.
|
||||
// Otherwise the List func might return outdated contents (I saw status.phase being Bound even after K8s updated it to Released, and it lasted minutes).
|
||||
//
|
||||
// cleanupLabels := map[string]string{
|
||||
// labelKeyCleanup: runnerSet.Name,
|
||||
// }
|
||||
// pvList := &corev1.PersistentVolumeList{}
|
||||
// if err := c.List(ctx, pvList, client.MatchingLabels(cleanupLabels)); err != nil {
|
||||
// log.Info("retrying pv listing", "ns", ns, "err", err)
|
||||
// return nil, err
|
||||
// }
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func syncPVC(ctx context.Context, c client.Client, log logr.Logger, ns string, pvc *corev1.PersistentVolumeClaim) (*ctrl.Result, error) {
|
||||
stsName := pvc.Labels[labelKeyRunnerStatefulSetName]
|
||||
if stsName == "" {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
log.V(2).Info("Reconciling runner PVC")
|
||||
|
||||
var sts appsv1.StatefulSet
|
||||
if err := c.Get(ctx, types.NamespacedName{Namespace: ns, Name: stsName}, &sts); err != nil {
|
||||
if !kerrors.IsNotFound(err) {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
// We assume that the statefulset is shortly terminated, hence retry forever until it gets removed.
|
||||
retry := 10 * time.Second
|
||||
log.V(1).Info("Retrying sync until statefulset gets removed", "requeueAfter", retry)
|
||||
return &ctrl.Result{RequeueAfter: retry}, nil
|
||||
}
|
||||
|
||||
log = log.WithValues("sts", stsName)
|
||||
|
||||
pvName := pvc.Spec.VolumeName
|
||||
|
||||
if pvName != "" {
|
||||
// If we deleted PVC before unsetting pv.spec.claimRef,
|
||||
// K8s seems to revive the claimRef :thinking:
|
||||
// So we need to mark PV for claimRef unset first, and delete PVC, and finally unset claimRef on PV.
|
||||
|
||||
var pv corev1.PersistentVolume
|
||||
if err := c.Get(ctx, types.NamespacedName{Namespace: ns, Name: pvName}, &pv); err != nil {
|
||||
if !kerrors.IsNotFound(err) {
|
||||
return nil, err
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
pvCopy := pv.DeepCopy()
|
||||
if pvCopy.Labels == nil {
|
||||
pvCopy.Labels = map[string]string{}
|
||||
}
|
||||
pvCopy.Labels[labelKeyCleanup] = stsName
|
||||
|
||||
log.V(2).Info("Scheduling to unset PV's claimRef", "pv", pv.Name)
|
||||
|
||||
// Apparently K8s doesn't reconcile PV immediately after PVC deletion.
|
||||
// So we start a relatively busy loop of PV reconcilation slightly before the PVC deletion,
|
||||
// so that PV can be unbound as soon as possible after the PVC got deleted.
|
||||
if err := c.Update(ctx, pvCopy); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
log.Info("Updated PV to unset claimRef")
|
||||
|
||||
// At this point, the PV is still Bound
|
||||
|
||||
log.V(2).Info("Deleting unused PVC")
|
||||
|
||||
if err := c.Delete(ctx, pvc); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
log.Info("Deleted unused PVC")
|
||||
|
||||
// At this point, the PV is still "Bound", but we are ready to unset pv.spec.claimRef in pv controller.
|
||||
// Once the pv controller unsets claimRef, the PV becomes "Released", hence available for reuse by another eligible PVC.
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func syncPV(ctx context.Context, c client.Client, log logr.Logger, ns string, pv *corev1.PersistentVolume) (*ctrl.Result, error) {
|
||||
if pv.Spec.ClaimRef == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
log.V(2).Info("Reconciling PV")
|
||||
|
||||
if pv.Labels[labelKeyCleanup] == "" {
|
||||
// We assume that the pvc is shortly terminated, hence retry forever until it gets removed.
|
||||
retry := 10 * time.Second
|
||||
log.V(2).Info("Retrying sync to see if this PV needs to be managed by ARC", "requeueAfter", retry)
|
||||
return &ctrl.Result{RequeueAfter: retry}, nil
|
||||
}
|
||||
|
||||
log.V(2).Info("checking pv phase", "phase", pv.Status.Phase)
|
||||
|
||||
if pv.Status.Phase != corev1.VolumeReleased {
|
||||
// We assume that the pvc is shortly terminated, hence retry forever until it gets removed.
|
||||
retry := 10 * time.Second
|
||||
log.V(1).Info("Retrying sync until pvc gets released", "requeueAfter", retry)
|
||||
return &ctrl.Result{RequeueAfter: retry}, nil
|
||||
}
|
||||
|
||||
// At this point, the PV is still Released
|
||||
|
||||
pvCopy := pv.DeepCopy()
|
||||
delete(pvCopy.Labels, labelKeyCleanup)
|
||||
pvCopy.Spec.ClaimRef = nil
|
||||
log.V(2).Info("Unsetting PV's claimRef", "pv", pv.Name)
|
||||
if err := c.Update(ctx, pvCopy); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
log.Info("PV should be Available now")
|
||||
|
||||
// At this point, the PV becomes Available, if it's reclaim policy is "Retain".
|
||||
// I have not yet tested it with "Delete" but perhaps it's deleted automatically after the update?
|
||||
// https://kubernetes.io/docs/concepts/storage/persistent-volumes/#retain
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
controllers/testresourcereader.go (new file, 31 lines)
@@ -0,0 +1,31 @@
package controllers

import (
	"context"
	"errors"
	"reflect"

	kerrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

type testResourceReader struct {
	objects map[types.NamespacedName]client.Object
}

func (r *testResourceReader) Get(_ context.Context, nsName types.NamespacedName, obj client.Object) error {
	ret, ok := r.objects[nsName]
	if !ok {
		return &kerrors.StatusError{ErrStatus: metav1.Status{Reason: metav1.StatusReasonNotFound}}
	}
	v := reflect.ValueOf(obj)
	if v.Kind() != reflect.Ptr {
		return errors.New("obj must be a pointer")
	}

	v.Elem().Set(reflect.ValueOf(ret).Elem())

	return nil
}

controllers/testresourcereader_test.go (new file, 35 lines)
@@ -0,0 +1,35 @@
package controllers

import (
	"context"
	"testing"

	"github.com/stretchr/testify/require"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func TestResourceReader(t *testing.T) {
	rr := &testResourceReader{
		objects: map[types.NamespacedName]client.Object{
			{Namespace: "default", Name: "sec1"}: &corev1.Secret{
				ObjectMeta: metav1.ObjectMeta{
					Namespace: "default",
					Name:      "sec1",
				},
				Data: map[string][]byte{
					"foo": []byte("bar"),
				},
			},
		},
	}

	var sec corev1.Secret

	err := rr.Get(context.Background(), types.NamespacedName{Namespace: "default", Name: "sec1"}, &sec)
	require.NoError(t, err)

	require.Equal(t, []byte("bar"), sec.Data["foo"])
}

@@ -3,6 +3,9 @@ package controllers
import (
	"reflect"
	"testing"

	"github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
	corev1 "k8s.io/api/core/v1"
)

func Test_filterLabels(t *testing.T) {
@@ -32,3 +35,94 @@ func Test_filterLabels(t *testing.T) {
		})
	}
}

func Test_workVolumeClaimTemplateVolumeV1VolumeTransformation(t *testing.T) {
	storageClassName := "local-storage"
	workVolumeClaimTemplate := v1alpha1.WorkVolumeClaimTemplate{
		StorageClassName: storageClassName,
		AccessModes:      []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce, corev1.ReadWriteMany},
		Resources:        corev1.ResourceRequirements{},
	}
	want := corev1.Volume{
		Name: "work",
		VolumeSource: corev1.VolumeSource{
			Ephemeral: &corev1.EphemeralVolumeSource{
				VolumeClaimTemplate: &corev1.PersistentVolumeClaimTemplate{
					Spec: corev1.PersistentVolumeClaimSpec{
						AccessModes:      []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce, corev1.ReadWriteMany},
						StorageClassName: &storageClassName,
						Resources:        corev1.ResourceRequirements{},
					},
				},
			},
		},
	}

	got := workVolumeClaimTemplate.V1Volume()

	if got.Name != want.Name {
		t.Errorf("want name %q, got %q\n", want.Name, got.Name)
	}

	if got.VolumeSource.Ephemeral == nil {
		t.Fatal("work volume claim template should transform itself into Ephemeral volume source\n")
	}

	if got.VolumeSource.Ephemeral.VolumeClaimTemplate == nil {
		t.Fatal("work volume claim template should have ephemeral volume claim template set\n")
	}

	gotClassName := *got.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.StorageClassName
	wantClassName := *want.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.StorageClassName
	if gotClassName != wantClassName {
		t.Errorf("expected storage class name %q, got %q\n", wantClassName, gotClassName)
	}

	gotAccessModes := got.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.AccessModes
	wantAccessModes := want.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.AccessModes
	if len(gotAccessModes) != len(wantAccessModes) {
		t.Fatalf("access modes lengths mismatch: got %v, expected %v\n", gotAccessModes, wantAccessModes)
	}

	diff := make(map[corev1.PersistentVolumeAccessMode]int, len(wantAccessModes))
	for _, am := range wantAccessModes {
		diff[am]++
	}

	for _, am := range gotAccessModes {
		_, ok := diff[am]
		if !ok {
			t.Errorf("got access mode %v that is not in the wanted access modes\n", am)
		}

		diff[am]--
		if diff[am] == 0 {
			delete(diff, am)
		}
	}

	if len(diff) != 0 {
		t.Fatalf("got access modes did not take every access mode into account\nactual: %v expected: %v\n", gotAccessModes, wantAccessModes)
	}
}

func Test_workVolumeClaimTemplateV1VolumeMount(t *testing.T) {
	workVolumeClaimTemplate := v1alpha1.WorkVolumeClaimTemplate{
		StorageClassName: "local-storage",
		AccessModes:      []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce, corev1.ReadWriteMany},
		Resources:        corev1.ResourceRequirements{},
	}

	mountPath := "/test/_work"
	want := corev1.VolumeMount{
		MountPath: mountPath,
		Name:      "work",
	}

	got := workVolumeClaimTemplate.V1VolumeMount(mountPath)

	if want != got {
		t.Fatalf("expected volume mount %+v, actual %+v\n", want, got)
	}
}

docs/releasenotes/0.24.md (new file, 54 lines)
@@ -0,0 +1,54 @@
# actions-runner-controller v0.24.0

All changes in this release can be found in the milestone https://github.com/actions-runner-controller/actions-runner-controller/milestone/4

This log documents breaking changes and major enhancements.

## Upgrading

In case you're using our Helm chart to deploy ARC, use chart 0.19.0 or greater. Don't miss upgrading CRDs as usual! Helm doesn't upgrade CRDs.

## BREAKING CHANGE : Support for `--once` is being dropped

> **Warning**: If you're using ARC's official runner image, make sure to update the image tag to `v2.292.0` BEFORE upgrading ARC

In #1385 we changed ARC to NOT automatically set the feature flag `RUNNER_FEATURE_FLAG_EPHEMERAL=true`. If you're using ARC's official runner image, make sure to update the image tag to `v2.292.0` before upgrading ARC, because that's the first runner image release since we changed the default to `--ephemeral`. If you keep using an older runner image after upgrading ARC, you will end up using `--once`, which is unreliable and has been deprecated for almost a year.

> **Warning**: If you're using a custom runner image, incorporate the changes made in #1384 into your runner image's Dockerfile

If you're building a custom runner image on your own and it still requires the user to specify `RUNNER_FEATURE_FLAG_EPHEMERAL=true` to use `--ephemeral`, check #1384 and update your custom runner image's Dockerfile accordingly. Otherwise, you may unexpectedly end up using `--once` after upgrading ARC, because that was the previous default.

Relevant PR(s): #1384, #1385

## FIX : Prevent runners from getting stuck in Terminating when the container disappeared

We occasionally heard about runner pods stuck in Terminating after the node and the containers running on it disappeared because, for example, the machine was terminated prematurely.

We now set runner pods' restartPolicy to `Never` and remove runner pods stuck in `Waiting` after a restart, so that the pods are much less likely to get stuck forever.
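
For illustration, this is roughly what the change means at the pod-spec level. It is a minimal sketch using plain `client-go` types, not ARC's actual pod construction code:

```go
package example

import corev1 "k8s.io/api/core/v1"

// Sketch only: ARC sets the equivalent field when it builds runner pods.
func runnerPodSpecFragment() corev1.PodSpec {
	return corev1.PodSpec{
		// The kubelet no longer restarts a failed runner container in place;
		// ARC deletes and recreates the whole pod instead, so a pod whose
		// containers have disappeared is not left hanging in Waiting.
		RestartPolicy: corev1.RestartPolicyNever,
	}
}
```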
Relevant PR(s): #1395, #1420

## ENHANCEMENT : Support arbitrarily setting `privileged: true` for the runner container

This is a frequently requested feature that allows you to force `privileged: true` in case you don't need docker but still need privileged tasks to be run in a job step.

In combination with a container runtime like `sysbox`, this should enable you to run docker builds within the dind sidecar, all without privileges. See [the discussion related to Sysbox](https://github.com/actions-runner-controller/actions-runner-controller/discussions/977) for more information.

Note that we ARC maintainers still don't have the bandwidth to provide a complete description of how to make ARC work with `sysbox`, but we'd almost certainly welcome contributions to the documentation if you manage to make it work.

Relevant PR(s): #1383

## ENHANCEMENT : RunnerSet can now retain PVs across restarts

This enhancement makes it more practical to use RunnerSet in combination with `volumeClaimTemplates` to make your workflow jobs faster.

Please see our updated ["Custom Volume Mounts" section in the documentation](https://github.com/actions-runner-controller/actions-runner-controller#custom-volume-mounts) for more information. Currently, we cover caching Docker image layers, go mod/build caches, and a PV-backed runner work directory (although that one is backed by another feature, unrelated to this enhancement, under the hood).
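
As a rough sketch of what gets retained, an entry in the RunnerSet's embedded StatefulSet spec (`runnerSet.Spec.StatefulSetSpec.VolumeClaimTemplates` in the controller code) could look like the following in Go; the template name, storage class, and size are made-up examples:

```go
package example

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// Hypothetical claim template for caching Docker image layers; only the field
// layout comes from the Kubernetes API, the values are examples.
func dockerLayerCacheTemplate(storageClassName string) corev1.PersistentVolumeClaim {
	return corev1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{Name: "var-lib-docker"},
		Spec: corev1.PersistentVolumeClaimSpec{
			AccessModes:      []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
			StorageClassName: &storageClassName,
			Resources: corev1.ResourceRequirements{
				Requests: corev1.ResourceList{
					corev1.ResourceStorage: resource.MustParse("10Gi"),
				},
			},
		},
	}
}
```

For each StatefulSet replica, the new `controllers/sync_volumes.go` then looks up the PVC named `<template>-<statefulset>-0`, labels it with `runner-statefulset-name`, and recycles the backing PV once the StatefulSet is gone, which is what makes retention across restarts work.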
Relevant PR(s): #1340

## ENHANCEMENT : OpenSSF scorecard adoption

We assessed the project's security by following the OpenSSF scorecard checks and adopting OpenSSF best practices.
It should help you judge the security of ARC's development and release processes.

Relevant PR(s): #1461

docs/releasenotes/0.25.md (new file, 43 lines)
@@ -0,0 +1,43 @@
# actions-runner-controller v0.25.0

All planned changes in this release can be found in the milestone https://github.com/actions-runner-controller/actions-runner-controller/milestone/8.

Also see https://github.com/actions-runner-controller/actions-runner-controller/compare/v0.24.1...v0.25.0 for the full changelog.

This log documents breaking changes and major enhancements.

## Upgrading

In case you're using our Helm chart to deploy ARC, use chart 0.20.0 or greater. Don't miss upgrading CRDs as usual! Helm doesn't upgrade CRDs.

## BREAKING CHANGE : Support for `--once` has been dropped

In case you're still on ARC v0.23.0 or earlier, please also read [the relevant part of the v0.24.0 release notes for more information](https://github.com/actions-runner-controller/actions-runner-controller/blob/master/docs/releasenotes/0.24.md#breaking-change--support-for---once-is-being-dropped).

Relevant PR(s): #1580, #1590

## ENHANCEMENT : Support for the new Kubernetes container mode of the Actions runner

The GitHub Actions team has recently added to `actions/runner` the ability to use [runner container hooks](https://github.com/actions/runner-container-hooks) to run job steps in Kubernetes pods instead of docker containers created by the `docker` command. It allows us to avoid the use of privileged containers while still being able to run container-backed job steps.

To use the new container mode, you set `.spec.template.spec.containerMode` in `RunnerDeployment` to `"kubernetes"`, while defining `.spec.template.spec.workVolumeClaimTemplate`. The volume claim template is used to provision and assign the persistent volumes that are mounted across the runner pod and the job pods to share the job workspace.
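
As a minimal sketch in Go, the `workVolumeClaimTemplate` maps to an ephemeral `work` volume plus a volume mount on the runner pod. The storage class and mount path below are example values, and how the template is embedded into `RunnerDeployment` is described in the documentation linked below:

```go
package example

import (
	"github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
	corev1 "k8s.io/api/core/v1"
)

func workVolumeExample() (corev1.Volume, corev1.VolumeMount) {
	wvct := v1alpha1.WorkVolumeClaimTemplate{
		StorageClassName: "local-storage", // example value
		AccessModes:      []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
		Resources:        corev1.ResourceRequirements{},
	}

	// The template becomes an ephemeral volume named "work" shared between the
	// runner pod and the job pods, mounted at the runner's work directory
	// (the path here is only an example).
	return wvct.V1Volume(), wvct.V1VolumeMount("/runner/_work")
}
```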
Before using this feature, we highly recommend reading [the detailed explanation in the original pull request](https://github.com/actions-runner-controller/actions-runner-controller/pull/1546) and [the new section in ARC's documentation](https://github.com/actions-runner-controller/actions-runner-controller#runner-with-k8s-jobs).

Big kudos to @thboop and the GitHub Actions team for implementing and contributing this feature!

Relevant PR(s): #1546

## FIX : Webhook-based scaling is even more reliable

We fixed a race condition in the webhook-based autoscaler that resulted in runners not being added when necessary.

The race condition occurred when the autoscaler received a webhook event while it was still processing another one, and both ended up scaling up the same horizontal runner autoscaler at the same time.

To mitigate that, ARC now uses Kubernetes' Update API instead of Patch to update `HRA.spec.capacityReservations`, the underlying data structure that lets the webhook-based scaler add replicas to a RunnerDeployment or RunnerSet on demand.

Because we were also worried about stressing the Kubernetes apiserver when your webhook-based autoscaler receives a lot of concurrent webhook events, we additionally enhanced it to batch the Update API calls over a 3-second window, which means it calls the Update API at most once every 3 seconds per webhook-based autoscaler instance.
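
Conceptually, the batching behaves like the following sketch; this is an illustrative accumulator only, not ARC's actual `capacityReservations` update code:

```go
package example

import (
	"sync"
	"time"
)

// batchedUpdater coalesces scale-up requests so that the Update API call happens
// at most once every 3 seconds per autoscaler, however many webhook events arrive.
type batchedUpdater struct {
	mu      sync.Mutex
	pending int
	flush   func(added int) // performs the single Update call against the HRA
}

func (b *batchedUpdater) Add(replicas int) {
	b.mu.Lock()
	first := b.pending == 0
	b.pending += replicas
	b.mu.Unlock()

	// Only the first request in a window schedules a flush; later requests just
	// accumulate into the same batch.
	if first {
		time.AfterFunc(3*time.Second, func() {
			b.mu.Lock()
			added := b.pending
			b.pending = 0
			b.mu.Unlock()
			b.flush(added)
		})
	}
}
```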
Lastly, we fixed a bug in the autoscaler that caused it to stop adding replicas for newly received webhook events once the desired replica count reached `maxReplicas`.

Relevant PR(s): #1477, #1568

@@ -8,7 +8,7 @@ import (

	"github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"

	"github.com/google/go-github/v39/github"
	"github.com/google/go-github/v45/github"
	"github.com/gorilla/mux"
)

@@ -12,9 +12,9 @@ import (

	"github.com/actions-runner-controller/actions-runner-controller/github/metrics"
	"github.com/actions-runner-controller/actions-runner-controller/logging"
	"github.com/bradleyfalzon/ghinstallation"
	"github.com/bradleyfalzon/ghinstallation/v2"
	"github.com/go-logr/logr"
	"github.com/google/go-github/v39/github"
	"github.com/google/go-github/v45/github"
	"github.com/gregjones/httpcache"
	"golang.org/x/oauth2"
)
@@ -248,7 +248,8 @@ func (c *Client) ListRunners(ctx context.Context, enterprise, org, repo string)
func (c *Client) ListOrganizationRunnerGroups(ctx context.Context, org string) ([]*github.RunnerGroup, error) {
	var runnerGroups []*github.RunnerGroup

	opts := github.ListOptions{PerPage: 100}
	opts := github.ListOrgRunnerGroupOptions{}
	opts.PerPage = 100
	for {
		list, res, err := c.Client.Actions.ListOrganizationRunnerGroups(ctx, org, &opts)
		if err != nil {
@@ -271,9 +272,21 @@ func (c *Client) ListOrganizationRunnerGroups(ctx context.Context, org string) (
func (c *Client) ListOrganizationRunnerGroupsForRepository(ctx context.Context, org, repo string) ([]*github.RunnerGroup, error) {
	var runnerGroups []*github.RunnerGroup

	opts := github.ListOptions{PerPage: 100}
	var opts github.ListOrgRunnerGroupOptions

	opts.PerPage = 100

	repoName := repo
	parts := strings.Split(repo, "/")
	if len(parts) == 2 {
		repoName = parts[1]
	}
	// This must be the repo name without the owner part, so in case the repo is "myorg/myrepo" the repo name
	// passed to visible_to_repository must be "myrepo".
	opts.VisibleToRepository = repoName

	for {
		list, res, err := c.listOrganizationRunnerGroupsVisibleToRepo(ctx, org, repo, &opts)
		list, res, err := c.Actions.ListOrganizationRunnerGroups(ctx, org, &opts)
		if err != nil {
			return runnerGroups, fmt.Errorf("failed to list organization runner groups: %w", err)
		}
@@ -309,42 +322,6 @@ func (c *Client) ListRunnerGroupRepositoryAccesses(ctx context.Context, org stri
	return repos, nil
}

// listOrganizationRunnerGroupsVisibleToRepo lists all self-hosted runner groups configured in an organization which can be used by the repository.
//
// GitHub API docs: https://docs.github.com/en/rest/reference/actions#list-self-hosted-runner-groups-for-an-organization
func (c *Client) listOrganizationRunnerGroupsVisibleToRepo(ctx context.Context, org, repo string, opts *github.ListOptions) (*github.RunnerGroups, *github.Response, error) {
	repoName := repo
	parts := strings.Split(repo, "/")
	if len(parts) == 2 {
		repoName = parts[1]
	}

	u := fmt.Sprintf("orgs/%v/actions/runner-groups?visible_to_repository=%v", org, repoName)

	if opts != nil {
		if opts.PerPage > 0 {
			u = fmt.Sprintf("%v&per_page=%v", u, opts.PerPage)
		}

		if opts.Page > 0 {
			u = fmt.Sprintf("%v&page=%v", u, opts.Page)
		}
	}

	req, err := c.Client.NewRequest("GET", u, nil)
	if err != nil {
		return nil, nil, err
	}

	groups := &github.RunnerGroups{}
	resp, err := c.Client.Do(ctx, req, &groups)
	if err != nil {
		return nil, resp, err
	}

	return groups, resp, nil
}

// cleanup removes expired registration tokens.
func (c *Client) cleanup() {
	c.mu.Lock()

@@ -8,7 +8,7 @@ import (
	"time"

	"github.com/actions-runner-controller/actions-runner-controller/github/fake"
	"github.com/google/go-github/v39/github"
	"github.com/google/go-github/v45/github"
)

var server *httptest.Server

go.mod (62 lines)
@@ -1,52 +1,62 @@
|
||||
module github.com/actions-runner-controller/actions-runner-controller
|
||||
|
||||
go 1.17
|
||||
go 1.18
|
||||
|
||||
require (
|
||||
github.com/bradleyfalzon/ghinstallation v1.1.1
|
||||
github.com/bradleyfalzon/ghinstallation/v2 v2.1.0
|
||||
github.com/davecgh/go-spew v1.1.1
|
||||
github.com/go-logr/logr v1.2.0
|
||||
github.com/go-logr/logr v1.2.3
|
||||
github.com/google/go-cmp v0.5.8
|
||||
github.com/google/go-github/v39 v39.2.0
|
||||
github.com/google/go-github/v45 v45.2.0
|
||||
github.com/gorilla/mux v1.8.0
|
||||
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79
|
||||
github.com/kelseyhightower/envconfig v1.4.0
|
||||
github.com/onsi/ginkgo v1.16.5
|
||||
github.com/onsi/gomega v1.17.0
|
||||
github.com/prometheus/client_golang v1.12.1
|
||||
github.com/stretchr/testify v1.7.0
|
||||
github.com/onsi/gomega v1.19.0
|
||||
github.com/prometheus/client_golang v1.12.2
|
||||
github.com/stretchr/testify v1.8.0
|
||||
github.com/teambition/rrule-go v1.8.0
|
||||
go.uber.org/zap v1.21.0
|
||||
golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a
|
||||
golang.org/x/oauth2 v0.0.0-20220630143837-2104d58473e0
|
||||
gomodules.xyz/jsonpatch/v2 v2.2.0
|
||||
k8s.io/api v0.23.5
|
||||
k8s.io/apimachinery v0.23.5
|
||||
k8s.io/client-go v0.23.5
|
||||
sigs.k8s.io/controller-runtime v0.11.2
|
||||
k8s.io/api v0.24.2
|
||||
k8s.io/apimachinery v0.24.2
|
||||
k8s.io/client-go v0.24.2
|
||||
sigs.k8s.io/controller-runtime v0.12.3
|
||||
sigs.k8s.io/yaml v1.3.0
|
||||
)
|
||||
|
||||
require (
|
||||
cloud.google.com/go v0.81.0 // indirect
|
||||
github.com/PuerkitoBio/purell v1.1.1 // indirect
|
||||
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.1.2 // indirect
|
||||
github.com/dgrijalva/jwt-go v3.2.0+incompatible // indirect
|
||||
github.com/emicklei/go-restful v2.9.5+incompatible // indirect
|
||||
github.com/evanphx/json-patch v4.12.0+incompatible // indirect
|
||||
github.com/fsnotify/fsnotify v1.5.1 // indirect
|
||||
github.com/go-logr/zapr v1.2.0 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.19.5 // indirect
|
||||
github.com/go-openapi/jsonreference v0.19.5 // indirect
|
||||
github.com/go-openapi/swag v0.19.14 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang-jwt/jwt/v4 v4.4.1 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
|
||||
github.com/golang/protobuf v1.5.2 // indirect
|
||||
github.com/google/go-github/v29 v29.0.2 // indirect
|
||||
github.com/google/gnostic v0.5.7-v3refs // indirect
|
||||
github.com/google/go-github/v41 v41.0.0 // indirect
|
||||
github.com/google/go-querystring v1.1.0 // indirect
|
||||
github.com/google/gofuzz v1.1.0 // indirect
|
||||
github.com/google/uuid v1.1.2 // indirect
|
||||
github.com/googleapis/gnostic v0.5.5 // indirect
|
||||
github.com/imdario/mergo v0.3.12 // indirect
|
||||
github.com/josharian/intern v1.0.0 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
github.com/mailru/easyjson v0.7.6 // indirect
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||
github.com/nxadm/tail v1.4.8 // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
@@ -56,24 +66,24 @@ require (
|
||||
github.com/spf13/pflag v1.0.5 // indirect
|
||||
go.uber.org/atomic v1.7.0 // indirect
|
||||
go.uber.org/multierr v1.6.0 // indirect
|
||||
golang.org/x/crypto v0.0.0-20210817164053-32db794688a5 // indirect
|
||||
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd // indirect
|
||||
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect
|
||||
golang.org/x/crypto v0.0.0-20220214200702-86341886e292 // indirect
|
||||
golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e // indirect
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a // indirect
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect
|
||||
golang.org/x/text v0.3.7 // indirect
|
||||
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect
|
||||
golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect
|
||||
google.golang.org/appengine v1.6.7 // indirect
|
||||
google.golang.org/protobuf v1.27.1 // indirect
|
||||
google.golang.org/protobuf v1.28.0 // indirect
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
|
||||
k8s.io/apiextensions-apiserver v0.23.5 // indirect
|
||||
k8s.io/component-base v0.23.5 // indirect
|
||||
k8s.io/klog/v2 v2.30.0 // indirect
|
||||
k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 // indirect
|
||||
k8s.io/utils v0.0.0-20211116205334-6203023598ed // indirect
|
||||
sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
k8s.io/apiextensions-apiserver v0.24.2 // indirect
|
||||
k8s.io/component-base v0.24.2 // indirect
|
||||
k8s.io/klog/v2 v2.60.1 // indirect
|
||||
k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42 // indirect
|
||||
k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 // indirect
|
||||
sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect
|
||||
)
|
||||
|
||||
|
||||
go.sum (152 lines)
@@ -52,7 +52,9 @@ github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym
|
||||
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
|
||||
github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
|
||||
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
|
||||
github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI=
|
||||
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
|
||||
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M=
|
||||
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
|
||||
github.com/actions-runner-controller/httpcache v0.2.0 h1:hCNvYuVPJ2xxYBymqBvH0hSiQpqz4PHF/LbU3XghGNI=
|
||||
github.com/actions-runner-controller/httpcache v0.2.0/go.mod h1:JLu9/2M/btPz1Zu/vTZ71XzukQHn2YeISPmJoM5exBI=
|
||||
@@ -66,6 +68,7 @@ github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20210826220005-b48c857c3a0e/go.m
|
||||
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
|
||||
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
|
||||
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
|
||||
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
|
||||
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
|
||||
github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
|
||||
github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
|
||||
@@ -78,12 +81,14 @@ github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kB
|
||||
github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
|
||||
github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM=
|
||||
github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
|
||||
github.com/bradleyfalzon/ghinstallation v1.1.1 h1:pmBXkxgM1WeF8QYvDLT5kuQiHMcmf+X015GI0KM/E3I=
|
||||
github.com/bradleyfalzon/ghinstallation v1.1.1/go.mod h1:vyCmHTciHx/uuyN82Zc3rXN3X2KTK8nUTCrTMwAhcug=
|
||||
github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
|
||||
github.com/bradleyfalzon/ghinstallation/v2 v2.0.4 h1:tXKVfhE7FcSkhkv0UwkLvPDeZ4kz6OXd0PKPlFqf81M=
|
||||
github.com/bradleyfalzon/ghinstallation/v2 v2.0.4/go.mod h1:B40qPqJxWE0jDZgOR1JmaMy+4AY1eBP+IByOvqyAKp0=
|
||||
github.com/bradleyfalzon/ghinstallation/v2 v2.1.0 h1:5+NghM1Zred9Z078QEZtm28G/kfDfZN/92gkDlLwGVA=
|
||||
github.com/bradleyfalzon/ghinstallation/v2 v2.1.0/go.mod h1:Xg3xPRN5Mcq6GDqeUVhFbjEWMb4JHCyWEeeBGEYQoTU=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
|
||||
github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
|
||||
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
|
||||
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
|
||||
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
|
||||
@@ -107,18 +112,19 @@ github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7
|
||||
github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
|
||||
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
|
||||
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
|
||||
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
|
||||
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
|
||||
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
||||
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
|
||||
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
|
||||
github.com/emicklei/go-restful v2.9.5+incompatible h1:spTtZBk5DYEvbxMVutUuTyh1Ao2r4iyvLdACqsl/Ljk=
|
||||
github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
|
||||
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
@@ -153,15 +159,19 @@ github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V
|
||||
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
|
||||
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
|
||||
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
|
||||
github.com/go-logr/logr v1.2.0 h1:QK40JKJyMdUDz+h+xvCsru/bJhvG0UxvePV0ufL/AcE=
|
||||
github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
|
||||
github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-logr/zapr v1.2.0 h1:n4JnPI1T3Qq1SFEi/F8rwLrZERp2bso19PJZDB9dayk=
|
||||
github.com/go-logr/zapr v1.2.0/go.mod h1:Qa4Bsj2Vb+FAVeAKsLD8RLQ+YRJB8YDmOAKxaBQf7Ro=
|
||||
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
|
||||
github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY=
|
||||
github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
|
||||
github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
|
||||
github.com/go-openapi/jsonreference v0.19.5 h1:1WJP/wi4OjB4iV8KVbH73rQaoialJrqv8gitZLxGLtM=
|
||||
github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg=
|
||||
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
|
||||
github.com/go-openapi/swag v0.19.14 h1:gm3vOOXfiuw5i9p5N9xJvfjvuofpyvLA9Wr6QfK5Fng=
|
||||
github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
|
||||
@@ -171,6 +181,10 @@ github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zV
|
||||
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
|
||||
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
||||
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
||||
github.com/golang-jwt/jwt/v4 v4.0.0 h1:RAqyYixv1p7uEnocuy8P1nru5wprCh/MH2BIlW5z5/o=
|
||||
github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
|
||||
github.com/golang-jwt/jwt/v4 v4.4.1 h1:pC5DB52sCeK48Wlb9oPcdhnjkz1TKt1D/P7WKJ0kUcQ=
|
||||
github.com/golang-jwt/jwt/v4 v4.4.1/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4=
|
||||
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
@@ -209,7 +223,10 @@ github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Z
|
||||
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
|
||||
github.com/google/cel-go v0.9.0/go.mod h1:U7ayypeSkw23szu4GaQTPJGx66c20mx8JklMSxrmI1w=
|
||||
github.com/google/cel-go v0.10.1/go.mod h1:U7ayypeSkw23szu4GaQTPJGx66c20mx8JklMSxrmI1w=
|
||||
github.com/google/cel-spec v0.6.0/go.mod h1:Nwjgxy5CbjlPrtCWjeDjUyKMl8w41YBYGjsyDdqk0xA=
|
||||
github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54=
|
||||
github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
@@ -222,15 +239,12 @@ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
|
||||
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o=
|
||||
github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
|
||||
github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
|
||||
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-github/v29 v29.0.2 h1:opYN6Wc7DOz7Ku3Oh4l7prmkOMwEcQxpFtxdU8N8Pts=
|
||||
github.com/google/go-github/v29 v29.0.2/go.mod h1:CHKiKKPHJ0REzfwc14QMklvtHwCveD0PxlMjLlzAM5E=
|
||||
github.com/google/go-github/v39 v39.2.0 h1:rNNM311XtPOz5rDdsJXAp2o8F67X9FnROXTvto3aSnQ=
|
||||
github.com/google/go-github/v39 v39.2.0/go.mod h1:C1s8C5aCC9L+JXIYpJM5GYytdX52vC1bLvHEF1IhBrE=
|
||||
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
|
||||
github.com/google/go-github/v41 v41.0.0 h1:HseJrM2JFf2vfiZJ8anY2hqBjdfY1Vlj/K27ueww4gg=
|
||||
github.com/google/go-github/v41 v41.0.0/go.mod h1:XgmCA5H323A9rtgExdTcnDkcqp6S30AVACCBDOonIxg=
|
||||
github.com/google/go-github/v45 v45.2.0 h1:5oRLszbrkvxDDqBCNj2hjDZMKmvexaZ1xw/FCD+K3FI=
|
||||
github.com/google/go-github/v45 v45.2.0/go.mod h1:FObaZJEDSTa/WGCzZ2Z3eoCDXWJKMenWWTrd8jrta28=
|
||||
github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8=
|
||||
github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
@@ -297,6 +311,7 @@ github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANyt
|
||||
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
|
||||
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
|
||||
github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
|
||||
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
|
||||
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
|
||||
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
|
||||
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
||||
@@ -329,6 +344,7 @@ github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czP
|
||||
github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
|
||||
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA=
|
||||
github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
|
||||
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
|
||||
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
|
||||
@@ -347,6 +363,7 @@ github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh
|
||||
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
|
||||
github.com/moby/term v0.0.0-20210610120745-9d4ed1856297/go.mod h1:vgPCkQMyxTZ7IDy8SXRufE172gr8+K/JE/7hHFxHW3A=
|
||||
github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
@@ -355,6 +372,7 @@ github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3Rllmb
|
||||
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
|
||||
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
|
||||
github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||
@@ -369,14 +387,14 @@ github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB
|
||||
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
|
||||
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
|
||||
github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
|
||||
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
|
||||
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
|
||||
github.com/onsi/ginkgo/v2 v2.1.3 h1:e/3Cwtogj0HA+25nMP1jCMDIf8RtRYbGwGGuBIFztkc=
|
||||
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
|
||||
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
|
||||
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
|
||||
github.com/onsi/gomega v1.17.0 h1:9Luw4uT5HTjHTN8+aNcSThgH1vdXnmdJ8xIfZ4wyTRE=
|
||||
github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
|
||||
github.com/onsi/gomega v1.19.0 h1:4ieX6qQjPP/BfC3mpsAtIGGlxTWPeA3Inl/7DtXw1tw=
|
||||
github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro=
|
||||
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
|
||||
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
|
||||
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
|
||||
@@ -396,8 +414,9 @@ github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDf
|
||||
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
|
||||
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
|
||||
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
|
||||
github.com/prometheus/client_golang v1.12.1 h1:ZiaPsmm9uiBeaSMRznKsCDNtPCS0T3JVDGF+06gjBzk=
|
||||
github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
|
||||
github.com/prometheus/client_golang v1.12.2 h1:51L9cDoUHVrXx4zWYlcLQIZ+d+VXHgqnYKkIuq4g/34=
|
||||
github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
@@ -423,6 +442,7 @@ github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6So
|
||||
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
|
||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
|
||||
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
|
||||
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
|
||||
@@ -443,6 +463,7 @@ github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkU
|
||||
github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
|
||||
github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo=
|
||||
github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk=
|
||||
github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g=
|
||||
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
|
||||
github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
|
||||
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
@@ -453,16 +474,20 @@ github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH
|
||||
github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
|
||||
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.5 h1:s5PTfem8p8EbKQOctVV53k6jCJt3UX4IEJzwh+C324Q=
|
||||
github.com/stretchr/testify v1.7.5/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
|
||||
github.com/teambition/rrule-go v1.7.2 h1:goEajFWYydfCgavn2m/3w5U+1b3PGqPUHx/fFSVfTy0=
|
||||
github.com/teambition/rrule-go v1.7.2/go.mod h1:mBJ1Ht5uboJ6jexKdNUJg2NcwP8uUMNvStWXlJD3MvU=
|
||||
github.com/teambition/rrule-go v1.8.0 h1:a/IX5s56hGkFF+nRlJUooZU/45OTeeldBGL29nDKIHw=
|
||||
github.com/teambition/rrule-go v1.8.0/go.mod h1:Ieq5AbrKGciP1V//Wq8ktsTXwSwJHDD5mD/wLBGl3p4=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
|
||||
@@ -474,12 +499,16 @@ github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
|
||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
|
||||
github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
|
||||
github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
|
||||
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
|
||||
go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4=
|
||||
go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
|
||||
go.etcd.io/etcd/api/v3 v3.5.1/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
|
||||
go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
|
||||
go.etcd.io/etcd/client/pkg/v3 v3.5.1/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
|
||||
go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ=
|
||||
go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0=
|
||||
go.etcd.io/etcd/client/v3 v3.5.1/go.mod h1:OnjH4M8OnAotwaB2l9bVgZzRFKru7/ZMoS46OtKyd3Q=
|
||||
go.etcd.io/etcd/pkg/v3 v3.5.0/go.mod h1:UzJGatBQ1lXChBkQF0AuAtkRQMYnHubxAEYIrC3MSsE=
|
||||
go.etcd.io/etcd/raft/v3 v3.5.0/go.mod h1:UFOHSIvO/nKwd4lhkwabrTD3cqW5yVyYYf/KlD00Szc=
|
||||
go.etcd.io/etcd/server/v3 v3.5.0/go.mod h1:3Ah5ruV+M+7RZr0+Y/5mNLwC+eQlni+mQmOVdCRJoS4=
|
||||
@@ -506,17 +535,14 @@ go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
|
||||
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
|
||||
go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
|
||||
go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
|
||||
go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
|
||||
go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA=
|
||||
go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
|
||||
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
|
||||
go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4=
|
||||
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
|
||||
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
||||
go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
|
||||
go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI=
|
||||
go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI=
|
||||
go.uber.org/zap v1.21.0 h1:WefMeulhovoZ2sYXz7st6K0sLj7bBhpiFaud4r4zST8=
|
||||
go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw=
|
||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
@@ -530,6 +556,9 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh
|
||||
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20210817164053-32db794688a5 h1:HWj/xjIHfjYU5nVXpTM0s39J9CbLn7Cc5a7IC5rwsMQ=
|
||||
golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.0.0-20220214200702-86341886e292 h1:f+lwQ+GtmgoY+A2YaQxlSOnDjXcQ7ZRLWOHbC6HtRqE=
|
||||
golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
|
||||
@@ -565,6 +594,7 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
@@ -608,13 +638,17 @@ golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v
|
||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
|
||||
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
|
||||
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
|
||||
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd h1:O7DYs+zxREGLKzKoMQrtrEacpb0ZVXA5rIwylE2Xchk=
|
||||
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||
golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc=
|
||||
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||
golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e h1:TsQ7F31D3bUCLeqPT0u+yjp1guoArKaNKmCr22PYgTQ=
|
||||
golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
@@ -629,8 +663,13 @@ golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ
|
||||
golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a h1:qfl7ob3DIEs3Ml9oLuPwY2N04gymzAW04WsUQHIClgM=
|
||||
golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
|
||||
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5 h1:OSnWWcOd/CtWQC2cYSBgbTSJv3ciqd8r54ySIW2y3RE=
|
||||
golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
|
||||
golang.org/x/oauth2 v0.0.0-20220628200809-02e64fa58f26 h1:uBgVQYJLi/m8M0wzp+aGwBWt90gMRoOVf+aWTW10QHI=
|
||||
golang.org/x/oauth2 v0.0.0-20220628200809-02e64fa58f26/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE=
|
||||
golang.org/x/oauth2 v0.0.0-20220630143837-2104d58473e0 h1:VnGaRqoLmqZH/3TMLJwYCEWkR4j1nuIU1U9TvbqsDUw=
|
||||
golang.org/x/oauth2 v0.0.0-20220630143837-2104d58473e0/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
@@ -707,10 +746,14 @@ golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBc
|
||||
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211029165221-6e7872819dc8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0=
|
||||
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220209214540-3681064d5158 h1:rm+CHSpPEEW2IsXUib1ThaHIjuBVZjxNgSKmBLFfD4c=
|
||||
golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a h1:dGzPydgVsqGcTRVwiLJ1jVbufYwmzD3LfVPLKsKg+0k=
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY=
|
||||
@@ -731,6 +774,8 @@ golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxb
|
||||
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac h1:7zkz7BUtwNFFqcowJ+RIgu2MaV/MapERkDIy+mwPyjs=
|
||||
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 h1:vVKdlvoWBphwdxWKrFZEuM0kGgGLxUOYcY4U/2Vjg44=
|
||||
golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
@@ -790,10 +835,10 @@ golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
|
||||
golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.1.6-0.20210820212750-d4cc65f0b2ff/go.mod h1:YD9qOF0M9xpSpdWTBbzEl5e/RnCefISl8E5Noe10jFM=
|
||||
golang.org/x/tools v0.1.10-0.20220218145154-897bd77cd717/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
gomodules.xyz/jsonpatch/v2 v2.2.0 h1:4pT439QV83L+G9FkcCriY6EkpcK6r6bK+A5FBUMI7qY=
|
||||
gomodules.xyz/jsonpatch/v2 v2.2.0/go.mod h1:WXp+iVDkoLQqPudfQ9GBlwB2eZ5DKOnjQZCYdOS8GPY=
|
||||
@@ -872,6 +917,7 @@ google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6D
|
||||
google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
|
||||
google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
|
||||
google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
|
||||
google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
||||
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
||||
@@ -908,6 +954,8 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0
|
||||
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ=
|
||||
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw=
|
||||
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
@@ -939,6 +987,8 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C
|
||||
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
|
||||
gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
@@ -948,58 +998,62 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh
|
||||
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
|
||||
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
||||
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
||||
k8s.io/api v0.23.0/go.mod h1:8wmDdLBHBNxtOIytwLstXt5E9PddnZb0GaMcqsvDBpg=
|
||||
k8s.io/api v0.23.4 h1:85gnfXQOWbJa1SiWGpE9EEtHs0UVvDyIsSMpEtl2D4E=
|
||||
k8s.io/api v0.23.4/go.mod h1:i77F4JfyNNrhOjZF7OwwNJS5Y1S9dpwvb9iYRYRczfI=
|
||||
k8s.io/api v0.23.5 h1:zno3LUiMubxD/V1Zw3ijyKO3wxrhbUF1Ck+VjBvfaoA=
|
||||
k8s.io/api v0.23.5/go.mod h1:Na4XuKng8PXJ2JsploYYrivXrINeTaycCGcYgF91Xm8=
|
||||
k8s.io/apiextensions-apiserver v0.23.0 h1:uii8BYmHYiT2ZTAJxmvc3X8UhNYMxl2A0z0Xq3Pm+WY=
|
||||
k8s.io/apiextensions-apiserver v0.23.0/go.mod h1:xIFAEEDlAZgpVBl/1VSjGDmLoXAWRG40+GsWhKhAxY4=
|
||||
k8s.io/api v0.24.2 h1:g518dPU/L7VRLxWfcadQn2OnsiGWVOadTLpdnqgY2OI=
|
||||
k8s.io/api v0.24.2/go.mod h1:AHqbSkTm6YrQ0ObxjO3Pmp/ubFF/KuM7jU+3khoBsOg=
|
||||
k8s.io/apiextensions-apiserver v0.23.5 h1:5SKzdXyvIJKu+zbfPc3kCbWpbxi+O+zdmAJBm26UJqI=
|
||||
k8s.io/apiextensions-apiserver v0.23.5/go.mod h1:ntcPWNXS8ZPKN+zTXuzYMeg731CP0heCTl6gYBxLcuQ=
|
||||
k8s.io/apimachinery v0.23.0/go.mod h1:fFCTTBKvKcwTPFzjlcxp91uPFZr+JA0FubU4fLzzFYc=
|
||||
k8s.io/apimachinery v0.23.4 h1:fhnuMd/xUL3Cjfl64j5ULKZ1/J9n8NuQEgNL+WXWfdM=
|
||||
k8s.io/apimachinery v0.23.4/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM=
|
||||
k8s.io/apiextensions-apiserver v0.24.2 h1:/4NEQHKlEz1MlaK/wHT5KMKC9UKYz6NZz6JE6ov4G6k=
|
||||
k8s.io/apiextensions-apiserver v0.24.2/go.mod h1:e5t2GMFVngUEHUd0wuCJzw8YDwZoqZfJiGOW6mm2hLQ=
|
||||
k8s.io/apimachinery v0.23.5 h1:Va7dwhp8wgkUPWsEXk6XglXWU4IKYLKNlv8VkX7SDM0=
|
||||
k8s.io/apimachinery v0.23.5/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM=
|
||||
k8s.io/apiserver v0.23.0/go.mod h1:Cec35u/9zAepDPPFyT+UMrgqOCjgJ5qtfVJDxjZYmt4=
|
||||
k8s.io/apimachinery v0.24.2 h1:5QlH9SL2C8KMcrNJPor+LbXVTaZRReml7svPEh4OKDM=
|
||||
k8s.io/apimachinery v0.24.2/go.mod h1:82Bi4sCzVBdpYjyI4jY6aHX+YCUchUIrZrXKedjd2UM=
|
||||
k8s.io/apiserver v0.23.5/go.mod h1:7wvMtGJ42VRxzgVI7jkbKvMbuCbVbgsWFT7RyXiRNTw=
|
||||
k8s.io/client-go v0.23.0/go.mod h1:hrDnpnK1mSr65lHHcUuIZIXDgEbzc7/683c6hyG4jTA=
|
||||
k8s.io/client-go v0.23.4 h1:YVWvPeerA2gpUudLelvsolzH7c2sFoXXR5wM/sWqNFU=
|
||||
k8s.io/client-go v0.23.4/go.mod h1:PKnIL4pqLuvYUK1WU7RLTMYKPiIh7MYShLshtRY9cj0=
|
||||
k8s.io/apiserver v0.24.2/go.mod h1:pSuKzr3zV+L+MWqsEo0kHHYwCo77AT5qXbFXP2jbvFI=
|
||||
k8s.io/client-go v0.23.5 h1:zUXHmEuqx0RY4+CsnkOn5l0GU+skkRXKGJrhmE2SLd8=
|
||||
k8s.io/client-go v0.23.5/go.mod h1:flkeinTO1CirYgzMPRWxUCnV0G4Fbu2vLhYCObnt/r4=
|
||||
k8s.io/code-generator v0.23.0/go.mod h1:vQvOhDXhuzqiVfM/YHp+dmg10WDZCchJVObc9MvowsE=
|
||||
k8s.io/client-go v0.24.2 h1:CoXFSf8if+bLEbinDqN9ePIDGzcLtqhfd6jpfnwGOFA=
|
||||
k8s.io/client-go v0.24.2/go.mod h1:zg4Xaoo+umDsfCWr4fCnmLEtQXyCNXCvJuSsglNcV30=
|
||||
k8s.io/code-generator v0.23.5/go.mod h1:S0Q1JVA+kSzTI1oUvbKAxZY/DYbA/ZUb4Uknog12ETk=
|
||||
k8s.io/component-base v0.23.0 h1:UAnyzjvVZ2ZR1lF35YwtNY6VMN94WtOnArcXBu34es8=
|
||||
k8s.io/component-base v0.23.0/go.mod h1:DHH5uiFvLC1edCpvcTDV++NKULdYYU6pR9Tt3HIKMKI=
|
||||
k8s.io/code-generator v0.24.2/go.mod h1:dpVhs00hTuTdTY6jvVxvTFCk6gSMrtfRydbhZwHI15w=
|
||||
k8s.io/component-base v0.23.5 h1:8qgP5R6jG1BBSXmRYW+dsmitIrpk8F/fPEvgDenMCCE=
|
||||
k8s.io/component-base v0.23.5/go.mod h1:c5Nq44KZyt1aLl0IpHX82fhsn84Sb0jjzwjpcA42bY0=
|
||||
k8s.io/component-base v0.24.2 h1:kwpQdoSfbcH+8MPN4tALtajLDfSfYxBDYlXobNWI6OU=
|
||||
k8s.io/component-base v0.24.2/go.mod h1:ucHwW76dajvQ9B7+zecZAP3BVqvrHoOxm8olHEg0nmM=
|
||||
k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
|
||||
k8s.io/gengo v0.0.0-20211129171323-c02415ce4185/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
|
||||
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
|
||||
k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
|
||||
k8s.io/klog/v2 v2.30.0 h1:bUO6drIvCIsvZ/XFgfxoGFQU/a4Qkh0iAlvUR7vlHJw=
|
||||
k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
|
||||
k8s.io/klog/v2 v2.60.1 h1:VW25q3bZx9uE3vvdL6M8ezOX79vA2Aq1nEWLqNQclHc=
|
||||
k8s.io/klog/v2 v2.60.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
|
||||
k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 h1:E3J9oCLlaobFUqsjG9DfKbP2BmgwBL2p7pn0A3dG9W4=
|
||||
k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk=
|
||||
k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42 h1:Gii5eqf+GmIEwGNKQYQClCayuJCe2/4fZUvF7VG99sU=
|
||||
k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42/go.mod h1:Z/45zLw8lUo4wdiUkI+v/ImEGAvu3WatcZl3lPMR4Rk=
|
||||
k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
|
||||
k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
|
||||
k8s.io/utils v0.0.0-20211116205334-6203023598ed h1:ck1fRPWPJWsMd8ZRFsWc6mh/zHp5fZ/shhbrgPUxDAE=
|
||||
k8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
|
||||
k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 h1:HNSDgDCrr/6Ly3WEGKZftiE7IY19Vz2GdbOCyI4qqhc=
|
||||
k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
|
||||
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
|
||||
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
|
||||
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
|
||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.25/go.mod h1:Mlj9PNLmG9bZ6BHFwFKDo5afkpWyUISkb9Me0GnK66I=
|
||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.30/go.mod h1:fEO7lRTdivWO2qYVCVG7dEADOMo/MLDCVr8So2g88Uw=
|
||||
sigs.k8s.io/controller-runtime v0.11.1 h1:7YIHT2QnHJArj/dk9aUkYhfqfK5cIxPOX5gPECfdZLU=
|
||||
sigs.k8s.io/controller-runtime v0.11.1/go.mod h1:KKwLiTooNGu+JmLZGn9Sl3Gjmfj66eMbCQznLP5zcqA=
|
||||
sigs.k8s.io/controller-runtime v0.11.2 h1:H5GTxQl0Mc9UjRJhORusqfJCIjBO8UtUxGggCwL1rLA=
|
||||
sigs.k8s.io/controller-runtime v0.11.2/go.mod h1:P6QCzrEjLaZGqHsfd+os7JQ+WFZhvB8MRFsn4dWF7O4=
|
||||
sigs.k8s.io/controller-runtime v0.12.2 h1:nqV02cvhbAj7tbt21bpPpTByrXGn2INHRsi39lXy9sE=
|
||||
sigs.k8s.io/controller-runtime v0.12.2/go.mod h1:qKsk4WE6zW2Hfj0G4v10EnNB2jMG1C+NTb8h+DwCoU0=
|
||||
sigs.k8s.io/controller-runtime v0.12.3 h1:FCM8xeY/FI8hoAfh/V4XbbYMY20gElh9yh+A98usMio=
|
||||
sigs.k8s.io/controller-runtime v0.12.3/go.mod h1:qKsk4WE6zW2Hfj0G4v10EnNB2jMG1C+NTb8h+DwCoU0=
|
||||
sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 h1:fD1pz4yfdADVNfFmcP2aBEtudwUQ1AlLnRBALr33v3s=
|
||||
sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs=
|
||||
sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 h1:kDi4JBNAsJWfz1aEXhO8Jg87JJaPNLh5tIzYHgStQ9Y=
|
||||
sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2/go.mod h1:B+TnT182UBxE84DiCz4CVE26eOSDAeYCpfDnC2kdKMY=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.2.0/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.2.1 h1:bKCqE9GvQ5tiVHn5rfn1r+yao3aLQEaLzkkmAkf+A6Y=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
|
||||
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
|
||||
hack/signrel/README.md (new file)
@@ -0,0 +1,67 @@
# signrel

`signrel` is a utility command for downloading `actions-runner-controller` release assets, signing them, and uploading the signature files.

## Verifying Release Assets

For users: browse https://keys.openpgp.org/search?q=D8078411E3D8400B574EDB0441B69B728F095A87 and download the public key, or follow [the instructions](https://keys.openpgp.org/about/usage#gnupg-retrieve) to import the key onto your machine.
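
For example, assuming you have `gpg` installed, the key can be fetched directly from the keyserver and its fingerprint double-checked (a sketch; the exact behaviour depends on your gpg version and configuration):

```console
# Import the release signing key from keys.openpgp.org
gpg --keyserver hkps://keys.openpgp.org --recv-keys D8078411E3D8400B574EDB0441B69B728F095A87

# Show the fingerprint of the imported key and compare it with the one above
gpg --fingerprint D8078411E3D8400B574EDB0441B69B728F095A87
```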

Next, verify the signature of the downloaded asset.

With `gpg`, you would usually do that by downloading both the asset and the signature file from the corresponding release page and running `gpg --verify`:

```console
# Download the asset
curl -LO https://github.com/actions-runner-controller/actions-runner-controller/releases/download/v0.23.0/actions-runner-controller.yaml

# Download the signature file
curl -LO https://github.com/actions-runner-controller/actions-runner-controller/releases/download/v0.23.0/actions-runner-controller.yaml.asc

# Verify
gpg --verify actions-runner-controller.yaml{.asc,}
```

On successful verification, the gpg command outputs something like:

```
gpg: Signature made Tue 10 May 2022 04:15:32 AM UTC
gpg: using RSA key D8078411E3D8400B574EDB0441B69B728F095A87
gpg: checking the trustdb
gpg: marginals needed: 3 completes needed: 1 trust model: pgp
gpg: depth: 0 valid: 1 signed: 0 trust: 0-, 0q, 0n, 0m, 0f, 1u
gpg: next trustdb check due at 2024-05-09
gpg: Good signature from "Yusuke Kuoka <ykuoka@gmail.com>" [ultimate]
```

## Signing Release Assets

Assuming you are a maintainer of the project with admin permission, run a command like the one below to sign the assets and upload the signature files:

```console
$ cd hack/signrel

$ for v in v0.23.0 actions-runner-controller-0.18.0 v0.22.3 v0.22.2 actions-runner-controller-0.17.2; do TAG=$v go run . sign; done

Downloading actions-runner-controller.yaml to downloads/v0.23.0/actions-runner-controller.yaml
Uploading downloads/v0.23.0/actions-runner-controller.yaml.asc
downloads/v0.23.0/actions-runner-controller.yaml.asc has been already uploaded
Downloading actions-runner-controller-0.18.0.tgz to downloads/actions-runner-controller-0.18.0/actions-runner-controller-0.18.0.tgz
Uploading downloads/actions-runner-controller-0.18.0/actions-runner-controller-0.18.0.tgz.asc
Upload completed: *snip*
Downloading actions-runner-controller.yaml to downloads/v0.22.3/actions-runner-controller.yaml
Uploading downloads/v0.22.3/actions-runner-controller.yaml.asc
Upload completed: *snip*
Downloading actions-runner-controller.yaml to downloads/v0.22.2/actions-runner-controller.yaml
Uploading downloads/v0.22.2/actions-runner-controller.yaml.asc
Upload completed: *snip*
Downloading actions-runner-controller-0.17.2.tgz to downloads/actions-runner-controller-0.17.2/actions-runner-controller-0.17.2.tgz
Uploading downloads/actions-runner-controller-0.17.2/actions-runner-controller-0.17.2.tgz.asc
Upload completed: *snip*
actions-runner-controller-0.17.2.tgz.asc"}
```
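
Note that, as `hack/signrel/main.go` (shown later in this diff) suggests, the tool reads two environment variables: `GITHUB_TOKEN` for the GitHub API calls and asset uploads, and `SIGNREL_PASSWORD` for the GPG key passphrase. A hypothetical invocation might therefore look like:

```console
# Both variable names come from hack/signrel/main.go; the values here are placeholders
export GITHUB_TOKEN=<token with repo scope>
export SIGNREL_PASSWORD=<passphrase of the signing key>

TAG=v0.23.0 go run . sign
```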

To retrieve all the available release tags, run:

```
$ go run . tags | jq -r .[].tag_name
```
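
The two subcommands compose naturally; for example, to sign the assets of every release returned by `tags` (an untested sketch, subject to GitHub API rate limits):

```console
for v in $(go run . tags | jq -r .[].tag_name); do TAG=$v go run . sign; done
```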

hack/signrel/go.mod (new file)
@@ -0,0 +1,3 @@
module github.com/actions-runner-controller/actions-runner-controller/hack/sigrel

go 1.17
hack/signrel/main.go (new file)
@@ -0,0 +1,325 @@
package main

import (
    "encoding/json"
    "fmt"
    "io"
    "log"
    "net/http"
    "os"
    "os/exec"
    "path/filepath"
    "strings"
)

func main() {
    a := &githubReleaseAsset{}

    if len(os.Args) != 2 {
        fmt.Fprintf(os.Stderr, "Invalid command: %v\n", os.Args)
        fmt.Fprintf(os.Stderr, "USAGE: signrel [list-tags|sign-assets]\n")
        os.Exit(2)
    }

    switch cmd := os.Args[1]; cmd {
    case "tags":
        listTags(a)
    case "sign":
        tag := os.Getenv("TAG")
        sign(a, tag)
    default:
        fmt.Fprintf(os.Stderr, "Unknown command %s\n", cmd)
        os.Exit(2)
    }
}

func listTags(a *githubReleaseAsset) {
    _, err := a.getRecentReleases(owner, repo)
    if err != nil {
        fmt.Fprintf(os.Stderr, "%v\n", err)
        os.Exit(1)
    }
}

func sign(a *githubReleaseAsset, tag string) {
    if err := a.Download(tag, "downloads"); err != nil {
        fmt.Fprintf(os.Stderr, "%v\n", err)
        os.Exit(1)
    }
}

type Release struct {
    ID int64 `json:"id"`
}

type Asset struct {
    Name string `json:"name"`
    ID int64 `json:"id"`
    URL string `json:"url"`
}

type AssetsResponse struct {
    Assets []Asset
}

type githubReleaseAsset struct {
}

const (
    owner = "actions-runner-controller"
    repo = "actions-runner-controller"
)

// GetFile downloads the given URL into the given path. The URL must
// reference a single file. If possible, the Getter should check if
// the remote end contains the same file and no-op this operation.
func (a *githubReleaseAsset) Download(tag string, dstDir string) error {
    release, err := a.getReleaseByTag(owner, repo, tag)
    if err != nil {
        return err
    }

    assets, err := a.getAssetsByReleaseID(owner, repo, release.ID)
    if err != nil {
        return err
    }

    d := filepath.Join(dstDir, tag)
    if err := os.MkdirAll(d, 0755); err != nil {
        return err
    }

    for _, asset := range assets {
        if strings.HasSuffix(asset.Name, ".asc") {
            continue
        }

        p := filepath.Join(d, asset.Name)
        fmt.Fprintf(os.Stderr, "Downloading %s to %s\n", asset.Name, p)
        if err := a.getFile(p, owner, repo, asset.ID); err != nil {
            return err
        }

        if info, _ := os.Stat(p + ".asc"); info == nil {
            _, err := a.sign(p)
            if err != nil {
                return err
            }
        }

        sig := p + ".asc"

        fmt.Fprintf(os.Stdout, "Uploading %s\n", sig)

        if err := a.upload(sig, release.ID); err != nil {
            return err
        }
    }

    return nil
}

func (a *githubReleaseAsset) getRecentReleases(owner, repo string) (*Release, error) {
    reqURL := fmt.Sprintf("https://api.github.com/repos/%s/%s/releases", owner, repo)

    req, err := http.NewRequest("GET", reqURL, nil)
    if err != nil {
        return nil, err
    }
    if gt := os.Getenv("GITHUB_TOKEN"); gt != "" {
        req.Header = make(http.Header)
        req.Header.Add("authorization", "token "+gt)
    }

    res, err := http.DefaultClient.Do(req)
    if err != nil {
        return nil, err
    }
    defer res.Body.Close()

    if res.StatusCode != http.StatusOK {
        return nil, fmt.Errorf("GET %s: %s", reqURL, res.Status)
    }

    body, err := io.ReadAll(res.Body)
    if err != nil {
        return nil, err
    }

    fmt.Fprintf(os.Stdout, "%s\n", string(body))

    return nil, nil
}

func (a *githubReleaseAsset) getReleaseByTag(owner, repo, tag string) (*Release, error) {
    var release Release

    reqURL := fmt.Sprintf("https://api.github.com/repos/%s/%s/releases/tags/%s", owner, repo, tag)

    req, err := http.NewRequest("GET", reqURL, nil)
    if err != nil {
        return nil, err
    }
    if gt := os.Getenv("GITHUB_TOKEN"); gt != "" {
        req.Header = make(http.Header)
        req.Header.Add("authorization", "token "+gt)
    }

    res, err := http.DefaultClient.Do(req)
    if err != nil {
        return nil, err
    }
    defer res.Body.Close()

    if res.StatusCode != http.StatusOK {
        return nil, fmt.Errorf("GET %s: %s", reqURL, res.Status)
    }

    d := json.NewDecoder(res.Body)

    if err := d.Decode(&release); err != nil {
        return nil, err
    }

    return &release, nil
}

func (a *githubReleaseAsset) getAssetsByReleaseID(owner, repo string, releaseID int64) ([]Asset, error) {
    var assets []Asset

    reqURL := fmt.Sprintf("https://api.github.com/repos/%s/%s/releases/%d/assets", owner, repo, releaseID)

    req, err := http.NewRequest("GET", reqURL, nil)
    if err != nil {
        return nil, err
    }
    if gt := os.Getenv("GITHUB_TOKEN"); gt != "" {
        req.Header = make(http.Header)
        req.Header.Add("authorization", "token "+gt)
    }

    res, err := http.DefaultClient.Do(req)
    if err != nil {
        return nil, err
    }
    defer res.Body.Close()

    if res.StatusCode != http.StatusOK {
        return nil, fmt.Errorf("GET %s: %s", reqURL, res.Status)
    }

    d := json.NewDecoder(res.Body)

    if err := d.Decode(&assets); err != nil {
        return nil, err
    }

    return assets, nil
}

func (a *githubReleaseAsset) getFile(dst string, owner, repo string, assetID int64) error {
    // Create all the parent directories if needed
    dir := filepath.Dir(dst)
    if err := os.MkdirAll(dir, 0755); err != nil {
        return fmt.Errorf("mkdir %s: %w", dir, err)
    }

    req, err := http.NewRequest("GET", fmt.Sprintf("https://api.github.com/repos/%s/%s/releases/assets/%d", owner, repo, assetID), nil)
    if err != nil {
        log.Fatal(err)
    }
    req.Header = make(http.Header)
    if gt := os.Getenv("GITHUB_TOKEN"); gt != "" {
        req.Header.Add("authorization", "token "+gt)
    }
    req.Header.Add("accept", "application/octet-stream")

    res, err := http.DefaultClient.Do(req)
    if err != nil {
        return err
    }
    defer res.Body.Close()

    f, err := os.OpenFile(dst, os.O_RDWR|os.O_CREATE, os.FileMode(0666))
    if err != nil {
        return fmt.Errorf("open file %s: %w", dst, err)
    }
    defer f.Close()

    if _, err := io.Copy(f, res.Body); err != nil {
        return err
    }

    return nil
}

func (a *githubReleaseAsset) sign(path string) (string, error) {
    pass := os.Getenv("SIGNREL_PASSWORD")
    cmd := exec.Command("gpg", "--armor", "--detach-sign", "--pinentry-mode", "loopback", "--passphrase", pass, path)
    cap, err := cmd.CombinedOutput()
    if err != nil {
        fmt.Fprintf(os.Stderr, "gpg: %s", string(cap))
        return "", err
    }
    return path + ".asc", nil
}

func (a *githubReleaseAsset) upload(sig string, releaseID int64) error {
    assetName := filepath.Base(sig)
    url := fmt.Sprintf("https://uploads.github.com/repos/%s/%s/releases/%d/assets?name=%s", owner, repo, releaseID, assetName)
    f, err := os.Open(sig)
    if err != nil {
        return err
    }
    defer f.Close()

    size, err := f.Seek(0, 2)
    if err != nil {
        return err
    }

    _, err = f.Seek(0, 0)
    if err != nil {
        return err
    }

    req, err := http.NewRequest("POST", url, f)
    if err != nil {
        log.Fatal(err)
    }
    req.Header = make(http.Header)
    if gt := os.Getenv("GITHUB_TOKEN"); gt != "" {
        req.Header.Add("authorization", "token "+gt)
    }
    req.Header.Add("content-type", "application/octet-stream")

    req.ContentLength = size
    req.Header.Add("accept", "application/vnd.github.v3+json")

    // blob, _ := httputil.DumpRequestOut(req, true)
    // fmt.Printf("%s\n", blob)

    res, err := http.DefaultClient.Do(req)
    if err != nil {
        return err
    }
    defer res.Body.Close()

    body, err := io.ReadAll(res.Body)
    if err != nil {
        return err
    }

    if res.StatusCode == 422 {
        fmt.Fprintf(os.Stdout, "%s has been already uploaded\n", sig)
        return nil
    }

    if res.StatusCode >= 300 {
        return fmt.Errorf("unexpected http status %d: %s", res.StatusCode, body)
    }

    fmt.Fprintf(os.Stdout, "Upload completed: %s\n", body)

    return err
}
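
For reference, the upload step above uses the standard GitHub release-asset upload endpoint; a roughly equivalent manual invocation with `curl` (release ID and file names here are hypothetical placeholders) would be:

```console
curl -X POST \
  -H "Authorization: token $GITHUB_TOKEN" \
  -H "Content-Type: application/octet-stream" \
  -H "Accept: application/vnd.github.v3+json" \
  --data-binary @downloads/v0.23.0/actions-runner-controller.yaml.asc \
  "https://uploads.github.com/repos/actions-runner-controller/actions-runner-controller/releases/<release-id>/assets?name=actions-runner-controller.yaml.asc"
```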
main.go
@@ -68,13 +68,14 @@ func main() {
err error
ghClient *github.Client

metricsAddr string
enableLeaderElection bool
leaderElectionId string
syncPeriod time.Duration
metricsAddr string
enableLeaderElection bool
runnerStatusUpdateHook bool
leaderElectionId string
port int
syncPeriod time.Duration

gitHubAPICacheDuration time.Duration
defaultScaleDownDelay time.Duration
defaultScaleDownDelay time.Duration

runnerImage string
runnerImagePullSecrets stringSlice
@@ -98,8 +99,8 @@ func main() {
flag.BoolVar(&enableLeaderElection, "enable-leader-election", false,
"Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager.")
flag.StringVar(&leaderElectionId, "leader-election-id", "actions-runner-controller", "Controller id for leader election.")
flag.StringVar(&runnerImage, "runner-image", defaultRunnerImage, "The image name of self-hosted runner container.")
flag.StringVar(&dockerImage, "docker-image", defaultDockerImage, "The image name of docker sidecar container.")
flag.StringVar(&runnerImage, "runner-image", defaultRunnerImage, "The image name of self-hosted runner container to use by default if one isn't defined in yaml.")
flag.StringVar(&dockerImage, "docker-image", defaultDockerImage, "The image name of docker sidecar container to use by default if one isn't defined in yaml.")
flag.Var(&runnerImagePullSecrets, "runner-image-pull-secret", "The default image-pull secret name for self-hosted runner container.")
flag.StringVar(&dockerRegistryMirror, "docker-registry-mirror", "", "The default Docker Registry Mirror used by runners.")
flag.StringVar(&c.Token, "github-token", c.Token, "The personal access token of GitHub.")
@@ -111,8 +112,9 @@ func main() {
flag.StringVar(&c.BasicauthUsername, "github-basicauth-username", c.BasicauthUsername, "Username for GitHub basic auth to use instead of PAT or GitHub APP in case it's running behind a proxy API")
flag.StringVar(&c.BasicauthPassword, "github-basicauth-password", c.BasicauthPassword, "Password for GitHub basic auth to use instead of PAT or GitHub APP in case it's running behind a proxy API")
flag.StringVar(&c.RunnerGitHubURL, "runner-github-url", c.RunnerGitHubURL, "GitHub URL to be used by runners during registration")
flag.DurationVar(&gitHubAPICacheDuration, "github-api-cache-duration", 0, "DEPRECATED: The duration until the GitHub API cache expires. Setting this to e.g. 10m results in the controller tries its best not to make the same API call within 10m to reduce the chance of being rate-limited. Defaults to mostly the same value as sync-period. If you're tweaking this in order to make autoscaling more responsive, you'll probably want to tweak sync-period, too")
flag.BoolVar(&runnerStatusUpdateHook, "runner-status-update-hook", false, "Use custom RBAC for runners (role, role binding and service account).")
flag.DurationVar(&defaultScaleDownDelay, "default-scale-down-delay", controllers.DefaultScaleDownDelay, "The approximate delay for a scale down followed by a scale up, used to prevent flapping (down->up->down->... loop)")
flag.IntVar(&port, "port", 9443, "The port to which the admission webhook endpoint should bind")
flag.DurationVar(&syncPeriod, "sync-period", 1*time.Minute, "Determines the minimum frequency at which K8s resources managed by this controller are reconciled.")
flag.Var(&commonRunnerLabels, "common-runner-labels", "Runner labels in the K1=V1,K2=V2,... format that are inherited all the runners created by the controller. See https://github.com/actions-runner-controller/actions-runner-controller/issues/321 for more information")
flag.StringVar(&namespace, "watch-namespace", "", "The namespace to watch for custom resources. Set to empty for letting it watch for all namespaces.")
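
(The hunks above add the `--runner-status-update-hook` and `--port` flags and deprecate `--github-api-cache-duration`. As a hypothetical illustration only, and assuming the controller binary is started as `/manager` as in typical kubebuilder-style deployments, an invocation using the new flags might look like:)

```console
/manager \
  --enable-leader-election \
  --port 9443 \
  --runner-status-update-hook \
  --default-scale-down-delay 10m \
  --sync-period 1m
```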
@@ -136,7 +138,7 @@ func main() {
MetricsBindAddress: metricsAddr,
LeaderElection: enableLeaderElection,
LeaderElectionID: leaderElectionId,
Port: 9443,
Port: port,
SyncPeriod: &syncPeriod,
Namespace: namespace,
})
@@ -145,13 +147,19 @@ func main() {
os.Exit(1)
}

multiClient := controllers.NewMultiGitHubClient(
mgr.GetClient(),
ghClient,
)

runnerReconciler := &controllers.RunnerReconciler{
Client: mgr.GetClient(),
Log: log.WithName("runner"),
Scheme: mgr.GetScheme(),
GitHubClient: ghClient,
DockerImage: dockerImage,
DockerRegistryMirror: dockerRegistryMirror,
Client: mgr.GetClient(),
Log: log.WithName("runner"),
Scheme: mgr.GetScheme(),
GitHubClient: multiClient,
DockerImage: dockerImage,
DockerRegistryMirror: dockerRegistryMirror,
UseRunnerStatusUpdateHook: runnerStatusUpdateHook,
// Defaults for self-hosted runner containers
RunnerImage: runnerImage,
RunnerImagePullSecrets: runnerImagePullSecrets,
@@ -163,10 +171,9 @@ func main() {
}

runnerReplicaSetReconciler := &controllers.RunnerReplicaSetReconciler{
Client: mgr.GetClient(),
Log: log.WithName("runnerreplicaset"),
Scheme: mgr.GetScheme(),
GitHubClient: ghClient,
Client: mgr.GetClient(),
Log: log.WithName("runnerreplicaset"),
Scheme: mgr.GetScheme(),
}

if err = runnerReplicaSetReconciler.SetupWithManager(mgr); err != nil {
@@ -193,32 +200,27 @@ func main() {
CommonRunnerLabels: commonRunnerLabels,
DockerImage: dockerImage,
DockerRegistryMirror: dockerRegistryMirror,
GitHubBaseURL: ghClient.GithubBaseURL,
GitHubClient: multiClient,
// Defaults for self-hosted runner containers
RunnerImage: runnerImage,
RunnerImagePullSecrets: runnerImagePullSecrets,
RunnerImage: runnerImage,
RunnerImagePullSecrets: runnerImagePullSecrets,
UseRunnerStatusUpdateHook: runnerStatusUpdateHook,
}

if err = runnerSetReconciler.SetupWithManager(mgr); err != nil {
log.Error(err, "unable to create controller", "controller", "RunnerSet")
os.Exit(1)
}
if gitHubAPICacheDuration == 0 {
gitHubAPICacheDuration = syncPeriod - 10*time.Second
}

if gitHubAPICacheDuration < 0 {
gitHubAPICacheDuration = 0
}

log.Info(
"Initializing actions-runner-controller",
"github-api-cache-duration", gitHubAPICacheDuration,
"default-scale-down-delay", defaultScaleDownDelay,
"sync-period", syncPeriod,
"runner-image", runnerImage,
"docker-image", dockerImage,
"default-runner-image", runnerImage,
"default-docker-image", dockerImage,
"common-runnner-labels", commonRunnerLabels,
"leader-election-enabled", enableLeaderElection,
"leader-election-id", leaderElectionId,
"watch-namespace", namespace,
)

@@ -226,8 +228,7 @@ func main() {
Client: mgr.GetClient(),
Log: log.WithName("horizontalrunnerautoscaler"),
Scheme: mgr.GetScheme(),
GitHubClient: ghClient,
CacheDuration: gitHubAPICacheDuration,
GitHubClient: multiClient,
DefaultScaleDownDelay: defaultScaleDownDelay,
}

@@ -235,7 +236,19 @@ func main() {
Client: mgr.GetClient(),
Log: log.WithName("runnerpod"),
Scheme: mgr.GetScheme(),
GitHubClient: ghClient,
GitHubClient: multiClient,
}

runnerPersistentVolumeReconciler := &controllers.RunnerPersistentVolumeReconciler{
Client: mgr.GetClient(),
Log: log.WithName("runnerpersistentvolume"),
Scheme: mgr.GetScheme(),
}

runnerPersistentVolumeClaimReconciler := &controllers.RunnerPersistentVolumeClaimReconciler{
Client: mgr.GetClient(),
Log: log.WithName("runnerpersistentvolumeclaim"),
Scheme: mgr.GetScheme(),
}

if err = runnerPodReconciler.SetupWithManager(mgr); err != nil {
@@ -248,6 +261,16 @@ func main() {
os.Exit(1)
}

if err = runnerPersistentVolumeReconciler.SetupWithManager(mgr); err != nil {
log.Error(err, "unable to create controller", "controller", "RunnerPersistentVolume")
os.Exit(1)
}

if err = runnerPersistentVolumeClaimReconciler.SetupWithManager(mgr); err != nil {
log.Error(err, "unable to create controller", "controller", "RunnerPersistentVolumeClaim")
os.Exit(1)
}

if err = (&actionsv1alpha1.Runner{}).SetupWebhookWithManager(mgr); err != nil {
log.Error(err, "unable to create webhook", "webhook", "Runner")
os.Exit(1)
@@ -264,7 +287,7 @@ func main() {

injector := &controllers.PodRunnerTokenInjector{
Client: mgr.GetClient(),
GitHubClient: ghClient,
GitHubClient: multiClient,
Log: ctrl.Log.WithName("webhook").WithName("PodRunnerTokenInjector"),
}
if err = injector.SetupWithManager(mgr); err != nil {

@@ -11,7 +11,7 @@ import (
"time"

"github.com/actions-runner-controller/actions-runner-controller/github"
gogithub "github.com/google/go-github/v39/github"
gogithub "github.com/google/go-github/v45/github"
)

type server struct {
Some files were not shown because too many files have changed in this diff.