Mirror of https://github.com/actions/actions-runner-controller.git (synced 2025-12-10 11:41:27 +00:00)

Compare commits: 121 commits, range v0.23.0 ... actions-ru
Commit SHAs:

edbdef8d20, a190fa97bb, bfc5ea4727, 5a9e8545aa, 4446ba57e1, d62c8a4697, 946d5b1fa7, da6b07660e,
e3deb0d752, 82641e5036, 2fe6adf5b7, 736126b793, 6abf5bbac8, dc4f116bda, cda10fd243, b5d1a63bdf,
6f3e23973d, a517c1ff66, 9b28e633c1, 8161136cbd, a9ac5a1cbf, d4f35cff4f, f661249f07, 73e430ce54,
858ef8979d, 1ce0a183a6, 63935d2053, fc63d6d26e, 5ea08411e6, 067ed2e5ec, d86bd2bcd7, ddd417f756,
0386c0734c, af96de6184, abb8615796, bc7a3cab1b, e2c8163b8c, 84d16c1c12, 071898c96b, f24e2fa44e,
3c7d3d6b57, 23f091d7fa, 667764e027, de693c4191, 510fc9c834, 7fd5e24961, 9974b1a2b7, bd91b73fd9,
a7ae910ee4, 2733c36d0e, 0ef9a22cd4, 933b0c7888, 1b7ec33135, a62882d243, 0cd13fe51d, 01c8dc237e,
7c4db63718, 3d88b9630a, 1152e6b31d, ac27df8301, 9dd26168d6, 18bfb28c0b, 84210e900b, ef3313d147,
c7eea169ad, 63be0223ad, 5bbea772f7, 2aa3f1e142, 3e988afc09, 84210f3d2b, 536692181b, 23403172cb,
8a8ec43364, 78c01fd31d, bf45aa9f6b, b5aa1750bb, cdc9d20e7a, 8035d6d9f8, 65f7ee92a6, fca8a538db,
95ddc77245, b5194fd75a, adf69bbea0, b43ef70ac6, f1caebbaf0, ede28f5046, f08ab1490d, 772ca57056,
51b13e3bab, 81017b130f, bdbcf66569, 0e15a78541, f85c3d06d9, 51ba7d7160, 759349de11, 3014e98681,
5f4be6a883, b98f470a70, e46b90f758, 3a7e8c844b, 65a67ee61c, 215ba36fd1, 27774b47bd, fbde2b9a41,
212098183a, 4a5097d8cf, 9c57d085f8, d6622f9369, 3b67ee727f, e6bddcd238, f60e57d789, 3ca1152420,
e94fa19843, 99832d7104, 289bcd8b64, 5e8cba82c2, dabbc99c78, d01595cfbc, c1e5829b03, 800d6bd586,
d3b7f0bf7d
.github/ISSUE_TEMPLATE/bug_report.yml (vendored, 29 lines changed)

@@ -17,6 +17,12 @@ body:
       label: Helm Chart Version
       description: Run `helm list` and see what's shown under CHART VERSION. Any release tags prefixed with `actions-runner-controller-` are for chart releases
       placeholder: ex. 0.11.0
+  - type: input
+    id: cert-manager-version
+    attributes:
+      label: CertManager Version
+      description: Run `kubectl get po -o yaml $CERT_MANAGER_POD` and see the image tag, or run `helm list` and see what's shown under APP VERSION for your cert-manager Helm release.
+      placeholder: ex. 1.8
   - type: dropdown
     id: deployment-method
     attributes:
@@ -29,6 +35,17 @@ body:
         - Other
     validations:
       required: true
+  - type: textarea
+    id: cert-manager
+    attributes:
+      label: cert-manager installation
+      description: Confirm that you've installed cert-manager correctly by answering a few questions
+      placeholder: |
+        - Did you follow https://github.com/actions-runner-controller/actions-runner-controller#installation? If not, describe the installation process so that we can reproduce your environment.
+        - Are you sure you've installed cert-manager from an official source?
+        (Note that we won't provide user support for cert-manager itself. Make sure cert-manager is fully working before testing ARC or reporting a bug
+    validations:
+      required: true
   - type: checkboxes
     id: checks
     attributes:
@@ -41,7 +58,7 @@ body:
           required: true
        - label: My actions-runner-controller version (v0.x.y) does support the feature
          required: true
-        - label: I've already upgraded ARC to the latest and it didn't fix the issue
+        - label: I've already upgraded ARC (including the CRDs, see charts/actions-runner-controller/docs/UPGRADING.md for details) to the latest and it didn't fix the issue
          required: true
  - type: textarea
    id: resource-definitions
@@ -113,9 +130,11 @@ body:
    id: controller-logs
    attributes:
      label: Controller Logs
-      description: "Include logs from `actions-runner-controller`'s controller-manager pod"
+      description: "NEVER EVER OMIT THIS! Include logs from `actions-runner-controller`'s controller-manager pod"
      render: shell
      placeholder: |
+        PROVIDE THE LOGS VIA A GIST LINK (https://gist.github.com/), NOT DIRECTLY IN THIS TEXT AREA
+
        To grab controller logs:

        # Set NS according to your setup
@@ -125,8 +144,6 @@ body:
        kubectl -n $NS get po

        kubectl -n $NS logs $POD_NAME > arc.log
-
-        Upload it to e.g. https://gist.github.com/ and paste the link to it here.
    validations:
      required: true
  - type: textarea
@@ -136,6 +153,8 @@ body:
      description: "Include logs from runner pod(s)"
      render: shell
      placeholder: |
+        PROVIDE THE LOGS VIA A GIST LINK (https://gist.github.com/), NOT DIRECTLY IN THIS TEXT AREA
+
        To grab the runner pod logs:

        # Set NS according to your setup. It should match your RunnerDeployment's metadata.namespace.
@@ -146,8 +165,6 @@ body:

        kubectl -n $NS logs $POD_NAME -c runner > runnerpod_runner.log
        kubectl -n $NS logs $POD_NAME -c docker > runnerpod_docker.log
-
-        Upload it to e.g. https://gist.github.com/ and paste the link to it here.
    validations:
      required: true
  - type: textarea
Composite Docker setup action (file header not captured in this dump; referenced by the workflows below as ./.github/actions/setup-docker-environment)

@@ -29,23 +29,23 @@ runs:
        shell: bash

      - name: Set up QEMU
-       uses: docker/setup-qemu-action@v1
+       uses: docker/setup-qemu-action@v2

      - name: Set up Docker Buildx
-       uses: docker/setup-buildx-action@v1
+       uses: docker/setup-buildx-action@v2
        with:
          version: latest

      - name: Login to DockerHub
-       if: ${{ github.ref == 'master' && github.event.pull_request.merged == true }}
-       uses: docker/login-action@v1
+       if: ${{ github.event_name == 'release' || github.event_name == 'push' && github.ref == 'refs/heads/master' }}
+       uses: docker/login-action@v2
        with:
          username: ${{ inputs.username }}
          password: ${{ inputs.password }}

      - name: Login to GitHub Container Registry
-       uses: docker/login-action@v1
-       if: ${{ github.ref == 'master' && github.event.pull_request.merged == true }}
+       if: ${{ github.event_name == 'release' || github.event_name == 'push' && github.ref == 'refs/heads/master' }}
+       uses: docker/login-action@v2
        with:
          registry: ghcr.io
          username: ${{ inputs.ghcr_username }}
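In the new `if:` guards above, GitHub Actions expressions evaluate `&&` before `||`, so the condition groups as in the explicitly parenthesized sketch below (an equivalent form added here for illustration; it is not part of the diff):

```yaml
# Logins run for release events, or for pushes to the master branch:
if: ${{ github.event_name == 'release' || (github.event_name == 'push' && github.ref == 'refs/heads/master') }}
```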
.github/lock.yml (vendored, deleted, 25 lines)

@@ -1,25 +0,0 @@
-# Configuration for Lock Threads
-# Repo: https://github.com/dessant/lock-threads-app
-# App: https://github.com/apps/lock
-
-# Number of days of inactivity before a closed issue or pull request is locked
-daysUntilLock: 7
-
-# Skip issues and pull requests created before a given timestamp. Timestamp must
-# follow ISO 8601 (`YYYY-MM-DD`). Set to `false` to disable
-skipCreatedBefore: false
-
-# Issues and pull requests with these labels will be ignored. Set to `[]` to disable
-exemptLabels: []
-
-# Label to add before locking, such as `outdated`. Set to `false` to disable
-lockLabel: false
-
-# Comment to post before locking. Set to `false` to disable
-lockComment: >
-  This thread has been automatically locked since there has not been
-  any recent activity after it was closed. Please open a new issue for
-  related bugs.
-
-# Assign `resolved` as the reason for locking. Set to `false` to disable
-setLockReason: true
.github/renovate.json5 (vendored, 6 lines changed)

@@ -13,7 +13,7 @@
    {
      // use https://github.com/actions/runner/releases
      "fileMatch": [
-       ".github/workflows/runners.yml"
+       ".github/workflows/runners.yaml"
      ],
      "matchStrings": ["RUNNER_VERSION: +(?<currentValue>.*?)\\n"],
      "depNameTemplate": "actions/runner",
@@ -30,8 +30,8 @@
    },
    {
      "fileMatch": [
-       "runner/Dockerfile",
-       "runner/Dockerfile.dindrunner"
+       "runner/actions-runner.dockerfile",
+       "runner/actions-runner-dind.dockerfile"
      ],
      "matchStrings": ["RUNNER_VERSION=+(?<currentValue>.*?)\\n"],
      "depNameTemplate": "actions/runner",
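For context, the first regex manager above captures the runner version from the workflow's `env:` block; a sketch of the kind of input it matches, based on the `runners.yaml` hunks later in this compare:

```yaml
# In .github/workflows/runners.yaml, `RUNNER_VERSION: +(?<currentValue>.*?)\n`
# captures "2.294.0"; Renovate then compares it against actions/runner releases.
env:
  RUNNER_VERSION: 2.294.0
  DOCKER_VERSION: 20.10.12
```

The second manager does the same for `RUNNER_VERSION=` build arguments in the renamed runner dockerfiles.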
Controller release workflow (file header not captured in this dump; referenced elsewhere as .github/workflows/publish-arc.yaml)

@@ -1,26 +1,28 @@
-name: Publish Controller Image
+name: Publish ARC

 on:
   release:
-    types: [published]
+    types:
+      - published

+# https://docs.github.com/en/rest/overview/permissions-required-for-github-apps
+permissions:
+  contents: write
+  packages: write
+
 jobs:
-  build:
-    runs-on: ubuntu-latest
+  release-controller:
     name: Release
+    runs-on: ubuntu-latest
     env:
       DOCKERHUB_USERNAME: ${{ secrets.DOCKER_USER }}
     steps:
-      - name: Set outputs
-        id: vars
-        run: echo ::set-output name=sha_short::${GITHUB_SHA::7}
-
       - name: Checkout
         uses: actions/checkout@v3

       - uses: actions/setup-go@v3
         with:
-          go-version: '1.17.7'
+          go-version: '1.18.2'

       - name: Install tools
         run: |
@@ -39,25 +41,20 @@ jobs:
       - name: Upload artifacts
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        run: make github-release
+        run: |
+          make github-release

-      - name: Set up QEMU
-        uses: docker/setup-qemu-action@v1
-
-      - name: Set up Docker Buildx
-        id: buildx
-        uses: docker/setup-buildx-action@v1
+      - name: Setup Docker Environment
+        id: vars
+        uses: ./.github/actions/setup-docker-environment
         with:
-          version: latest
-
-      - name: Login to DockerHub
-        uses: docker/login-action@v1
-        with:
-          username: ${{ secrets.DOCKER_USER }}
+          username: ${{ env.DOCKERHUB_USERNAME }}
           password: ${{ secrets.DOCKER_ACCESS_TOKEN }}
+          ghcr_username: ${{ github.actor }}
+          ghcr_password: ${{ secrets.GITHUB_TOKEN }}

       - name: Build and Push
-        uses: docker/build-push-action@v2
+        uses: docker/build-push-action@v3
         with:
           file: Dockerfile
           platforms: linux/amd64,linux/arm64
@@ -66,4 +63,8 @@ jobs:
             ${{ env.DOCKERHUB_USERNAME }}/actions-runner-controller:latest
             ${{ env.DOCKERHUB_USERNAME }}/actions-runner-controller:${{ env.VERSION }}
             ${{ env.DOCKERHUB_USERNAME }}/actions-runner-controller:${{ env.VERSION }}-${{ steps.vars.outputs.sha_short }}
+            ghcr.io/actions-runner-controller/actions-runner-controller:latest
+            ghcr.io/actions-runner-controller/actions-runner-controller:${{ env.VERSION }}
+            ghcr.io/actions-runner-controller/actions-runner-controller:${{ env.VERSION }}-${{ steps.vars.outputs.sha_short }}
+          cache-from: type=gha
+          cache-to: type=gha,mode=max
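The `Setup Docker Environment` step above replaces the inline QEMU, Buildx, and login steps with the shared composite action whose hunk appears earlier in this compare. The action's manifest itself is not included in this dump; the following is a hypothetical sketch inferred from the `with:` inputs used here and the action's `runs:` hunk, so the real file may differ:

```yaml
# Hypothetical .github/actions/setup-docker-environment/action.yaml (inferred)
name: Setup Docker Environment
inputs:
  username:
    required: true
  password:
    required: true
  ghcr_username:
    required: true
  ghcr_password:
    required: true
runs:
  using: composite
  steps:
    - name: Set up QEMU
      uses: docker/setup-qemu-action@v2
    - name: Set up Docker Buildx
      uses: docker/setup-buildx-action@v2
      with:
        version: latest
    # DockerHub and GHCR logins follow, guarded as shown in the composite
    # action hunk earlier in this compare view. The action presumably also
    # exposes a `sha_short` output, since callers read
    # `steps.vars.outputs.sha_short` after giving this step `id: vars`.
```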
.github/workflows/publish-canary.yaml (vendored, new file, 58 lines)

@@ -0,0 +1,58 @@
+name: Publish Canary Image
+
+on:
+  push:
+    branches:
+      - master
+    paths-ignore:
+      - '**.md'
+      - '.github/ISSUE_TEMPLATE/**'
+      - '.github/workflows/validate-chart.yaml'
+      - '.github/workflows/publish-chart.yaml'
+      - '.github/workflows/publish-arc.yaml'
+      - '.github/workflows/runners.yaml'
+      - '.github/workflows/validate-entrypoint.yaml'
+      - '.github/renovate.*'
+      - 'runner/**'
+      - '.gitignore'
+      - 'PROJECT'
+      - 'LICENSE'
+      - 'Makefile'
+
+# https://docs.github.com/en/rest/overview/permissions-required-for-github-apps
+permissions:
+  contents: read
+  packages: write
+
+jobs:
+  canary-build:
+    name: Build and Publish Canary Image
+    runs-on: ubuntu-latest
+    env:
+      DOCKERHUB_USERNAME: ${{ secrets.DOCKER_USER }}
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v3
+
+      - name: Setup Docker Environment
+        id: vars
+        uses: ./.github/actions/setup-docker-environment
+        with:
+          username: ${{ env.DOCKERHUB_USERNAME }}
+          password: ${{ secrets.DOCKER_ACCESS_TOKEN }}
+          ghcr_username: ${{ github.actor }}
+          ghcr_password: ${{ secrets.GITHUB_TOKEN }}
+
+      # Considered unstable builds
+      # See Issue #285, PR #286, and PR #323 for more information
+      - name: Build and Push
+        uses: docker/build-push-action@v3
+        with:
+          file: Dockerfile
+          platforms: linux/amd64,linux/arm64
+          push: true
+          tags: |
+            ${{ env.DOCKERHUB_USERNAME }}/actions-runner-controller:canary
+            ghcr.io/actions-runner-controller/actions-runner-controller:canary
+          cache-from: type=gha,scope=arc-canary
+          cache-to: type=gha,mode=max,scope=arc-canary
Chart publish workflow (file header not captured in this dump; renamed per the hunks below to .github/workflows/publish-chart.yaml)

@@ -1,4 +1,4 @@
-name: Publish helm chart
+name: Publish Helm Chart

 on:
   push:
@@ -6,7 +6,7 @@ on:
       - master
     paths:
       - 'charts/**'
-      - '.github/workflows/on-push-master-publish-chart.yml'
+      - '.github/workflows/publish-chart.yaml'
       - '!charts/actions-runner-controller/docs/**'
       - '!**.md'
   workflow_dispatch:
@@ -15,10 +15,13 @@ env:
   KUBE_SCORE_VERSION: 1.10.0
   HELM_VERSION: v3.8.0

+permissions:
+  contents: read
+
 jobs:
   lint-chart:
-    runs-on: ubuntu-latest
     name: Lint Chart
+    runs-on: ubuntu-latest
     outputs:
       publish-chart: ${{ steps.publish-chart-step.outputs.publish }}
     steps:
@@ -28,7 +31,7 @@ jobs:
           fetch-depth: 0

       - name: Set up Helm
-        uses: azure/setup-helm@v2.1
+        uses: azure/setup-helm@v3.0
         with:
           version: ${{ env.HELM_VERSION }}

@@ -49,9 +52,9 @@ jobs:
            --enable-optional-test container-security-context-readonlyrootfilesystem

       # python is a requirement for the chart-testing action below (supports yamllint among other tests)
-      - uses: actions/setup-python@v3
+      - uses: actions/setup-python@v4
         with:
-          python-version: 3.7
+          python-version: '3.7'

       - name: Set up chart-testing
         uses: helm/chart-testing-action@v2.2.1
@@ -65,22 +68,23 @@ jobs:
           fi

       - name: Run chart-testing (lint)
-        run: ct lint --config charts/.ci/ct-config.yaml
+        run: |
+          ct lint --config charts/.ci/ct-config.yaml

       - name: Create kind cluster
-        uses: helm/kind-action@v1.2.0
         if: steps.list-changed.outputs.changed == 'true'
+        uses: helm/kind-action@v1.3.0

       # We need cert-manager already installed in the cluster because we assume the CRDs exist
       - name: Install cert-manager
+        if: steps.list-changed.outputs.changed == 'true'
         run: |
           helm repo add jetstack https://charts.jetstack.io --force-update
           helm install cert-manager jetstack/cert-manager --set installCRDs=true --wait
-        if: steps.list-changed.outputs.changed == 'true'

       - name: Run chart-testing (install)
-        run: ct install --config charts/.ci/ct-config.yaml
         if: steps.list-changed.outputs.changed == 'true'
+        run: ct install --config charts/.ci/ct-config.yaml

       # WARNING: This relies on the latest release being at the top of the JSON from GitHub and a clean chart.yaml
       - name: Check if Chart Publish is Needed
@@ -99,8 +103,11 @@ jobs:
   publish-chart:
     if: needs.lint-chart.outputs.publish-chart == 'true'
     needs: lint-chart
-    runs-on: ubuntu-latest
     name: Publish Chart
+    runs-on: ubuntu-latest
+    permissions:
+      contents: write # for helm/chart-releaser-action to push chart release and create a release

     steps:
       - name: Checkout
.github/workflows/run-codeql.yaml (vendored, new file, 32 lines)

@@ -0,0 +1,32 @@
+name: Run CodeQL
+
+on:
+  push:
+    branches:
+      - master
+  pull_request:
+    branches:
+      - master
+  schedule:
+    - cron: '30 1 * * 0'
+
+jobs:
+  analyze:
+    name: Analyze
+    runs-on: ubuntu-latest
+    permissions:
+      security-events: write
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v3
+
+      - name: Initialize CodeQL
+        uses: github/codeql-action/init@v2
+        with:
+          languages: go
+
+      - name: Autobuild
+        uses: github/codeql-action/autobuild@v2
+
+      - name: Perform CodeQL Analysis
+        uses: github/codeql-action/analyze@v2
Stale-bot workflow (file header not captured in this dump)

@@ -1,12 +1,18 @@
-name: 'Close stale issues and PRs'
+name: Run Stale Bot
 on:
   schedule:
-    # 01:30 every day
     - cron: '30 1 * * *'

+permissions:
+  contents: read
+
 jobs:
   stale:
+    name: Run Stale
     runs-on: ubuntu-latest
+    permissions:
+      issues: write # for actions/stale to close stale issues
+      pull-requests: write # for actions/stale to close stale PRs
     steps:
       - uses: actions/stale@v5
         with:
Runner image build workflow (file header not captured in this dump; evidently .github/workflows/runners.yaml, per the self-referencing paths below)

@@ -6,27 +6,37 @@ on:
       - opened
       - synchronize
       - reopened
-      - closed
     branches:
       - 'master'
     paths:
       - 'runner/**'
       - '!runner/Makefile'
-      - .github/workflows/runners.yml
+      - '.github/workflows/runners.yaml'
+      - '!**.md'
+  # We must do a trigger on a push: instead of a types: closed so GitHub Secrets
+  # are available to the workflow run
+  push:
+    branches:
+      - 'master'
+    paths:
+      - 'runner/**'
+      - '!runner/Makefile'
+      - '.github/workflows/runners.yaml'
       - '!**.md'

 env:
-  RUNNER_VERSION: 2.290.1
+  RUNNER_VERSION: 2.294.0
   DOCKER_VERSION: 20.10.12
+  RUNNER_CONTAINER_HOOKS_VERSION: 0.1.2
   DOCKERHUB_USERNAME: summerwind

 jobs:
-  build:
+  build-runners:
+    name: Build ${{ matrix.name }}-${{ matrix.os-name }}-${{ matrix.os-version }}
     runs-on: ubuntu-latest
     permissions:
       packages: write
       contents: read
-    name: Build ${{ matrix.name }}-${{ matrix.os-name }}-${{ matrix.os-version }}
     strategy:
       fail-fast: false
       matrix:
@@ -34,11 +44,9 @@ jobs:
         - name: actions-runner
           os-name: ubuntu
           os-version: 20.04
-          dockerfile: Dockerfile
         - name: actions-runner-dind
           os-name: ubuntu
           os-version: 20.04
-          dockerfile: Dockerfile.dindrunner

     steps:
       - name: Checkout
@@ -54,15 +62,16 @@ jobs:
           ghcr_password: ${{ secrets.GITHUB_TOKEN }}

       - name: Build and Push Versioned Tags
-        uses: docker/build-push-action@v2
+        uses: docker/build-push-action@v3
         with:
           context: ./runner
-          file: ./runner/${{ matrix.dockerfile }}
+          file: ./runner/${{ matrix.name }}.dockerfile
           platforms: linux/amd64,linux/arm64
-          push: ${{ github.ref == 'master' && github.event.pull_request.merged == true }}
+          push: ${{ github.event_name == 'push' && github.ref == 'refs/heads/master' }}
           build-args: |
             RUNNER_VERSION=${{ env.RUNNER_VERSION }}
             DOCKER_VERSION=${{ env.DOCKER_VERSION }}
+            RUNNER_CONTAINER_HOOKS_VERSION=${{ env.RUNNER_CONTAINER_HOOKS_VERSION }}
           tags: |
             ${{ env.DOCKERHUB_USERNAME }}/${{ matrix.name }}:v${{ env.RUNNER_VERSION }}-${{ matrix.os-name }}-${{ matrix.os-version }}
             ${{ env.DOCKERHUB_USERNAME }}/${{ matrix.name }}:v${{ env.RUNNER_VERSION }}-${{ matrix.os-name }}-${{ matrix.os-version }}-${{ steps.vars.outputs.sha_short }}
@@ -70,5 +79,5 @@ jobs:
             ghcr.io/${{ github.repository }}/${{ matrix.name }}:latest
             ghcr.io/${{ github.repository }}/${{ matrix.name }}:v${{ env.RUNNER_VERSION }}-${{ matrix.os-name }}-${{ matrix.os-version }}
             ghcr.io/${{ github.repository }}/${{ matrix.name }}:v${{ env.RUNNER_VERSION }}-${{ matrix.os-name }}-${{ matrix.os-version }}-${{ steps.vars.outputs.sha_short }}
-          cache-from: type=gha
-          cache-to: type=gha,mode=max
+          cache-from: type=gha,scope=build-${{ matrix.name }}
+          cache-to: type=gha,mode=max,scope=build-${{ matrix.name }}
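The corrected `push:` guard above matters because, on push events, `github.ref` is the fully qualified ref name, never the bare branch name, so the old comparison could not be true and images were not pushed. A minimal sketch of the corrected pattern, mirroring the diff:

```yaml
# `github.ref` on a push to master is 'refs/heads/master', so compare
# against the fully qualified ref rather than 'master':
push: ${{ github.event_name == 'push' && github.ref == 'refs/heads/master' }}
```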
Controller CI workflow (file header not captured in this dump)

@@ -1,45 +1,59 @@
-name: CI
+name: Validate ARC

 on:
   pull_request:
     branches:
       - master
     paths-ignore:
-      - .github/workflows/runners.yml
-      - .github/workflows/on-push-lint-charts.yml
-      - .github/workflows/on-push-master-publish-chart.yml
-      - .github/workflows/release.yml
-      - .github/workflows/test-entrypoint.yml
-      - .github/workflows/wip.yml
-      - 'runner/**'
       - '**.md'
+      - '.github/ISSUE_TEMPLATE/**'
+      - '.github/workflows/publish-canary.yaml'
+      - '.github/workflows/validate-chart.yaml'
+      - '.github/workflows/publish-chart.yaml'
+      - '.github/workflows/runners.yaml'
+      - '.github/workflows/publish-arc.yaml'
+      - '.github/workflows/validate-entrypoint.yaml'
+      - '.github/renovate.*'
+      - 'runner/**'
       - '.gitignore'
+      - 'PROJECT'
+      - 'LICENSE'
+      - 'Makefile'
+
+permissions:
+  contents: read

 jobs:
-  test:
+  test-controller:
+    name: Test ARC
     runs-on: ubuntu-latest
-    name: Test
     steps:
       - name: Checkout
         uses: actions/checkout@v3
-      - uses: actions/setup-go@v3
+
+      - name: Set-up Go
+        uses: actions/setup-go@v3
         with:
-          go-version: '1.17.7'
+          go-version: '1.18.2'
           check-latest: false
-      - run: go version
       - uses: actions/cache@v3
         with:
           path: ~/go/pkg/mod
           key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
           restore-keys: |
             ${{ runner.os }}-go-

       - name: Install kubebuilder
         run: |
           curl -L -O https://github.com/kubernetes-sigs/kubebuilder/releases/download/v2.3.2/kubebuilder_2.3.2_linux_amd64.tar.gz
           tar zxvf kubebuilder_2.3.2_linux_amd64.tar.gz
           sudo mv kubebuilder_2.3.2_linux_amd64 /usr/local/kubebuilder

       - name: Run tests
-        run: make test
+        run: |
+          make test

       - name: Verify manifests are up-to-date
         run: |
           make manifests
Chart validation workflow (file header not captured in this dump; renamed per the hunks below to .github/workflows/validate-chart.yaml)

@@ -1,10 +1,10 @@
-name: Lint and Test Charts
+name: Validate Helm Chart

 on:
   push:
     paths:
       - 'charts/**'
-      - '.github/workflows/on-push-lint-charts.yml'
+      - '.github/workflows/validate-chart.yaml'
       - '!charts/actions-runner-controller/docs/**'
       - '!**.md'
   workflow_dispatch:
@@ -12,10 +12,13 @@ env:
   KUBE_SCORE_VERSION: 1.10.0
   HELM_VERSION: v3.8.0

+permissions:
+  contents: read
+
 jobs:
-  lint-test:
-    runs-on: ubuntu-latest
+  validate-chart:
     name: Lint Chart
+    runs-on: ubuntu-latest
     steps:
       - name: Checkout
         uses: actions/checkout@v3
@@ -23,7 +26,7 @@ jobs:
           fetch-depth: 0

       - name: Set up Helm
-        uses: azure/setup-helm@v2.1
+        uses: azure/setup-helm@v3.0
         with:
           version: ${{ env.HELM_VERSION }}

@@ -44,9 +47,9 @@ jobs:
            --enable-optional-test container-security-context-readonlyrootfilesystem

       # python is a requirement for the chart-testing action below (supports yamllint among other tests)
-      - uses: actions/setup-python@v3
+      - uses: actions/setup-python@v4
         with:
-          python-version: 3.7
+          python-version: '3.7'

       - name: Set up chart-testing
         uses: helm/chart-testing-action@v2.2.1
@@ -60,18 +63,20 @@ jobs:
           fi

       - name: Run chart-testing (lint)
-        run: ct lint --config charts/.ci/ct-config.yaml
+        run: |
+          ct lint --config charts/.ci/ct-config.yaml

       - name: Create kind cluster
-        uses: helm/kind-action@v1.2.0
+        uses: helm/kind-action@v1.3.0
         if: steps.list-changed.outputs.changed == 'true'

       # We need cert-manager already installed in the cluster because we assume the CRDs exist
       - name: Install cert-manager
+        if: steps.list-changed.outputs.changed == 'true'
         run: |
           helm repo add jetstack https://charts.jetstack.io --force-update
           helm install cert-manager jetstack/cert-manager --set installCRDs=true --wait
-        if: steps.list-changed.outputs.changed == 'true'

       - name: Run chart-testing (install)
-        run: ct install --config charts/.ci/ct-config.yaml
+        run: |
+          ct install --config charts/.ci/ct-config.yaml
Entrypoint test workflow (file header not captured in this dump; evidently .github/workflows/validate-entrypoint.yaml)

@@ -1,4 +1,4 @@
-name: Unit tests for entrypoint
+name: Validate Runners

 on:
   pull_request:
@@ -9,13 +9,17 @@ on:
       - 'test/entrypoint/**'
       - '!**.md'

+permissions:
+  contents: read
+
 jobs:
-  test:
-    runs-on: ubuntu-latest
+  test-runner-entrypoint:
     name: Test entrypoint
+    runs-on: ubuntu-latest
     steps:
       - name: Checkout
         uses: actions/checkout@v3
-      - name: Run unit tests for entrypoint.sh
+
+      - name: Run tests
         run: |
           make acceptance/runner/entrypoint
.github/workflows/wip.yml (vendored, deleted, 51 lines)

@@ -1,51 +0,0 @@
-name: Publish Canary Image
-
-on:
-  push:
-    branches:
-      - master
-    paths-ignore:
-      - .github/workflows/runners.yml
-      - .github/workflows/on-push-lint-charts.yml
-      - .github/workflows/on-push-master-publish-chart.yml
-      - .github/workflows/release.yml
-      - .github/workflows/test-entrypoint.yml
-      - "runner/**"
-      - "**.md"
-      - ".gitignore"
-
-jobs:
-  build:
-    runs-on: ubuntu-latest
-    name: Build and Publish Canary Image
-    env:
-      DOCKERHUB_USERNAME: ${{ secrets.DOCKER_USER }}
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v3
-
-      - name: Set up QEMU
-        uses: docker/setup-qemu-action@v1
-
-      - name: Set up Docker Buildx
-        id: buildx
-        uses: docker/setup-buildx-action@v1
-        with:
-          version: latest
-
-      - name: Login to DockerHub
-        uses: docker/login-action@v1
-        with:
-          username: ${{ secrets.DOCKER_USER }}
-          password: ${{ secrets.DOCKER_ACCESS_TOKEN }}
-
-      # Considered unstable builds
-      # See Issue #285, PR #286, and PR #323 for more information
-      - name: Build and Push
-        uses: docker/build-push-action@v2
-        with:
-          file: Dockerfile
-          platforms: linux/amd64,linux/arm64
-          push: true
-          tags: |
-            ${{ env.DOCKERHUB_USERNAME }}/actions-runner-controller:canary
Dockerfile (path inferred from content; the file header was not captured)

@@ -1,5 +1,5 @@
 # Build the manager binary
-FROM --platform=$BUILDPLATFORM golang:1.17 as builder
+FROM --platform=$BUILDPLATFORM golang:1.18.3 as builder

 WORKDIR /workspace

Makefile (9 lines changed)

@@ -5,7 +5,7 @@ else
 endif
 DOCKER_USER ?= $(shell echo ${NAME} | cut -d / -f1)
 VERSION ?= latest
-RUNNER_VERSION ?= 2.290.1
+RUNNER_VERSION ?= 2.294.0
 TARGETPLATFORM ?= $(shell arch)
 RUNNER_NAME ?= ${DOCKER_USER}/actions-runner
 RUNNER_TAG ?= ${VERSION}
@@ -15,7 +15,6 @@ TEST_ORG_REPO ?=
 TEST_EPHEMERAL ?= false
 SYNC_PERIOD ?= 1m
 USE_RUNNERSET ?=
-RUNNER_FEATURE_FLAG_EPHEMERAL ?=
 KUBECONTEXT ?= kind-acceptance
 CLUSTER ?= acceptance
 CERT_MANAGER_VERSION ?= v1.1.1
@@ -57,6 +56,7 @@ GO_TEST_ARGS ?= -short
 # Run tests
 test: generate fmt vet manifests
 	go test $(GO_TEST_ARGS) ./... -coverprofile cover.out
+	go test -fuzz=Fuzz -fuzztime=10s -run=Fuzz* ./controllers

 test-with-deps: kube-apiserver etcd kubectl
 	# See https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/envtest#pkg-constants
@@ -188,7 +188,6 @@ acceptance/deploy:
 	TEST_ORG=${TEST_ORG} TEST_ORG_REPO=${TEST_ORG_REPO} SYNC_PERIOD=${SYNC_PERIOD} \
 	USE_RUNNERSET=${USE_RUNNERSET} \
 	TEST_EPHEMERAL=${TEST_EPHEMERAL} \
-	RUNNER_FEATURE_FLAG_EPHEMERAL=${RUNNER_FEATURE_FLAG_EPHEMERAL} \
 	acceptance/deploy.sh

 acceptance/tests:
@@ -223,7 +222,7 @@ ifeq (, $(wildcard $(GOBIN)/controller-gen))
 	CONTROLLER_GEN_TMP_DIR=$$(mktemp -d) ;\
 	cd $$CONTROLLER_GEN_TMP_DIR ;\
 	go mod init tmp ;\
-	go get sigs.k8s.io/controller-tools/cmd/controller-gen@v0.7.0 ;\
+	go install sigs.k8s.io/controller-tools/cmd/controller-gen@v0.7.0 ;\
 	rm -rf $$CONTROLLER_GEN_TMP_DIR ;\
 	}
 endif
@@ -243,7 +242,7 @@ ifeq (, $(wildcard $(GOBIN)/yq))
 	YQ_TMP_DIR=$$(mktemp -d) ;\
 	cd $$YQ_TMP_DIR ;\
 	go mod init tmp ;\
-	go get github.com/mikefarah/yq/v3@3.4.0 ;\
+	go install github.com/mikefarah/yq/v3@3.4.0 ;\
 	rm -rf $$YQ_TMP_DIR ;\
 	}
 endif
README.md (382 lines changed)

@@ -1,11 +1,13 @@
 # actions-runner-controller (ARC)

+[](https://bestpractices.coreinfrastructure.org/projects/6061)
 [](https://github.com/jonico/awesome-runners)

 This controller operates self-hosted runners for GitHub Actions on your Kubernetes cluster.

 ToC:

+- [People](#people)
 - [Status](#status)
 - [About](#about)
 - [Installation](#installation)
@@ -28,6 +30,7 @@ ToC:
   - [Autoscaling to/from 0](#autoscaling-tofrom-0)
   - [Scheduled Overrides](#scheduled-overrides)
 - [Runner with DinD](#runner-with-dind)
+- [Runner with k8s jobs](#runner-with-k8s-jobs)
 - [Additional Tweaks](#additional-tweaks)
 - [Custom Volume mounts](#custom-volume-mounts)
 - [Runner Labels](#runner-labels)
@@ -40,6 +43,20 @@ ToC:
 - [Contributing](#contributing)


+## People
+
+`actions-runner-controller` is an open-source project currently developed and maintained in collaboration with maintainers @mumoshu and @toast-gear, various [contributors](https://github.com/actions-runner-controller/actions-runner-controller/graphs/contributors), and the [awesome community](https://github.com/actions-runner-controller/actions-runner-controller/discussions), mostly in their spare time.
+
+If you think the project is awesome and it's becoming a basis for your important business, consider [sponsoring us](https://github.com/sponsors/actions-runner-controller)!
+
+In case you are already the employer of one of contributors, sponsoring via GitHub Sponsors might not be an option. Just support them in other means!
+
+We don't currently have [any sponsors dedicated to this project yet](https://github.com/sponsors/actions-runner-controller).
+
+However, [HelloFresh](https://www.hellofreshgroup.com/en/) has recently started sponsoring @mumoshu for this project along with his other works. A part of their sponsorship will enable @mumoshu to add an E2E test to keep ARC even more reliable on AWS. Thank you for your sponsorship!
+
+[<img src="https://user-images.githubusercontent.com/22009/170898715-07f02941-35ec-418b-8cd4-251b422fa9ac.png" width="219" height="71" />](https://careers.hellofresh.com/)
+
 ## Status

 Even though actions-runner-controller is used in production environments, it is still in its early stage of development, hence versioned 0.x.
@@ -59,7 +76,7 @@ By default, actions-runner-controller uses [cert-manager](https://cert-manager.i
 - [Installing cert-manager on Kubernetes](https://cert-manager.io/docs/installation/kubernetes/)

-Subsequent to this, install the custom resource definitions and actions-runner-controller with `kubectl` or `helm`. This will create an actions-runner-system namespace in your Kubernetes and deploy the required resources.
+After installing cert-manager, install the custom resource definitions and actions-runner-controller with `kubectl` or `helm`. This will create an actions-runner-system namespace in your Kubernetes and deploy the required resources.

 **Kubectl Deployment:**

@@ -402,7 +419,7 @@ spec:
       app: example
 ```

-As it is based on `StatefulSet`, `selector` and `template.medatada.labels` it needs to be defined and have the exact same set of labels. `serviceName` must be set to some non-empty string as it is also required by `StatefulSet`.
+As it is based on `StatefulSet`, `selector` and `template.metadata.labels` it needs to be defined and have the exact same set of labels. `serviceName` must be set to some non-empty string as it is also required by `StatefulSet`.

 Runner-related fields like `ephemeral`, `repository`, `organization`, `enterprise`, and so on should be written directly under `spec`.

@@ -444,6 +461,17 @@ spec:
         requests:
           cpu: "2.0"
           memory: "4Gi"
+      # This is an advanced configuration. Don't touch it unless you know what you're doing.
+      securityContext:
+        # Usually, the runner container's privileged field is derived from dockerdWithinRunnerContainer.
+        # But in the case where you need to run privileged job steps even if you don't use docker/don't need dockerd within the runner container,
+        # just specified `privileged: true` like this.
+        # See https://github.com/actions-runner-controller/actions-runner-controller/issues/1282
+        # Do note that specifying `privileged: false` while using dind is very likely to fail, even if you use some vm-based container runtimes
+        # like firecracker and kata. Basically they run containers within dedicated micro vms and so
+        # it's more like you can use `privileged: true` safer with those runtimes.
+        #
+        # privileged: true
     - name: docker
       resources:
         limits:
@@ -461,7 +489,6 @@ Under the hood, `RunnerSet` relies on Kubernetes's `StatefulSet` and Mutating We
 **Limitations**

 * For autoscaling the `RunnerSet` kind only supports pull driven scaling or the `workflow_job` event for webhook driven scaling.
-* Whilst `RunnerSets` support all runner modes as well as autoscaling, currently PVs are **NOT** automatically cleaned up as they are still bound to their respective PVCs when a runner is deleted by the controller. This has **major** implications when using `RunnerSets` in the standard runner mode, `ephemeral: true`, see [persistent runners](#persistent-runners) for more details. As a result of this, using the default ephemeral configuration or implementing autoscaling for your `RunnerSets`, you will get a build-up of PVCs and PVs without some sort of custom solution for cleaning up.

 ### Persistent Runners

@@ -692,7 +719,7 @@ With the above example, the webhook server scales `example-runners` by `1` repli

 Of note is the `HRA.spec.scaleUpTriggers[].duration` attribute. This attribute is used to calculate if the replica number added via the trigger is expired or not. On each reconciliation loop, the controller sums up all the non-expiring replica numbers from previous scale-up triggers. It then compares the summed desired replica number against the current replica number. If the summed desired replica number > the current number then it means the replica count needs to scale up.

-As mentioned previously, the `scaleDownDelaySecondsAfterScaleOut` property has the final say still. If the latest scale-up time + the anti-flapping duration is later than the current time, it doesn’t immediately scale up and instead retries the calculation again later to see if it needs to scale yet.
+As mentioned previously, the `scaleDownDelaySecondsAfterScaleOut` property has the final say still. If the latest scale-up time + the anti-flapping duration is later than the current time, it doesn’t immediately scale down and instead retries the calculation again later to see if it needs to scale yet.

 ---

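To make the trigger and anti-flapping mechanics in that hunk concrete, here is a minimal `HorizontalRunnerAutoscaler` sketch of the kind the README describes; names such as `example-runners` are placeholders, and the field shapes follow ARC's documented webhook-driven scaling examples:

```yaml
apiVersion: actions.summerwind.dev/v1alpha1
kind: HorizontalRunnerAutoscaler
metadata:
  name: example-runners-autoscaler
spec:
  scaleTargetRef:
    name: example-runners        # the RunnerDeployment to scale
  minReplicas: 1
  maxReplicas: 10
  # The anti-flapping window discussed above: while "latest scale-up time
  # + this duration" is still in the future, scale-down is retried later.
  scaleDownDelaySecondsAfterScaleOut: 300
  scaleUpTriggers:
    - githubEvent:
        workflowJob: {}
      amount: 1        # replicas added per matching webhook delivery
      duration: "5m"   # the added replica "expires" after this long
```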
@@ -700,31 +727,164 @@ The primary benefit of autoscaling on Webhooks compared to the pull driven scali
|
|||||||
|
|
||||||
> You can learn the implementation details in [#282](https://github.com/actions-runner-controller/actions-runner-controller/pull/282)
|
> You can learn the implementation details in [#282](https://github.com/actions-runner-controller/actions-runner-controller/pull/282)
|
||||||
|
|
||||||
|
##### Install with Helm
|
||||||
|
|
||||||
To enable this feature, you first need to install the GitHub webhook server. To install via our Helm chart,
|
To enable this feature, you first need to install the GitHub webhook server. To install via our Helm chart,
|
||||||
_[see the values documentation for all configuration options](https://github.com/actions-runner-controller/actions-runner-controller/blob/master/charts/actions-runner-controller/README.md)_
|
_[see the values documentation for all configuration options](https://github.com/actions-runner-controller/actions-runner-controller/blob/master/charts/actions-runner-controller/README.md)_
|
||||||
|
|
||||||
```console
|
```console
|
||||||
$ helm upgrade --install --namespace actions-runner-system --create-namespace \
|
$ helm upgrade --install --namespace actions-runner-system --create-namespace \
|
||||||
--wait actions-runner-controller actions-runner-controller/actions-runner-controller \
|
--wait actions-runner-controller actions-runner-controller/actions-runner-controller \
|
||||||
--set "githubWebhookServer.enabled=true,githubWebhookServer.ports[0].nodePort=33080"
|
--set "githubWebhookServer.enabled=true,service.type=NodePort,githubWebhookServer.ports[0].nodePort=33080"
|
||||||
```
|
```
|
||||||
|
|
||||||
The above command will result in exposing the node port 33080 for Webhook events. Usually, you need to create an
|
The above command will result in exposing the node port 33080 for Webhook events.
|
||||||
external load balancer targeted to the node port, and register the hostname or the IP address of the external load balancer
|
Usually, you need to create an external load balancer targeted to the node port,
|
||||||
to the GitHub Webhook.
|
and register the hostname or the IP address of the external load balancer to the GitHub Webhook.
|
||||||
|
|
||||||
Once you were able to confirm that the Webhook server is ready and running from GitHub - this is usually verified by the
|
**With a custom Kubernetes ingress controller:**
|
||||||
GitHub sending PING events to the Webhook server - create or update your `HorizontalRunnerAutoscaler` resources
|
|
||||||
by learning the following configuration examples.
|
> **CAUTION:** The Kubernetes ingress controllers described below is just a suggestion from the community and
|
||||||
|
> the ARC team will not provide any user support for ingress controllers as it's not a part of this project.
|
||||||
|
>
|
||||||
|
> The following guide on creating an ingress has been contributed by the awesome ARC community and is provided here as-is.
|
||||||
|
> You may, however, still be able to ask for help on the community on GitHub Discussions if you have any problems.
|
||||||
|
|
||||||
|
Kubernetes provides `Ingress` resources to let you configure your ingress controller to expose a Kubernetes service.
|
||||||
|
If you plan to expose ARC via Ingress, you might not be required to make it a `NodePort` service
|
||||||
|
(although nothing would prevent an ingress controller to expose NodePort services too):
|
||||||
|
|
||||||
|
```console
|
||||||
|
$ helm upgrade --install --namespace actions-runner-system --create-namespace \
|
||||||
|
--wait actions-runner-controller actions-runner-controller/actions-runner-controller \
|
||||||
|
--set "githubWebhookServer.enabled=true"
|
||||||
|
```
|
||||||
|
|
||||||
|
The command above will create a new deployment and a service for receiving Github Webhooks on the `actions-runner-system` namespace.
|
||||||
|
|
||||||
|
Now we need to expose this service so that GitHub can send these webhooks over the network with TSL protection.
|
||||||
|
|
||||||
|
You can do it in any way you prefer, here we'll suggest doing it with a k8s Ingress.
|
||||||
|
For the sake of this example we'll expose this service on the following URL:
|
||||||
|
|
||||||
|
- https://your.domain.com/actions-runner-controller-github-webhook-server
|
||||||
|
|
||||||
|
Where `your.domain.com` should be replaced by your own domain.
|
||||||
|
|
||||||
|
> Note: This step assumes you already have a configured `cert-manager` and domain name for your cluster.
|
||||||
|
|
||||||
|
Let's start by creating an Ingress file called `arc-webhook-server.yaml` with the following contents:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
apiVersion: networking.k8s.io/v1
|
||||||
|
kind: Ingress
|
||||||
|
metadata:
|
||||||
|
name: actions-runner-controller-github-webhook-server
|
||||||
|
namespace: actions-runner-system
|
||||||
|
annotations:
|
||||||
|
kubernetes.io/ingress.class: nginx
|
||||||
|
nginx.ingress.kubernetes.io/backend-protocol: "HTTP"
|
||||||
|
spec:
|
||||||
|
tls:
|
||||||
|
- hosts:
|
||||||
|
- your.domain.com
|
||||||
|
secretName: your-tls-secret-name
|
||||||
|
rules:
|
||||||
|
- http:
|
||||||
|
paths:
|
||||||
|
- path: /actions-runner-controller-github-webhook-server
|
||||||
|
pathType: Prefix
|
||||||
|
backend:
|
||||||
|
service:
|
||||||
|
name: actions-runner-controller-github-webhook-server
|
||||||
|
port:
|
||||||
|
number: 80
|
||||||
|
```
|
||||||
|
|
||||||
|
Make sure to set the `spec.tls.secretName` to the name of your TLS secret and
|
||||||
|
`spec.tls.hosts[0]` to your own domain.
|
||||||
|
|
||||||
|
Then create this resource on your cluster with the following command:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
kubectl apply -n actions-runner-system -f arc-webhook-server.yaml
|
||||||
|
```
|
||||||
|
|
||||||
|
**Configuring GitHub to send webhooks to our newly created webhook server:**

After this step your webhook server should be ready to start receiving webhooks from GitHub.

To configure GitHub to start sending you webhooks, go to the settings page of your repository or organization, click on `Webhooks`, then on `Add webhook`.

Set the "Payload URL" field to the webhook URL you just created; if you followed the example Ingress above, the URL would be something like this:

- https://your.domain.com/actions-runner-controller-github-webhook-server

> Remember to replace `your.domain.com` with your own domain.

Then click on "Let me select individual events" and choose `Workflow Jobs`.

You may also want to choose the following events if you use them as scale triggers in your HRA spec:

- Check runs
- Pushes
- Pull Requests

Later you can remove any of these you are not using to reduce the amount of data sent to your server.

Then click on `Add webhook`.

GitHub will then send a `ping` event to your webhook server to check if it is working; if it is, you'll see a green check mark alongside your webhook on the Settings -> Webhooks page.

Once you have confirmed from GitHub that the webhook server is ready and running, create or update your `HorizontalRunnerAutoscaler` resources following the configuration examples below.
##### Install with Kustomize

To install this feature using Kustomize, add the `github-webhook-server` resources to your `kustomization.yaml` file as in the example below:

```yaml
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization

resources:
# You should already have this
- github.com/actions-runner-controller/actions-runner-controller/config//default?ref=v0.22.2
# Add the below!
- github.com/actions-runner-controller/actions-runner-controller/config//github-webhook-server?ref=v0.22.2
```

Finally, you will have to configure an Ingress so that you can configure the webhook in GitHub. An example of such an Ingress can be found below:

```yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: actions-runners-webhook-server
spec:
  rules:
  - http:
      paths:
      - path: /
        backend:
          service:
            name: github-webhook-server
            port:
              number: 80
        pathType: Exact
```
##### Examples

- [Example 1: Scale on each `workflow_job` event](#example-1-scale-on-each-workflow_job-event)
- [Example 2: Scale up on each `check_run` event](#example-2-scale-up-on-each-check_run-event)
- [Example 3: Scale on each `pull_request` event against a given set of branches](#example-3-scale-on-each-pull_request-event-against-a-given-set-of-branches)
- [Example 4: Scale on each `push` event](#example-4-scale-on-each-push-event)

**Note:** `minReplicas` and `maxReplicas` are mandatory parameters in all these examples, even for webhook-driven scaling.

###### Example 1: Scale on each `workflow_job` event

> This feature requires controller version >= [v0.20.0](https://github.com/actions-runner-controller/actions-runner-controller/releases/tag/v0.20.0)
The most flexible webhook GitHub offers is the `workflow_job` webhook. This webhook should cover most people's needs; please experiment with it first before considering the others.

```yaml
apiVersion: actions.summerwind.dev/v1alpha1
kind: RunnerDeployment
metadata:
  name: example-runners
spec:
  template:
    spec:
      repository: example/myrepo
---
apiVersion: actions.summerwind.dev/v1alpha1
kind: HorizontalRunnerAutoscaler
metadata:
  name: example-runners
spec:
  scaleDownDelaySecondsAfterScaleOut: 300
  minReplicas: 1
  maxReplicas: 10
  scaleTargetRef:
    name: example-runners
    # Uncomment the below in case the target is not RunnerDeployment but RunnerSet
    # kind: RunnerSet
  scaleUpTriggers:
  - githubEvent:
      workflowJob: {}
    duration: "30m"
```
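For orientation, a job in `example/myrepo` that would drive this trigger could look like the sketch below. The workflow itself is hypothetical; any queued job targeting a matching self-hosted runner emits the `workflow_job` event that the autoscaler reacts to:

```yaml
# Hypothetical workflow in example/myrepo; each queued job like this
# produces a workflow_job event with status=queued, scaling up by one.
name: ci
on: push
jobs:
  test:
    runs-on: self-hosted
    steps:
    - run: echo "running on an ARC-managed runner"
```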
You can configure your GitHub webhook settings to only include `Workflow Job` events.

Each kind has a `status` of `queued`, `in_progress` and `completed`. With the above configuration, `actions-runner-controller` adds one runner for a `workflow_job` event whose `status` is `queued`. Similarly, it removes one runner for a `workflow_job` event whose `status` is `completed`. The caveat to remember is that this scale-down is bounded by your `scaleDownDelaySecondsAfterScaleOut` configuration; if that time hasn't passed yet, the scale-down will be deferred.

###### Example 2: Scale up on each `check_run` event

> Note: This should work almost like https://github.com/philips-labs/terraform-aws-github-runner
```yaml
---
kind: HorizontalRunnerAutoscaler
spec:
  minReplicas: 1
  maxReplicas: 10
  scaleTargetRef:
    name: example-runners
    # Uncomment the below in case the target is not RunnerDeployment but RunnerSet
    # kind: RunnerSet
  # ...
```

```yaml
---
kind: HorizontalRunnerAutoscaler
spec:
  minReplicas: 1
  maxReplicas: 10
  scaleTargetRef:
    name: example-runners
    # Uncomment the below in case the target is not RunnerDeployment but RunnerSet
    # kind: RunnerSet
  scaleUpTriggers:
  - githubEvent:
      # ...
    duration: "5m"
```

###### Example 3: Scale on each `pull_request` event against a given set of branches

To scale up replicas of the runners for `example/myrepo` by 1 for 5 minutes on each `pull_request` against the `main` or `develop` branch, you write manifests like the below:
```yaml
---
kind: HorizontalRunnerAutoscaler
spec:
  minReplicas: 1
  maxReplicas: 10
  scaleTargetRef:
    name: example-runners
    # Uncomment the below in case the target is not RunnerDeployment but RunnerSet
    # kind: RunnerSet
  # ...
```

###### Example 4: Scale on each `push` event

```yaml
---
kind: HorizontalRunnerAutoscaler
spec:
  minReplicas: 1
  maxReplicas: 10
  scaleTargetRef:
    name: example-runners
    # Uncomment the below in case the target is not RunnerDeployment but RunnerSet
    # kind: RunnerSet
  # ...
```
This also helps with resources, as you don't need to assign resources separately to the docker and runner containers.

### Runner with K8s Jobs

When using the default runner, jobs that use a container will run in docker. This necessitates privileged mode, either on the runner pod or the sidecar container.

By setting the container mode, you can instead invoke these jobs using a [kubernetes implementation](https://github.com/actions/runner-container-hooks/tree/main/packages/k8s) while not executing in privileged mode.

The runner will dynamically spin up pods and k8s jobs in the runner's namespace to run the workflow, so a `workVolumeClaimTemplate` is required for the runner's working directory, along with a service account with the [appropriate permissions](https://github.com/actions/runner-container-hooks/tree/main/packages/k8s#pre-requisites).

There are some [limitations](https://github.com/actions/runner-container-hooks/tree/main/packages/k8s#limitations) to this approach; most notably, [job containers](https://docs.github.com/en/actions/using-jobs/running-jobs-in-a-container) are required on all workflows. A workflow sketch illustrating this requirement follows the manifest below.

```yaml
# runner.yaml
apiVersion: actions.summerwind.dev/v1alpha1
kind: Runner
metadata:
  name: example-runner
spec:
  repository: example/myrepo
  containerMode: kubernetes
  serviceAccountName: my-service-account
  workVolumeClaimTemplate:
    storageClassName: "my-dynamic-storage-class"
    accessModes:
    - ReadWriteOnce
    resources:
      requests:
        storage: 10Gi
  env: []
```
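To make the job-container requirement concrete, here is a minimal, hypothetical workflow such a runner can execute; the `container:` key is the part kubernetes container mode insists on:

```yaml
# Hypothetical workflow: in kubernetes container mode every job must
# declare a job container, which the hook runs as a pod in the
# runner's namespace.
name: build
on: push
jobs:
  build:
    runs-on: self-hosted
    container:
      image: golang:1.18
    steps:
    - run: go version
```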
### Additional Tweaks

You can pass details through the spec selector. Here's an example of what you may like to do:

```yaml
# ...
      annotations:
        cluster-autoscaler.kubernetes.io/safe-to-evict: "true"
    spec:
      priorityClassName: "high"
      nodeSelector:
        node-role.kubernetes.io/test: ""
# ...
```
```yaml
# ...
      # This must match the name of a RuntimeClass resource available on the cluster.
      # More info: https://kubernetes.io/docs/concepts/containers/runtime-class
      runtimeClassName: "runc"
      # This is an advanced configuration. Don't touch it unless you know what you're doing.
      containers:
      - name: runner
        # Usually, the runner container's privileged field is derived from dockerdWithinRunnerContainer.
        # But in the case where you need to run privileged job steps even if you don't use docker/don't need dockerd within the runner container,
        # just specify `privileged: true` like this.
        # See https://github.com/actions-runner-controller/actions-runner-controller/issues/1282
        # Do note that specifying `privileged: false` while using dind is very likely to fail, even if you use some vm-based container runtimes
        # like firecracker and kata. Basically they run containers within dedicated micro vms, so
        # it's more that you can use `privileged: true` more safely with those runtimes.
        #
        # privileged: true
```
### Custom Volume mounts

You can configure your own custom volume mounts, for example to have the work/docker data in memory or on NVMe SSD for I/O-intensive builds. Other custom volume mounts should be possible as well; see the [kubernetes documentation](https://kubernetes.io/docs/concepts/storage/volumes/).

#### RAM Disk

An example of how to place the runner work dir, the docker sidecar, and /tmp within the runner onto a RAM disk:

```yaml
kind: RunnerDeployment
# ...
      ephemeral: true # Recommended to not leak data between builds.
```
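The diff above elides the volume definitions of this example. As a rough, hypothetical sketch of the idea, a memory-medium `emptyDir` is what backs such a setup; the volume name, mount path, and size below are illustrative:

```yaml
kind: RunnerDeployment
spec:
  template:
    spec:
      # Illustrative: a memory-medium emptyDir serves as the RAM disk.
      volumes:
      - name: tmp
        emptyDir:
          medium: Memory
          sizeLimit: 1Gi
      # Mounted into the runner container (ARC also supports
      # dockerVolumeMounts for the docker sidecar).
      volumeMounts:
      - name: tmp
        mountPath: /tmp
      ephemeral: true # Recommended to not leak data between builds.
```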
#### NVME SSD

In this example we provide NVMe-backed storage for the workdir, docker sidecar, and /tmp within the runner. Here we use a working example on GKE, which provides the NVMe disk at /mnt/disks/ssd0. We will be placing the respective volumes in subdirectories there; in order to be able to run multiple runners, we use the pod name as a prefix for the subdirectories. Note that the disk will fill up over time and disk space will not be freed until the node is removed.

```yaml
# ...
      ephemeral: true # VERY important. Otherwise data inside the workdir and /tmp is not cleared between builds.
```
#### Docker image layers caching

> **Note**: Ensure that the volume mount is added to the container that is running the Docker daemon.

`docker` stores pulled and built image layers in the [daemon's (note: not the client's)](https://docs.docker.com/get-started/overview/#docker-architecture) [local storage area](https://docs.docker.com/storage/storagedriver/#sharing-promotes-smaller-images), which is usually at `/var/lib/docker`.

By leveraging RunnerSet's dynamic PV provisioning feature and your CSI driver, you can let ARC maintain a pool of PVs that are reused across runner pods to retain `/var/lib/docker`.

_Be sure to add the volume mount to the container that is supposed to run the docker daemon._

By default, ARC creates a sidecar container named `docker` within the runner pod for running the docker daemon. In that case, that's where you need the volume mount, so that the manifest looks like:

```yaml
kind: RunnerSet
metadata:
  name: example
spec:
  template:
    spec:
      containers:
      - name: docker
        volumeMounts:
        - name: var-lib-docker
          mountPath: /var/lib/docker
  volumeClaimTemplates:
  - metadata:
      name: var-lib-docker
    spec:
      accessModes:
      - ReadWriteOnce
      resources:
        requests:
          storage: 10Mi
      storageClassName: var-lib-docker
```

With `dockerdWithinRunnerContainer: true`, you need to add the volume mount to the `runner` container instead.
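A quick, optional way to confirm that layers are actually being retained is to inspect the daemon's storage usage across two consecutive runner pods backed by the same PV; nothing ARC-specific here, just the stock docker CLI:

```bash
# Inside the runner pod (or docker sidecar): if caching works, the second
# pod on the same PV reports non-zero image storage before any pull.
docker system df
```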
#### Go module and build caching

`Go` is known to cache builds under `$HOME/.cache/go-build` and downloaded modules under `$HOME/go/pkg/mod`. The module cache dir can be customized by setting `GOMODCACHE`, so by pointing it somewhere under `$HOME/.cache`, we can have a single PV host both the build and module caches, which might improve Go module download and build times.

```yaml
kind: RunnerSet
metadata:
  name: example
spec:
  template:
    spec:
      containers:
      - name: runner
        env:
        - name: GOMODCACHE
          value: "/home/runner/.cache/go-mod"
        volumeMounts:
        - name: cache
          mountPath: "/home/runner/.cache"
  volumeClaimTemplates:
  - metadata:
      name: cache
    spec:
      accessModes:
      - ReadWriteOnce
      resources:
        requests:
          storage: 10Mi
      storageClassName: cache
```
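To verify the override took effect inside a runner (assuming Go is installed on your runner image), the stock `go env` command prints both cache locations:

```bash
# Expect GOMODCACHE=/home/runner/.cache/go-mod with the manifest above,
# and GOCACHE under its default of $HOME/.cache/go-build.
go env GOMODCACHE GOCACHE
```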
#### PV-backed runner work directory

ARC works by automatically creating runner pods that run [`actions/runner`](https://github.com/actions/runner) and [`config.sh`](https://docs.github.com/en/actions/hosting-your-own-runners/adding-self-hosted-runners#adding-a-self-hosted-runner-to-a-repository), which you previously had to run manually without ARC.

`config.sh` is the script provided by `actions/runner` to pre-configure the runner process before it is started. One of the options provided by `config.sh` is `--work`, which specifies the working directory in which the runner runs your workflow jobs.

The volume and the partition that host the work directory should have several to dozens of GB of free space available to your workflow jobs.

By default, ARC uses `/runner/_work` as the work directory, which is powered by Kubernetes's `emptyDir`. [`emptyDir` is usually backed by a directory created within a host's volume](https://kubernetes.io/docs/concepts/storage/volumes/#emptydir), somewhere under `/var/lib/kubelet/pods`. Therefore the host volume backing `/var/lib/kubelet/pods` must have enough free space to serve all the concurrent runner pods that might be deployed onto your host at the same time.

So, in case you see a job failure seemingly due to "disk full", it's very likely you need to reconfigure your host to have more free space.

In case you can't rely on the host's volume, consider using `RunnerSet` and backing the work directory with an ephemeral PV.

Kubernetes 1.23 or greater provides support for [generic ephemeral volumes](https://kubernetes.io/docs/concepts/storage/ephemeral-volumes/#generic-ephemeral-volumes), which is designed to support this exact use-case. It's defined in the Pod spec API, so it isn't currently available for `RunnerDeployment`. `RunnerSet`, however, is based on Kubernetes' `StatefulSet`, which mostly embeds the Pod spec under `spec.template.spec`, so there you go:

```yaml
kind: RunnerSet
metadata:
  name: example
spec:
  template:
    spec:
      containers:
      - name: runner
        volumeMounts:
        - mountPath: /runner/_work
          name: work
      - name: docker
        volumeMounts:
        - mountPath: /runner/_work
          name: work
      volumes:
      - name: work
        ephemeral:
          volumeClaimTemplate:
            spec:
              accessModes: [ "ReadWriteOnce" ]
              storageClassName: "runner-work-dir"
              resources:
                requests:
                  storage: 10Gi
```
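With generic ephemeral volumes, Kubernetes creates one PVC per pod, named `<pod-name>-<volume-name>`, so you can watch the work-dir volumes come and go alongside the runner pods:

```bash
# For the example above, expect one PVC per runner pod named <pod-name>-work,
# each bound to a PV from the runner-work-dir storage class.
kubectl get pvc
```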
### Runner Labels

To run a workflow job on a self-hosted runner, you can use the following syntax in your workflow:

```yaml
# ...
  # Disables automatic runner updates
  - name: DISABLE_RUNNER_UPDATE
    value: "true"
```

### Using IRSA (IAM Roles for Service Accounts) in EKS
**SECURITY.md** (new file)

# Security Policy

## Sponsoring the project

This project is maintained by a small team of two and therefore lacks the resources to provide security fixes in a timely manner.

If you have an important business that relies on this project, please consider sponsoring it so that the maintainers can commit to providing that service.

Please refer to https://github.com/sponsors/actions-runner-controller for available tiers.

## Supported Versions

| Version  | Supported          |
| -------- | ------------------ |
| 0.23.0   | :white_check_mark: |
| < 0.23.0 | :x:                |

## Reporting a Vulnerability

To report a security issue, please email ykuoka+arcsecurity(at)gmail.com with a description of the issue, the steps you took to create the issue, affected versions, and, if known, mitigations for the issue.

A maintainer will try to respond within 5 working days. If the issue is confirmed as a vulnerability, a Security Advisory will be opened. This project tries to follow a 90-day disclosure timeline.
# Troubleshooting

* [Tools](#tools)
* [Installation](#installation)
  * [InternalError when calling webhook: context deadline exceeded](#internalerror-when-calling-webhook-context-deadline-exceeded)
  * [Invalid header field value](#invalid-header-field-value)
* [Operations](#operations)
  * [Stuck runner kind or backing pod](#stuck-runner-kind-or-backing-pod)
  * [Delay in jobs being allocated to runners](#delay-in-jobs-being-allocated-to-runners)
  * [Runner coming up before network available](#runner-coming-up-before-network-available)
  * [Outgoing network action hangs indefinitely](#outgoing-network-action-hangs-indefinitely)
  * [Unable to scale to zero with TotalNumberOfQueuedAndInProgressWorkflowRuns](#unable-to-scale-to-zero-with-totalnumberofqueuedandinprogressworkflowruns)

## Tools

A list of tools which are helpful for troubleshooting:

* https://github.com/rewanthtammana/kubectl-fields, a Kubernetes resources hierarchy parsing tool
* https://github.com/stern/stern, multi-pod and container log tailing for Kubernetes
## Installation

Troubleshooting runbooks for ARC installation problems.

### InternalError when calling webhook: context deadline exceeded

**Problem**

This issue can come up for various reasons, such as leftovers from previous installations, or the K8s control plane being unable to access the cluster IP of the Service associated with ARC's admission webhook server.

```
Internal error occurred: failed calling webhook "mutate.runnerdeployment.actions.summerwind.dev":
Post "https://actions-runner-controller-webhook.actions-runner-system.svc:443/mutate-actions-summerwind-dev-v1alpha1-runnerdeployment?timeout=10s": context deadline exceeded
```

**Solution**

First, try the common solution of checking for webhook leftovers from previous installations:

1. ```bash
   kubectl get validatingwebhookconfiguration -A
   kubectl get mutatingwebhookconfiguration -A
   ```
2. If you see any webhooks related to actions-runner-controller, delete them:
   ```bash
   kubectl delete mutatingwebhookconfiguration actions-runner-controller-mutating-webhook-configuration
   kubectl delete validatingwebhookconfiguration actions-runner-controller-validating-webhook-configuration
   ```

If that didn't work, your K8s control plane is probably unable to access the cluster IP of the Service associated with the admission webhook server. Typical causes:

1. You're running the apiserver as a binary and you didn't make Service cluster IPs available to the host network.
2. You're running the apiserver in a pod, but your pod network (i.e. CNI plugin installation and config) is broken, so pods like kube-apiserver on the control-plane nodes can't access ARC's admission webhook server pod(s) on the data-plane nodes.

Another cause can be GKE's firewall settings: on a private GKE cluster, the control plane is blocked from reaching the webhook port by default, producing the same error.

To fix this, you may either:

1. Configure the webhook to use another port, such as 443 or 10250, [each of which allow traffic by default](https://cloud.google.com/kubernetes-engine/docs/how-to/private-clusters#add_firewall_rules).

   ```sh
   # With helm, you'd set `webhookPort` to the port number of your choice
   # See https://github.com/actions-runner-controller/actions-runner-controller/pull/1410/files for more information
   helm upgrade --install --namespace actions-runner-system --create-namespace \
     --wait actions-runner-controller actions-runner-controller/actions-runner-controller \
     --set webhookPort=10250
   ```

2. Set up a firewall rule to allow the master node to connect to the default webhook port. The exact way to do this may vary, but the following script should point you in the right direction:

   ```sh
   # 1) Retrieve the network tag automatically given to the worker nodes
   # NOTE: this only works if you have only one cluster in your GCP project. You will have to manually inspect the result of this command to find the tag for the cluster you want to target
   WORKER_NODES_TAG=$(gcloud compute instances list --format='text(tags.items[0])' --filter='metadata.kubelet-config:*' | grep tags | awk '{print $2}' | sort | uniq)

   # 2) Take note of the VPC network in which you deployed your cluster
   # NOTE: this only works if you have only one network in which you deploy your clusters
   NETWORK=$(gcloud compute instances list --format='text(networkInterfaces[0].network)' --filter='metadata.kubelet-config:*' | grep networks | awk -F'/' '{print $NF}' | sort | uniq)

   # 3) Get the master source ip block
   SOURCE=$(gcloud container clusters describe <cluster-name> --region <region> | grep masterIpv4CidrBlock | cut -d ':' -f 2 | tr -d ' ')

   gcloud compute firewall-rules create k8s-cert-manager --source-ranges $SOURCE --target-tags $WORKER_NODES_TAG --allow TCP:9443 --network $NETWORK
   ```
### Invalid header field value

**Problem**

Your base64'ed PAT token has a newline at the end; it needs to be created without one:

* `echo -n $TOKEN | base64`
* Create the secret as described in the docs using the shell and documented flags
## Operations

Troubleshooting runbooks for ARC operational problems.

### Stuck runner kind or backing pod

**Problem**

Sometimes either the runner kind (`kubectl get runners`) or its underlying pod can get stuck in a terminating state for various reasons.

**Solution**

Remove the finalizer from the relevant runner kind or pod:

```
# Get all kind runners and remove the finalizer
$ kubectl get runners --no-headers | awk '{print $1}' | xargs kubectl patch runner --type merge -p '{"metadata":{"finalizers":null}}'

# Get all pods that are stuck terminating and remove the finalizer
$ kubectl get pods | grep Terminating | awk '{print $1}' | xargs kubectl patch pod -p '{"metadata":{"finalizers":null}}'
```

_Note: the commands assume you have already selected the namespace your runners are in, and that they are in a namespace not shared with anything else._

### Delay in jobs being allocated to runners

**Problem**

ARC isn't involved in actually allocating jobs to runners; it is responsible for orchestrating runners and the runner lifecycle. Why some people see large delays in job allocation is not clear; however, it has been [reported](https://github.com/actions-runner-controller/actions-runner-controller/issues/1387#issuecomment-1122593984) that this is somehow caused by the self-update process.

**Solution**

Disable the self-update process in your runner manifests:

```yaml
apiVersion: actions.summerwind.dev/v1alpha1
kind: RunnerDeployment
metadata:
  name: example-runnerdeployment-with-sleep
spec:
  template:
    spec:
      ...
      env:
      - name: DISABLE_RUNNER_UPDATE
        value: "true"
```
### Runner coming up before network available

**Problem**

```yaml
# ...
spec:
  template:
    spec:
      ...
      env:
      - name: STARTUP_DELAY_IN_SECONDS
        value: "5"
```
## Outgoing network action hangs indefinitely

**Problem**

Some outgoing network actions hang indefinitely. This can happen when your cluster does not give Docker the standard MTU of 1500. You can check by running `ip link` in a pod that encounters the problem and reading the outgoing interface's MTU value; if it is smaller than 1500, try the following.
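For instance, a check along these lines (interface names vary by CNI plugin; you would read the MTU of whichever interface carries the pod's outgoing traffic):

```bash
# Inside an affected pod: print the interfaces and their MTUs.
# An MTU below 1500 (e.g. 1450 on many overlay networks) points to this issue.
ip link show
```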
**Solution**

Add a `dockerMTU` key in your runner's spec with the value you read on the outgoing interface. For instance:

```yaml
apiVersion: actions.summerwind.dev/v1alpha1
kind: RunnerDeployment
metadata:
  name: github-runner
  namespace: github-system
spec:
  replicas: 6
  template:
    spec:
      dockerMTU: 1400
      repository: $username/$repo
      env: []
```

There may be more places you need to tweak for MTU. Please consult issues like #651 for more information.

## Unable to scale to zero with TotalNumberOfQueuedAndInProgressWorkflowRuns

**Problem**

HRA doesn't scale the RunnerDeployment to zero, even though you configured HRA correctly with the pull-based scaling metric `TotalNumberOfQueuedAndInProgressWorkflowRuns` and set `minReplicas: 0`.

**Solution**

You very likely have some dangling workflow jobs stuck in `queued` or `in_progress`, as seen in [#1057](https://github.com/actions-runner-controller/actions-runner-controller/issues/1057#issuecomment-1133439061).

Manually call [the "list workflow runs" API](https://docs.github.com/en/rest/actions/workflow-runs#list-workflow-runs-for-a-repository), and [remove the dangling workflow job(s)](https://docs.github.com/en/rest/actions/workflow-runs#delete-a-workflow-run).
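A minimal sketch of that cleanup with the `gh` CLI, using the two documented endpoints linked above; `OWNER/REPO` and `RUN_ID` are placeholders you must fill in:

```bash
# List the IDs of runs stuck in "queued" (repeat with status=in_progress).
gh api 'repos/OWNER/REPO/actions/runs?status=queued' --jq '.workflow_runs[].id'

# Delete a dangling run by ID so HRA can scale back to zero.
gh api -X DELETE repos/OWNER/REPO/actions/runs/RUN_ID
```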
```bash
# Adhocly wait for some time until actions-runner-controller's admission webhook gets ready
sleep 20
```
**acceptance/deploy_runners.sh** (new executable file)

```bash
#!/usr/bin/env bash

set -e

OP=${OP:-apply}

RUNNER_LABEL=${RUNNER_LABEL:-self-hosted}

if [ -n "${TEST_REPO}" ]; then
  if [ "${USE_RUNNERSET}" != "false" ]; then
    cat acceptance/testdata/runnerset.envsubst.yaml | TEST_ENTERPRISE= TEST_ORG= RUNNER_MIN_REPLICAS=${REPO_RUNNER_MIN_REPLICAS} NAME=repo-runnerset envsubst | kubectl ${OP} -f -
  else
    echo "Running ${OP} runnerdeployment and hra. Set USE_RUNNERSET if you want to deploy runnerset instead."
    cat acceptance/testdata/runnerdeploy.envsubst.yaml | TEST_ENTERPRISE= TEST_ORG= RUNNER_MIN_REPLICAS=${REPO_RUNNER_MIN_REPLICAS} NAME=repo-runnerdeploy envsubst | kubectl ${OP} -f -
  fi
else
  echo "Skipped ${OP} for runnerdeployment and hra. Set TEST_REPO to \"yourorg/yourrepo\" to deploy."
fi

if [ -n "${TEST_ORG}" ]; then
  if [ "${USE_RUNNERSET}" != "false" ]; then
    cat acceptance/testdata/runnerset.envsubst.yaml | TEST_ENTERPRISE= TEST_REPO= RUNNER_MIN_REPLICAS=${ORG_RUNNER_MIN_REPLICAS} NAME=org-runnerset envsubst | kubectl ${OP} -f -
  else
    cat acceptance/testdata/runnerdeploy.envsubst.yaml | TEST_ENTERPRISE= TEST_REPO= RUNNER_MIN_REPLICAS=${ORG_RUNNER_MIN_REPLICAS} NAME=org-runnerdeploy envsubst | kubectl ${OP} -f -
  fi

  if [ -n "${TEST_ORG_GROUP}" ]; then
    if [ "${USE_RUNNERSET}" != "false" ]; then
      cat acceptance/testdata/runnerset.envsubst.yaml | TEST_ENTERPRISE= TEST_REPO= RUNNER_MIN_REPLICAS=${ORG_RUNNER_MIN_REPLICAS} TEST_GROUP=${TEST_ORG_GROUP} NAME=orggroup-runnerset envsubst | kubectl ${OP} -f -
    else
      cat acceptance/testdata/runnerdeploy.envsubst.yaml | TEST_ENTERPRISE= TEST_REPO= RUNNER_MIN_REPLICAS=${ORG_RUNNER_MIN_REPLICAS} TEST_GROUP=${TEST_ORG_GROUP} NAME=orggroup-runnerdeploy envsubst | kubectl ${OP} -f -
    fi
  else
    echo "Skipped ${OP} on org runnerdeployment. Set TEST_ORG_GROUP to ${OP}."
  fi
else
  echo "Skipped ${OP} on organizational runnerdeployment. Set TEST_ORG to ${OP}."
fi

if [ -n "${TEST_ENTERPRISE}" ]; then
  if [ "${USE_RUNNERSET}" != "false" ]; then
    cat acceptance/testdata/runnerset.envsubst.yaml | TEST_ORG= TEST_REPO= RUNNER_MIN_REPLICAS=${ENTERPRISE_RUNNER_MIN_REPLICAS} NAME=enterprise-runnerset envsubst | kubectl ${OP} -f -
  else
    cat acceptance/testdata/runnerdeploy.envsubst.yaml | TEST_ORG= TEST_REPO= RUNNER_MIN_REPLICAS=${ENTERPRISE_RUNNER_MIN_REPLICAS} NAME=enterprise-runnerdeploy envsubst | kubectl ${OP} -f -
  fi

  if [ -n "${TEST_ENTERPRISE_GROUP}" ]; then
    if [ "${USE_RUNNERSET}" != "false" ]; then
      cat acceptance/testdata/runnerset.envsubst.yaml | TEST_ORG= TEST_REPO= RUNNER_MIN_REPLICAS=${ENTERPRISE_RUNNER_MIN_REPLICAS} TEST_GROUP=${TEST_ENTERPRISE_GROUP} NAME=enterprisegroup-runnerset envsubst | kubectl ${OP} -f -
    else
      cat acceptance/testdata/runnerdeploy.envsubst.yaml | TEST_ORG= TEST_REPO= RUNNER_MIN_REPLICAS=${ENTERPRISE_RUNNER_MIN_REPLICAS} TEST_GROUP=${TEST_ENTERPRISE_GROUP} NAME=enterprisegroup-runnerdeploy envsubst | kubectl ${OP} -f -
    fi
  else
    echo "Skipped ${OP} on enterprise runnerdeployment. Set TEST_ENTERPRISE_GROUP to ${OP}."
  fi
else
  echo "Skipped ${OP} on enterprise runnerdeployment. Set TEST_ENTERPRISE to ${OP}."
fi
```

```yaml
# ...
      ephemeral: ${TEST_EPHEMERAL}

      #
      # dockerd within runner container
      #
# ...
  scaleTargetRef:
    name: ${NAME}
  scaleUpTriggers:
  - githubEvent:
      workflowJob: {}
    amount: 1
    duration: "10m"
  minReplicas: ${RUNNER_MIN_REPLICAS}
```
187
acceptance/testdata/runnerset.envsubst.yaml
vendored
187
acceptance/testdata/runnerset.envsubst.yaml
vendored
@@ -1,3 +1,59 @@
|
|||||||
|
---
|
||||||
|
apiVersion: storage.k8s.io/v1
|
||||||
|
kind: StorageClass
|
||||||
|
metadata:
|
||||||
|
name: ${NAME}-runner-work-dir
|
||||||
|
labels:
|
||||||
|
content: ${NAME}-runner-work-dir
|
||||||
|
provisioner: rancher.io/local-path
|
||||||
|
reclaimPolicy: Delete
|
||||||
|
volumeBindingMode: WaitForFirstConsumer
|
||||||
|
---
|
||||||
|
apiVersion: storage.k8s.io/v1
|
||||||
|
kind: StorageClass
|
||||||
|
metadata:
|
||||||
|
name: ${NAME}
|
||||||
|
# In kind environments, the provider writes:
|
||||||
|
# /var/lib/docker/volumes/KIND_NODE_CONTAINER_VOL_ID/_data/local-path-provisioner/PV_NAME
|
||||||
|
# It can be hundreds of gigabytes depending on what you cache in the test workflow. Beware to not encounter `no space left on device` errors!
|
||||||
|
# If you did encounter no space errorrs try:
|
||||||
|
# docker system prune
|
||||||
|
# docker buildx prune #=> frees up /var/lib/docker/volumes/buildx_buildkit_container-builder0_state
|
||||||
|
# sudo rm -rf /var/lib/docker/volumes/KIND_NODE_CONTAINER_VOL_ID/_data/local-path-provisioner #=> frees up local-path-provisioner's data
|
||||||
|
provisioner: rancher.io/local-path
|
||||||
|
reclaimPolicy: Retain
|
||||||
|
volumeBindingMode: WaitForFirstConsumer
|
||||||
|
---
|
||||||
|
apiVersion: storage.k8s.io/v1
|
||||||
|
kind: StorageClass
|
||||||
|
metadata:
|
||||||
|
name: ${NAME}-var-lib-docker
|
||||||
|
labels:
|
||||||
|
content: ${NAME}-var-lib-docker
|
||||||
|
provisioner: rancher.io/local-path
|
||||||
|
reclaimPolicy: Retain
|
||||||
|
volumeBindingMode: WaitForFirstConsumer
|
||||||
|
---
|
||||||
|
apiVersion: storage.k8s.io/v1
|
||||||
|
kind: StorageClass
|
||||||
|
metadata:
|
||||||
|
name: ${NAME}-cache
|
||||||
|
labels:
|
||||||
|
content: ${NAME}-cache
|
||||||
|
provisioner: rancher.io/local-path
|
||||||
|
reclaimPolicy: Retain
|
||||||
|
volumeBindingMode: WaitForFirstConsumer
|
||||||
|
---
|
||||||
|
apiVersion: storage.k8s.io/v1
|
||||||
|
kind: StorageClass
|
||||||
|
metadata:
|
||||||
|
name: ${NAME}-runner-tool-cache
|
||||||
|
labels:
|
||||||
|
content: ${NAME}-runner-tool-cache
|
||||||
|
provisioner: rancher.io/local-path
|
||||||
|
reclaimPolicy: Retain
|
||||||
|
volumeBindingMode: WaitForFirstConsumer
|
||||||
|
---
|
||||||
apiVersion: actions.summerwind.dev/v1alpha1
|
apiVersion: actions.summerwind.dev/v1alpha1
|
||||||
kind: RunnerSet
|
kind: RunnerSet
|
||||||
metadata:
|
metadata:
|
||||||
@@ -62,8 +118,125 @@ spec:
|
|||||||
env:
|
env:
|
||||||
- name: RUNNER_FEATURE_FLAG_EPHEMERAL
|
- name: RUNNER_FEATURE_FLAG_EPHEMERAL
|
||||||
value: "${RUNNER_FEATURE_FLAG_EPHEMERAL}"
|
value: "${RUNNER_FEATURE_FLAG_EPHEMERAL}"
|
||||||
#- name: docker
|
- name: GOMODCACHE
|
||||||
# #image: mumoshu/actions-runner-dind:dev
|
value: "/home/runner/.cache/go-mod"
|
||||||
|
# PV-backed runner work dir
|
||||||
|
volumeMounts:
|
||||||
|
# Comment out the ephemeral work volume if you're going to test the kubernetes container mode
|
||||||
|
# The volume and mount with the same names will be created by workVolumeClaimTemplate and the kubernetes container mode support.
|
||||||
|
# - name: work
|
||||||
|
# mountPath: /runner/_work
|
||||||
|
# Cache docker image layers, in case dockerdWithinRunnerContainer=true
|
||||||
|
- name: var-lib-docker
|
||||||
|
mountPath: /var/lib/docker
|
||||||
|
# Cache go modules and builds
|
||||||
|
# - name: gocache
|
||||||
|
# # Run `goenv | grep GOCACHE` to verify the path is correct for your env
|
||||||
|
# mountPath: /home/runner/.cache/go-build
|
||||||
|
# - name: gomodcache
|
||||||
|
# # Run `goenv | grep GOMODCACHE` to verify the path is correct for your env
|
||||||
|
# # mountPath: /home/runner/go/pkg/mod
|
||||||
|
- name: cache
|
||||||
|
# go: could not create module cache: stat /home/runner/.cache/go-mod: permission denied
|
||||||
|
mountPath: "/home/runner/.cache"
|
||||||
|
- name: runner-tool-cache
|
||||||
|
# This corresponds to our runner image's default setting of RUNNER_TOOL_CACHE=/opt/hostedtoolcache.
|
||||||
|
#
|
||||||
|
# In case you customize the envvar in both runner and docker containers of the runner pod spec,
|
||||||
|
# You'd need to change this mountPath accordingly.
|
||||||
|
#
|
||||||
|
# The tool cache directory is defined in actions/toolkit's tool-cache module:
|
||||||
|
# https://github.com/actions/toolkit/blob/2f164000dcd42fb08287824a3bc3030dbed33687/packages/tool-cache/src/tool-cache.ts#L621-L638
|
||||||
|
#
|
||||||
|
# Many setup-* actions like setup-go utilizes the tool-cache module to download and cache installed binaries:
|
||||||
|
# https://github.com/actions/setup-go/blob/56a61c9834b4a4950dbbf4740af0b8a98c73b768/src/installer.ts#L144
|
||||||
|
mountPath: "/opt/hostedtoolcache"
|
||||||
|
# Valid only when dockerdWithinRunnerContainer=false
|
||||||
|
- name: docker
|
||||||
|
# PV-backed runner work dir
|
||||||
|
volumeMounts:
|
||||||
|
- name: work
|
||||||
|
mountPath: /runner/_work
|
||||||
|
# Cache docker image layers, in case dockerdWithinRunnerContainer=false
|
||||||
|
- name: var-lib-docker
|
||||||
|
mountPath: /var/lib/docker
|
||||||
|
# image: mumoshu/actions-runner-dind:dev
|
||||||
|
|
||||||
|
# For buildx cache
|
||||||
|
- name: cache
|
||||||
|
mountPath: "/home/runner/.cache"
|
||||||
|
# Comment out the ephemeral work volume if you're going to test the kubernetes container mode
|
||||||
|
# volumes:
|
||||||
|
# - name: work
|
||||||
|
# ephemeral:
|
||||||
|
# volumeClaimTemplate:
|
||||||
|
# spec:
|
||||||
|
# accessModes:
|
||||||
|
# - ReadWriteOnce
|
||||||
|
# storageClassName: "${NAME}-runner-work-dir"
|
||||||
|
# resources:
|
||||||
|
# requests:
|
||||||
|
# storage: 10Gi
|
||||||
|
volumeClaimTemplates:
|
||||||
|
- metadata:
|
||||||
|
name: vol1
|
||||||
|
spec:
|
||||||
|
accessModes:
|
||||||
|
- ReadWriteOnce
|
||||||
|
resources:
|
||||||
|
requests:
|
||||||
|
storage: 10Mi
|
||||||
|
storageClassName: ${NAME}
|
||||||
|
## Dunno which provider supports auto-provisioning with selector.
|
||||||
|
## At least the rancher local path provider stopped with:
|
||||||
|
## waiting for a volume to be created, either by external provisioner "rancher.io/local-path" or manually created by system administrator
|
||||||
|
# selector:
|
||||||
|
# matchLabels:
|
||||||
|
# runnerset-volume-id: ${NAME}-vol1
|
||||||
|
- metadata:
|
||||||
|
name: vol2
|
||||||
|
spec:
|
||||||
|
accessModes:
|
||||||
|
- ReadWriteOnce
|
||||||
|
resources:
|
||||||
|
requests:
|
||||||
|
storage: 10Mi
|
||||||
|
storageClassName: ${NAME}
|
||||||
|
# selector:
|
||||||
|
# matchLabels:
|
||||||
|
# runnerset-volume-id: ${NAME}-vol2
|
||||||
|
- metadata:
|
||||||
|
name: var-lib-docker
|
||||||
|
spec:
|
||||||
|
accessModes:
|
||||||
|
- ReadWriteOnce
|
||||||
|
resources:
|
||||||
|
requests:
|
||||||
|
storage: 10Mi
|
||||||
|
storageClassName: ${NAME}-var-lib-docker
|
||||||
|
- metadata:
|
||||||
|
name: cache
|
||||||
|
spec:
|
||||||
|
accessModes:
|
||||||
|
- ReadWriteOnce
|
||||||
|
resources:
|
||||||
|
requests:
|
||||||
|
storage: 10Mi
|
||||||
|
storageClassName: ${NAME}-cache
|
||||||
|
- metadata:
|
||||||
|
name: runner-tool-cache
|
||||||
|
# It turns out labels doesn't distinguish PVs across PVCs and the
|
||||||
|
# end result is PVs are reused by wrong PVCs.
|
||||||
|
# The correct way seems to be to differentiate storage class per pvc template.
|
||||||
|
# labels:
|
||||||
|
# id: runner-tool-cache
|
||||||
|
spec:
|
||||||
|
accessModes:
|
||||||
|
- ReadWriteOnce
|
||||||
|
resources:
|
||||||
|
requests:
|
||||||
|
storage: 10Mi
|
||||||
|
storageClassName: ${NAME}-runner-tool-cache
|
||||||
---
|
---
|
||||||
apiVersion: actions.summerwind.dev/v1alpha1
|
apiVersion: actions.summerwind.dev/v1alpha1
|
||||||
kind: HorizontalRunnerAutoscaler
|
kind: HorizontalRunnerAutoscaler
|
||||||
@@ -74,9 +247,17 @@ spec:
|
|||||||
kind: RunnerSet
|
kind: RunnerSet
|
||||||
name: ${NAME}
|
name: ${NAME}
|
||||||
scaleUpTriggers:
|
scaleUpTriggers:
|
||||||
- githubEvent: {}
|
- githubEvent:
|
||||||
|
workflowJob: {}
|
||||||
amount: 1
|
amount: 1
|
||||||
duration: "10m"
|
duration: "10m"
|
||||||
minReplicas: ${RUNNER_MIN_REPLICAS}
|
minReplicas: ${RUNNER_MIN_REPLICAS}
|
||||||
maxReplicas: 10
|
maxReplicas: 10
|
||||||
scaleDownDelaySecondsAfterScaleOut: ${RUNNER_SCALE_DOWN_DELAY_SECONDS_AFTER_SCALE_OUT}
|
scaleDownDelaySecondsAfterScaleOut: ${RUNNER_SCALE_DOWN_DELAY_SECONDS_AFTER_SCALE_OUT}
|
||||||
|
# Comment out the whole metrics if you'd like to solely test webhook-based scaling
|
||||||
|
metrics:
|
||||||
|
- type: PercentageRunnersBusy
|
||||||
|
scaleUpThreshold: '0.75'
|
||||||
|
scaleDownThreshold: '0.25'
|
||||||
|
scaleUpFactor: '2'
|
||||||
|
scaleDownFactor: '0.5'
|
||||||
|
@@ -18,8 +18,10 @@ package v1alpha1

 import (
     "errors"
+    "fmt"

     "k8s.io/apimachinery/pkg/api/resource"
+    "k8s.io/apimachinery/pkg/util/validation/field"

     corev1 "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -71,6 +73,9 @@ type RunnerConfig struct {
     VolumeSizeLimit *resource.Quantity `json:"volumeSizeLimit,omitempty"`
     // +optional
     VolumeStorageMedium *string `json:"volumeStorageMedium,omitempty"`
+
+    // +optional
+    ContainerMode string `json:"containerMode,omitempty"`
 }

 // RunnerPodSpec defines the desired pod spec fields of the runner pod
@@ -135,6 +140,9 @@ type RunnerPodSpec struct {
     // +optional
     Tolerations []corev1.Toleration `json:"tolerations,omitempty"`
+
+    // +optional
+    PriorityClassName string `json:"priorityClassName,omitempty"`

     // +optional
     TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty"`

@@ -154,10 +162,37 @@ type RunnerPodSpec struct {

     // +optional
     DnsConfig *corev1.PodDNSConfig `json:"dnsConfig,omitempty"`
+
+    // +optional
+    WorkVolumeClaimTemplate *WorkVolumeClaimTemplate `json:"workVolumeClaimTemplate,omitempty"`
+}
+
+func (rs *RunnerSpec) Validate(rootPath *field.Path) field.ErrorList {
+    var (
+        errList field.ErrorList
+        err     error
+    )
+
+    err = rs.validateRepository()
+    if err != nil {
+        errList = append(errList, field.Invalid(rootPath.Child("repository"), rs.Repository, err.Error()))
+    }
+
+    err = rs.validateWorkVolumeClaimTemplate()
+    if err != nil {
+        errList = append(errList, field.Invalid(rootPath.Child("workVolumeClaimTemplate"), rs.WorkVolumeClaimTemplate, err.Error()))
+    }
+
+    err = rs.validateIsServiceAccountNameSet()
+    if err != nil {
+        errList = append(errList, field.Invalid(rootPath.Child("serviceAccountName"), rs.ServiceAccountName, err.Error()))
+    }
+
+    return errList
 }

 // ValidateRepository validates repository field.
-func (rs *RunnerSpec) ValidateRepository() error {
+func (rs *RunnerSpec) validateRepository() error {
     // Enterprise, Organization and repository are both exclusive.
     foundCount := 0
     if len(rs.Organization) > 0 {
@@ -179,6 +214,29 @@ func (rs *RunnerSpec) ValidateRepository() error {
     return nil
 }
+
+func (rs *RunnerSpec) validateWorkVolumeClaimTemplate() error {
+    if rs.ContainerMode != "kubernetes" {
+        return nil
+    }
+
+    if rs.WorkVolumeClaimTemplate == nil {
+        return errors.New("Spec.ContainerMode: kubernetes must have workVolumeClaimTemplate field specified")
+    }
+
+    return rs.WorkVolumeClaimTemplate.validate()
+}
+
+func (rs *RunnerSpec) validateIsServiceAccountNameSet() error {
+    if rs.ContainerMode != "kubernetes" {
+        return nil
+    }
+
+    if rs.ServiceAccountName == "" {
+        return errors.New("service account name is required if container mode is kubernetes")
+    }
+    return nil
+}

 // RunnerStatus defines the observed state of Runner
 type RunnerStatus struct {
     // Turns true only if the runner pod is ready.
@@ -207,6 +265,51 @@ type RunnerStatusRegistration struct {
     ExpiresAt metav1.Time `json:"expiresAt"`
 }
+
+type WorkVolumeClaimTemplate struct {
+    StorageClassName string                              `json:"storageClassName"`
+    AccessModes      []corev1.PersistentVolumeAccessMode `json:"accessModes"`
+    Resources        corev1.ResourceRequirements         `json:"resources"`
+}
+
+func (w *WorkVolumeClaimTemplate) validate() error {
+    if w.AccessModes == nil || len(w.AccessModes) == 0 {
+        return errors.New("Access mode should have at least one mode specified")
+    }
+
+    for _, accessMode := range w.AccessModes {
+        switch accessMode {
+        case corev1.ReadWriteOnce, corev1.ReadWriteMany:
+        default:
+            return fmt.Errorf("Access mode %v is not supported", accessMode)
+        }
+    }
+    return nil
+}
+
+func (w *WorkVolumeClaimTemplate) V1Volume() corev1.Volume {
+    return corev1.Volume{
+        Name: "work",
+        VolumeSource: corev1.VolumeSource{
+            Ephemeral: &corev1.EphemeralVolumeSource{
+                VolumeClaimTemplate: &corev1.PersistentVolumeClaimTemplate{
+                    Spec: corev1.PersistentVolumeClaimSpec{
+                        AccessModes:      w.AccessModes,
+                        StorageClassName: &w.StorageClassName,
+                        Resources:        w.Resources,
+                    },
+                },
+            },
+        },
+    }
+}
+
+func (w *WorkVolumeClaimTemplate) V1VolumeMount(mountPath string) corev1.VolumeMount {
+    return corev1.VolumeMount{
+        MountPath: mountPath,
+        Name:      "work",
+    }
+}

 // +kubebuilder:object:root=true
 // +kubebuilder:subresource:status
 // +kubebuilder:printcolumn:JSONPath=".spec.enterprise",name=Enterprise,type=string
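The pieces above fit together roughly as follows. This is a minimal sketch, not taken from this repository — the resource names and storage class are invented — but it reflects the new validations: `containerMode: kubernetes` requires `serviceAccountName` to be set and a `workVolumeClaimTemplate` whose access modes are limited to ReadWriteOnce or ReadWriteMany.

    apiVersion: actions.summerwind.dev/v1alpha1
    kind: RunnerDeployment
    metadata:
      name: example-k8s-mode-runner            # hypothetical name
    spec:
      template:
        spec:
          repository: example-org/example-repo # hypothetical repository
          containerMode: kubernetes
          serviceAccountName: runner-sa        # required when containerMode is kubernetes
          workVolumeClaimTemplate:
            storageClassName: runner-work-dir  # assumed to exist in the cluster
            accessModes:
            - ReadWriteOnce                    # only ReadWriteOnce/ReadWriteMany pass validate()
            resources:
              requests:
                storage: 10Gi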
@@ -66,15 +66,7 @@ func (r *Runner) ValidateDelete() error {

 // Validate validates resource spec.
 func (r *Runner) Validate() error {
-    var (
-        errList field.ErrorList
-        err     error
-    )
-
-    err = r.Spec.ValidateRepository()
-    if err != nil {
-        errList = append(errList, field.Invalid(field.NewPath("spec", "repository"), r.Spec.Repository, err.Error()))
-    }
+    errList := r.Spec.Validate(field.NewPath("spec"))

     if len(errList) > 0 {
         return apierrors.NewInvalid(r.GroupVersionKind().GroupKind(), r.Name, errList)
@@ -66,15 +66,7 @@ func (r *RunnerDeployment) ValidateDelete() error {

 // Validate validates resource spec.
 func (r *RunnerDeployment) Validate() error {
-    var (
-        errList field.ErrorList
-        err     error
-    )
-
-    err = r.Spec.Template.Spec.ValidateRepository()
-    if err != nil {
-        errList = append(errList, field.Invalid(field.NewPath("spec", "template", "spec", "repository"), r.Spec.Template.Spec.Repository, err.Error()))
-    }
+    errList := r.Spec.Template.Spec.Validate(field.NewPath("spec", "template", "spec"))

     if len(errList) > 0 {
         return apierrors.NewInvalid(r.GroupVersionKind().GroupKind(), r.Name, errList)
@@ -66,15 +66,7 @@ func (r *RunnerReplicaSet) ValidateDelete() error {

 // Validate validates resource spec.
 func (r *RunnerReplicaSet) Validate() error {
-    var (
-        errList field.ErrorList
-        err     error
-    )
-
-    err = r.Spec.Template.Spec.ValidateRepository()
-    if err != nil {
-        errList = append(errList, field.Invalid(field.NewPath("spec", "template", "spec", "repository"), r.Spec.Template.Spec.Repository, err.Error()))
-    }
+    errList := r.Spec.Template.Spec.Validate(field.NewPath("spec", "template", "spec"))

     if len(errList) > 0 {
         return apierrors.NewInvalid(r.GroupVersionKind().GroupKind(), r.Name, errList)
@@ -33,6 +33,12 @@ type RunnerSetSpec struct {
     // +nullable
     EffectiveTime *metav1.Time `json:"effectiveTime,omitempty"`
+
+    // +optional
+    ServiceAccountName string `json:"serviceAccountName,omitempty"`
+
+    // +optional
+    WorkVolumeClaimTemplate *WorkVolumeClaimTemplate `json:"workVolumeClaimTemplate,omitempty"`

     appsv1.StatefulSetSpec `json:",inline"`
 }

@@ -741,6 +741,11 @@ func (in *RunnerPodSpec) DeepCopyInto(out *RunnerPodSpec) {
         *out = new(v1.PodDNSConfig)
         (*in).DeepCopyInto(*out)
     }
+    if in.WorkVolumeClaimTemplate != nil {
+        in, out := &in.WorkVolumeClaimTemplate, &out.WorkVolumeClaimTemplate
+        *out = new(WorkVolumeClaimTemplate)
+        (*in).DeepCopyInto(*out)
+    }
 }

 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunnerPodSpec.
@@ -939,6 +944,11 @@ func (in *RunnerSetSpec) DeepCopyInto(out *RunnerSetSpec) {
         in, out := &in.EffectiveTime, &out.EffectiveTime
         *out = (*in).DeepCopy()
     }
+    if in.WorkVolumeClaimTemplate != nil {
+        in, out := &in.WorkVolumeClaimTemplate, &out.WorkVolumeClaimTemplate
+        *out = new(WorkVolumeClaimTemplate)
+        (*in).DeepCopyInto(*out)
+    }
     in.StatefulSetSpec.DeepCopyInto(&out.StatefulSetSpec)
 }

@@ -1126,6 +1136,27 @@ func (in *ScheduledOverride) DeepCopy() *ScheduledOverride {
     return out
 }
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *WorkVolumeClaimTemplate) DeepCopyInto(out *WorkVolumeClaimTemplate) {
+    *out = *in
+    if in.AccessModes != nil {
+        in, out := &in.AccessModes, &out.AccessModes
+        *out = make([]v1.PersistentVolumeAccessMode, len(*in))
+        copy(*out, *in)
+    }
+    in.Resources.DeepCopyInto(&out.Resources)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkVolumeClaimTemplate.
+func (in *WorkVolumeClaimTemplate) DeepCopy() *WorkVolumeClaimTemplate {
+    if in == nil {
+        return nil
+    }
+    out := new(WorkVolumeClaimTemplate)
+    in.DeepCopyInto(out)
+    return out
+}

 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *WorkflowJobSpec) DeepCopyInto(out *WorkflowJobSpec) {
     *out = *in
@@ -15,10 +15,10 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.17.3
+version: 0.20.0

 # Used as the default manager tag value when no tag property is provided in the values.yaml
-appVersion: 0.22.3
+appVersion: 0.25.0

 home: https://github.com/actions-runner-controller/actions-runner-controller

@@ -12,6 +12,7 @@ All additional docs are kept in the `docs/` folder, this README is solely for do
 |----------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------|
 | `labels` | Set labels to apply to all resources in the chart | |
 | `replicaCount` | Set the number of controller pods | 1 |
+| `webhookPort` | Set the containerPort for the webhook Pod | 9443 |
 | `syncPeriod` | Set the period in which the controler reconciles the desired runners count | 10m |
 | `enableLeaderElection` | Enable election configuration | true |
 | `leaderElectionId` | Set the election ID for the controller group | |
[4 file diffs suppressed because they are too large]
@@ -44,6 +44,7 @@ spec:
         {{- if .Values.leaderElectionId }}
         - "--leader-election-id={{ .Values.leaderElectionId }}"
         {{- end }}
+        - "--port={{ .Values.webhookPort }}"
        - "--sync-period={{ .Values.syncPeriod }}"
        - "--default-scale-down-delay={{ .Values.defaultScaleDownDelay }}"
        - "--docker-image={{ .Values.image.dindSidecarRepositoryAndTag }}"
@@ -125,7 +126,7 @@ spec:
        name: manager
        imagePullPolicy: {{ .Values.image.pullPolicy }}
        ports:
-       - containerPort: 9443
+       - containerPort: {{ .Values.webhookPort }}
          name: webhook-server
          protocol: TCP
        {{- if not .Values.metrics.proxy.enabled }}
@@ -1,16 +1,17 @@
 {{- if .Values.githubWebhookServer.ingress.enabled -}}
 {{- $fullName := include "actions-runner-controller-github-webhook-server.fullname" . -}}
 {{- $svcPort := (index .Values.githubWebhookServer.service.ports 0).port -}}
-{{- if .Capabilities.APIVersions.Has "networking.k8s.io/v1" }}
+{{- if .Capabilities.APIVersions.Has "networking.k8s.io/v1/Ingress" }}
 apiVersion: networking.k8s.io/v1
-{{- else if .Capabilities.APIVersions.Has "networking.k8s.io/v1beta1" }}
+{{- else if .Capabilities.APIVersions.Has "networking.k8s.io/v1beta1/Ingress" }}
 apiVersion: networking.k8s.io/v1beta1
-{{- else if .Capabilities.APIVersions.Has "extensions/v1beta1" }}
+{{- else if .Capabilities.APIVersions.Has "extensions/v1beta1/Ingress" }}
 apiVersion: extensions/v1beta1
 {{- end }}
 kind: Ingress
 metadata:
   name: {{ $fullName }}
+  namespace: {{ .Release.Namespace }}
   labels:
     {{- include "actions-runner-controller.labels" . | nindent 4 }}
   {{- with .Values.githubWebhookServer.ingress.annotations }}
@@ -36,13 +37,16 @@ spec:
     - host: {{ .host | quote }}
       http:
         paths:
+          {{- if .extraPaths }}
+          {{- toYaml .extraPaths | nindent 10 }}
+          {{- end }}
          {{- range .paths }}
          - path: {{ .path }}
-           {{- if $.Capabilities.APIVersions.Has "networking.k8s.io/v1" }}
+           {{- if $.Capabilities.APIVersions.Has "networking.k8s.io/v1/Ingress" }}
            pathType: {{ .pathType }}
            {{- end }}
            backend:
-             {{- if $.Capabilities.APIVersions.Has "networking.k8s.io/v1" }}
+             {{- if $.Capabilities.APIVersions.Has "networking.k8s.io/v1/Ingress" }}
              service:
                name: {{ $fullName }}
                port:
@@ -12,5 +12,17 @@ data:
 {{- if .Values.githubWebhookServer.secret.github_webhook_secret_token }}
   github_webhook_secret_token: {{ .Values.githubWebhookServer.secret.github_webhook_secret_token | toString | b64enc }}
 {{- end }}
+{{- if .Values.githubWebhookServer.secret.github_app_id }}
+  github_app_id: {{ .Values.githubWebhookServer.secret.github_app_id | toString | b64enc }}
+{{- end }}
+{{- if .Values.githubWebhookServer.secret.github_app_installation_id }}
+  github_app_installation_id: {{ .Values.githubWebhookServer.secret.github_app_installation_id | toString | b64enc }}
+{{- end }}
+{{- if .Values.githubWebhookServer.secret.github_app_private_key }}
+  github_app_private_key: {{ .Values.githubWebhookServer.secret.github_app_private_key | toString | b64enc }}
+{{- end }}
+{{- if .Values.githubWebhookServer.secret.github_token }}
+  github_token: {{ .Values.githubWebhookServer.secret.github_token | toString | b64enc }}
+{{- end }}
 {{- end }}
 {{- end }}
@@ -195,6 +195,28 @@ rules:
   verbs:
   - create
  - patch
+- apiGroups:
+  - ""
+  resources:
+  - persistentvolumeclaims
+  verbs:
+  - delete
+  - get
+  - list
+  - patch
+  - update
+  - watch
+- apiGroups:
+  - ""
+  resources:
+  - persistentvolumes
+  verbs:
+  - delete
+  - get
+  - list
+  - patch
+  - update
+  - watch
 - apiGroups:
   - coordination.k8s.io
   resources:
@@ -228,3 +250,11 @@ rules:
   - patch
   - update
   - watch
+- apiGroups:
+  - ""
+  resources:
+  - secrets
+  verbs:
+  - get
+  - list
+  - watch
@@ -13,7 +13,7 @@ spec:
   type: {{ .Values.service.type }}
   ports:
   - port: 443
-    targetPort: 9443
+    targetPort: {{ .Values.webhookPort }}
     protocol: TCP
     name: https
   selector:
@@ -6,6 +6,7 @@ labels: {}

 replicaCount: 1

+webhookPort: 9443
 syncPeriod: 1m
 defaultScaleDownDelay: 10m

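A hedged sketch of consuming the new value — the override file name and chosen port are placeholders; the same value feeds the deployment's `--port` flag and `containerPort` as well as the service `targetPort` in the templates shown above.

    # my-values.yaml (hypothetical Helm values override)
    webhookPort: 8443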
@@ -108,7 +109,7 @@ metrics:
   enabled: true
   image:
     repository: quay.io/brancz/kube-rbac-proxy
-    tag: v0.11.0
+    tag: v0.13.0

 resources:
   {}
@@ -182,6 +183,13 @@ githubWebhookServer:
     name: "github-webhook-server"
     ### GitHub Webhook Configuration
     github_webhook_secret_token: ""
+    ### GitHub Apps Configuration
+    ## NOTE: IDs MUST be strings, use quotes
+    #github_app_id: ""
+    #github_app_installation_id: ""
+    #github_app_private_key: |
+    ### GitHub PAT Configuration
+    #github_token: ""
   imagePullSecrets: []
   nameOverride: ""
   fullnameOverride: ""
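For illustration, an uncommented override might look like the sketch below — the IDs and key material are placeholders, and per the note above the IDs must be quoted strings so they survive the `toString | b64enc` pipeline in the secret template:

    githubWebhookServer:
      secret:
        github_app_id: "12345"
        github_app_installation_id: "678910"
        github_app_private_key: |
          -----BEGIN RSA PRIVATE KEY-----
          (placeholder, not a real key)
          -----END RSA PRIVATE KEY-----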
@@ -223,6 +231,20 @@ githubWebhookServer:
     paths: []
     # - path: /*
     #   pathType: ImplementationSpecific
+    # Extra paths that are not automatically connected to the server. This is useful when working with annotation based services.
+    extraPaths: []
+    # - path: /*
+    #   backend:
+    #     serviceName: ssl-redirect
+    #     servicePort: use-annotation
+    ## for Kubernetes >=1.19 (when "networking.k8s.io/v1" is used)
+    # - path: /*
+    #   pathType: Prefix
+    #   backend:
+    #     service:
+    #       name: ssl-redirect
+    #       port:
+    #         name: use-annotation
     tls: []
     # - secretName: chart-example-tls
     #   hosts:
@@ -72,6 +72,7 @@ func main() {
         enableLeaderElection bool
         syncPeriod           time.Duration
         logLevel             string
+        queueLimit           int

         ghClient *github.Client
     )
@@ -92,6 +93,7 @@ func main() {
         "Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager.")
     flag.DurationVar(&syncPeriod, "sync-period", 10*time.Minute, "Determines the minimum frequency at which K8s resources managed by this controller are reconciled. When you use autoscaling, set to a lower value like 10 minute, because this corresponds to the minimum time to react on demand change")
     flag.StringVar(&logLevel, "log-level", logging.LogLevelDebug, `The verbosity of the logging. Valid values are "debug", "info", "warn", "error". Defaults to "debug".`)
+    flag.IntVar(&queueLimit, "queue-limit", controllers.DefaultQueueLimit, `The maximum length of the scale operation queue. The scale opration is enqueued per every matching webhook event, and the server returns a 500 HTTP status when the queue was already full on enqueue attempt.`)
     flag.StringVar(&webhookSecretToken, "github-webhook-secret-token", "", "The personal access token of GitHub.")
     flag.StringVar(&c.Token, "github-token", c.Token, "The personal access token of GitHub.")
     flag.Int64Var(&c.AppID, "github-app-id", c.AppID, "The application ID of GitHub App.")
@@ -164,6 +166,7 @@ func main() {
         SecretKeyBytes: []byte(webhookSecretToken),
         Namespace:      watchNamespace,
         GitHubClient:   ghClient,
+        QueueLimit:     queueLimit,
     }

     if err = hraGitHubWebhook.SetupWithManager(mgr); err != nil {
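A sketch of passing the new flag to the webhook server container — the Deployment fragment below is illustrative, and omitting the flag falls back to controllers.DefaultQueueLimit (100):

    # fragment of a github-webhook-server Deployment (illustrative)
    containers:
    - name: github-webhook-server
      image: summerwind/actions-runner-controller:latest
      args:
      - "--queue-limit=500"   # arbitrary example value
      - "--sync-period=10m"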
[4 file diffs suppressed because they are too large]
@@ -22,8 +22,6 @@ bases:
 - ../certmanager
 # [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'.
 #- ../prometheus
-# [GH_WEBHOOK_SERVER] To enable the GitHub webhook server, uncomment all sections with 'GH_WEBHOOK_SERVER'.
-#- ../github-webhook-server

 patchesStrategicMerge:
 # Protect the /metrics endpoint by putting it behind auth.
@@ -46,10 +44,6 @@ patchesStrategicMerge:
 # 'CERTMANAGER' needs to be enabled to use ca injection
 - webhookcainjection_patch.yaml

-# [GH_WEBHOOK_SERVER] To enable the GitHub webhook server, uncomment all sections with 'GH_WEBHOOK_SERVER'.
-# Protect the GitHub webhook server metrics endpoint by putting it behind auth.
-# - gh-webhook-server-auth-proxy-patch.yaml
-
 # the following config is for teaching kustomize how to do var substitution
 vars:
 # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER' prefix.
@@ -2,11 +2,14 @@ apiVersion: kustomize.config.k8s.io/v1beta1
 kind: Kustomization

 images:
 - name: controller
   newName: summerwind/actions-runner-controller
   newTag: latest

 resources:
 - deployment.yaml
 - rbac.yaml
 - service.yaml
+
+patchesStrategicMerge:
+- gh-webhook-server-auth-proxy-patch.yaml
@@ -202,6 +202,29 @@ rules:
   verbs:
   - create
   - patch
+- apiGroups:
+  - ""
+  resources:
+  - persistentvolumeclaims
+  verbs:
+  - create
+  - delete
+  - get
+  - list
+  - patch
+  - update
+  - watch
+- apiGroups:
+  - ""
+  resources:
+  - persistentvolumes
+  verbs:
+  - delete
+  - get
+  - list
+  - patch
+  - update
+  - watch
 - apiGroups:
   - ""
   resources:
@@ -226,3 +249,12 @@ rules:
   - patch
   - update
   - watch
+- apiGroups:
+  - ""
+  resources:
+  - secrets
+  verbs:
+  - delete
+  - get
+  - list
+  - watch
@@ -9,7 +9,9 @@ import (
     "strings"

     "github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
-    "github.com/google/go-github/v39/github"
+    "github.com/google/go-github/v45/github"
+    corev1 "k8s.io/api/core/v1"
+    "sigs.k8s.io/controller-runtime/pkg/client"
 )

 const (
@@ -314,22 +316,52 @@ func (r *HorizontalRunnerAutoscalerReconciler) suggestReplicasByPercentageRunner
         numRunners           int
         numRunnersRegistered int
         numRunnersBusy       int
+        numTerminatingBusy   int
     )

     numRunners = len(runnerMap)

+    busyTerminatingRunnerPods := map[string]struct{}{}
+
+    kindLabel := LabelKeyRunnerDeploymentName
+    if hra.Spec.ScaleTargetRef.Kind == "RunnerSet" {
+        kindLabel = LabelKeyRunnerSetName
+    }
+
+    var runnerPodList corev1.PodList
+    if err := r.Client.List(ctx, &runnerPodList, client.InNamespace(hra.Namespace), client.MatchingLabels(map[string]string{
+        kindLabel: hra.Spec.ScaleTargetRef.Name,
+    })); err != nil {
+        return nil, err
+    }
+
+    for _, p := range runnerPodList.Items {
+        if p.Annotations[AnnotationKeyUnregistrationFailureMessage] != "" {
+            busyTerminatingRunnerPods[p.Name] = struct{}{}
+        }
+    }
+
     for _, runner := range runners {
         if _, ok := runnerMap[*runner.Name]; ok {
             numRunnersRegistered++

             if runner.GetBusy() {
                 numRunnersBusy++
+            } else if _, ok := busyTerminatingRunnerPods[*runner.Name]; ok {
+                numTerminatingBusy++
             }
+
+            delete(busyTerminatingRunnerPods, *runner.Name)
         }
     }
+
+    // Remaining busyTerminatingRunnerPods are runners that were not on the ListRunners API response yet
+    for range busyTerminatingRunnerPods {
+        numTerminatingBusy++
+    }

     var desiredReplicas int
-    fractionBusy := float64(numRunnersBusy) / float64(desiredReplicasBefore)
+    fractionBusy := float64(numRunnersBusy+numTerminatingBusy) / float64(desiredReplicasBefore)
     if fractionBusy >= scaleUpThreshold {
         if scaleUpAdjustment > 0 {
             desiredReplicas = desiredReplicasBefore + scaleUpAdjustment
@@ -358,6 +390,7 @@ func (r *HorizontalRunnerAutoscalerReconciler) suggestReplicasByPercentageRunner
         "num_runners", numRunners,
         "num_runners_registered", numRunnersRegistered,
         "num_runners_busy", numRunnersBusy,
+        "num_terminating_busy", numTerminatingBusy,
         "namespace", hra.Namespace,
         "kind", st.kind,
         "name", st.st,
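A worked example of what the new `numTerminatingBusy` term changes: with `desiredReplicasBefore = 4`, `numRunnersBusy = 2`, and one busy-but-terminating pod, `fractionBusy = (2 + 1) / 4 = 0.75`, which meets a `scaleUpThreshold` of '0.75' (the value used in the acceptance config at the top of this compare) and triggers a scale up; the previous formula, `2 / 4 = 0.5`, would not.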
@@ -4,17 +4,22 @@ import "time"

 const (
     LabelKeyRunnerSetName = "runnerset-name"
+    LabelKeyRunner        = "actions-runner"
 )

 const (
     // This names requires at least one slash to work.
     // See https://github.com/google/knative-gcp/issues/378
     runnerPodFinalizerName = "actions.summerwind.dev/runner-pod"
+    runnerLinkedResourcesFinalizerName = "actions.summerwind.dev/linked-resources"

     annotationKeyPrefix = "actions-runner/"

     AnnotationKeyLastRegistrationCheckTime = "actions-runner-controller/last-registration-check-time"
+
+    // AnnotationKeyUnregistrationFailureMessage is the annotation that is added onto the pod once it failed to be unregistered from GitHub due to e.g. 422 error
+    AnnotationKeyUnregistrationFailureMessage = annotationKeyPrefix + "unregistration-failure-message"

     // AnnotationKeyUnregistrationCompleteTimestamp is the annotation that is added onto the pod once the previously started unregistration process has been completed.
     AnnotationKeyUnregistrationCompleteTimestamp = annotationKeyPrefix + "unregistration-complete-timestamp"

@@ -47,8 +52,6 @@ const (
     // A pod that is timed out can be terminated if needed.
     registrationTimeout = 10 * time.Minute

-    defaultRegistrationCheckInterval = time.Minute
-
     // DefaultRunnerPodRecreationDelayAfterWebhookScale is the delay until syncing the runners with the desired replicas
     // after a webhook-based scale up.
     // This is used to prevent ARC from recreating completed runner pods that are deleted soon without being used at all.
@@ -63,4 +66,7 @@ const (

     EnvVarRunnerName  = "RUNNER_NAME"
     EnvVarRunnerToken = "RUNNER_TOKEN"
+
+    // defaultHookPath is path to the hook script used when the "containerMode: kubernetes" is specified
+    defaultRunnerHookPath = "/runner/k8s/index.js"
 )
controllers/horizontal_runner_autoscaler_batch_scale.go (new file, 207 lines)
@@ -0,0 +1,207 @@
+package controllers
+
+import (
+    "context"
+    "fmt"
+    "sync"
+    "time"
+
+    "github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
+    "github.com/go-logr/logr"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/apimachinery/pkg/types"
+    "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+type batchScaler struct {
+    Ctx      context.Context
+    Client   client.Client
+    Log      logr.Logger
+    interval time.Duration
+
+    queue       chan *ScaleTarget
+    workerStart sync.Once
+}
+
+func newBatchScaler(ctx context.Context, client client.Client, log logr.Logger) *batchScaler {
+    return &batchScaler{
+        Ctx:      ctx,
+        Client:   client,
+        Log:      log,
+        interval: 3 * time.Second,
+    }
+}
+
+type batchScaleOperation struct {
+    namespacedName types.NamespacedName
+    scaleOps       []scaleOperation
+}
+
+type scaleOperation struct {
+    trigger v1alpha1.ScaleUpTrigger
+    log     logr.Logger
+}
+
+// Add the scale target to the unbounded queue, blocking until the target is successfully added to the queue.
+// All the targets in the queue are dequeued every 3 seconds, grouped by the HRA, and applied.
+// In a happy path, batchScaler update each HRA only once, even though the HRA had two or more associated webhook events in the 3 seconds interval,
+// which results in less K8s API calls and less HRA update conflicts in case your ARC installation receives a lot of webhook events
+func (s *batchScaler) Add(st *ScaleTarget) {
+    if st == nil {
+        return
+    }
+
+    s.workerStart.Do(func() {
+        var expBackoff = []time.Duration{time.Second, 2 * time.Second, 4 * time.Second, 8 * time.Second, 16 * time.Second}
+
+        s.queue = make(chan *ScaleTarget)
+
+        log := s.Log
+
+        go func() {
+            log.Info("Starting batch worker")
+            defer log.Info("Stopped batch worker")
+
+            for {
+                select {
+                case <-s.Ctx.Done():
+                    return
+                default:
+                }
+
+                log.V(2).Info("Batch worker is dequeueing operations")
+
+                batches := map[types.NamespacedName]batchScaleOperation{}
+                after := time.After(s.interval)
+                var ops uint
+
+            batch:
+                for {
+                    select {
+                    case <-after:
+                        after = nil
+                        break batch
+                    case st := <-s.queue:
+                        nsName := types.NamespacedName{
+                            Namespace: st.HorizontalRunnerAutoscaler.Namespace,
+                            Name:      st.HorizontalRunnerAutoscaler.Name,
+                        }
+                        b, ok := batches[nsName]
+                        if !ok {
+                            b = batchScaleOperation{
+                                namespacedName: nsName,
+                            }
+                        }
+                        b.scaleOps = append(b.scaleOps, scaleOperation{
+                            log:     *st.log,
+                            trigger: st.ScaleUpTrigger,
+                        })
+                        batches[nsName] = b
+                        ops++
+                    }
+                }
+
+                log.V(2).Info("Batch worker dequeued operations", "ops", ops, "batches", len(batches))
+
+            retry:
+                for i := 0; ; i++ {
+                    failed := map[types.NamespacedName]batchScaleOperation{}
+
+                    for nsName, b := range batches {
+                        b := b
+                        if err := s.batchScale(context.Background(), b); err != nil {
+                            log.V(2).Info("Failed to scale due to error", "error", err)
+                            failed[nsName] = b
+                        } else {
+                            log.V(2).Info("Successfully ran batch scale", "hra", b.namespacedName)
+                        }
+                    }
+
+                    if len(failed) == 0 {
+                        break retry
+                    }
+
+                    batches = failed
+
+                    delay := 16 * time.Second
+                    if i < len(expBackoff) {
+                        delay = expBackoff[i]
+                    }
+                    time.Sleep(delay)
+                }
+            }
+        }()
+    })
+
+    s.queue <- st
+}
+
+func (s *batchScaler) batchScale(ctx context.Context, batch batchScaleOperation) error {
+    var hra v1alpha1.HorizontalRunnerAutoscaler
+
+    if err := s.Client.Get(ctx, batch.namespacedName, &hra); err != nil {
+        return err
+    }
+
+    copy := hra.DeepCopy()
+
+    copy.Spec.CapacityReservations = getValidCapacityReservations(copy)
+
+    var added, completed int
+
+    for _, scale := range batch.scaleOps {
+        amount := 1
+
+        if scale.trigger.Amount != 0 {
+            amount = scale.trigger.Amount
+        }
+
+        scale.log.V(2).Info("Adding capacity reservation", "amount", amount)
+
+        if amount > 0 {
+            now := time.Now()
+            copy.Spec.CapacityReservations = append(copy.Spec.CapacityReservations, v1alpha1.CapacityReservation{
+                EffectiveTime:  metav1.Time{Time: now},
+                ExpirationTime: metav1.Time{Time: now.Add(scale.trigger.Duration.Duration)},
+                Replicas:       amount,
+            })
+
+            added += amount
+        } else if amount < 0 {
+            var reservations []v1alpha1.CapacityReservation
+
+            var found bool
+
+            for _, r := range copy.Spec.CapacityReservations {
+                if !found && r.Replicas+amount == 0 {
+                    found = true
+                } else {
+                    reservations = append(reservations, r)
+                }
+            }
+
+            copy.Spec.CapacityReservations = reservations
+
+            completed += amount
+        }
+    }
+
+    before := len(hra.Spec.CapacityReservations)
+    expired := before - len(copy.Spec.CapacityReservations)
+    after := len(copy.Spec.CapacityReservations)
+
+    s.Log.V(1).Info(
+        fmt.Sprintf("Updating hra %s for capacityReservations update", hra.Name),
+        "before", before,
+        "expired", expired,
+        "added", added,
+        "completed", completed,
+        "after", after,
+    )
+
+    if err := s.Client.Update(ctx, copy); err != nil {
+        return fmt.Errorf("updating horizontalrunnerautoscaler to add capacity reservation: %w", err)
+    }
+
+    return nil
+}
@@ -23,14 +23,14 @@ import (
     "io/ioutil"
     "net/http"
     "strings"
+    "sync"
     "time"

-    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/types"
     "sigs.k8s.io/controller-runtime/pkg/reconcile"

     "github.com/go-logr/logr"
-    gogithub "github.com/google/go-github/v39/github"
+    gogithub "github.com/google/go-github/v45/github"
     "k8s.io/apimachinery/pkg/runtime"
     "k8s.io/client-go/tools/record"
     ctrl "sigs.k8s.io/controller-runtime"
@@ -46,6 +46,8 @@ const (

     keyPrefixEnterprise = "enterprises/"
     keyRunnerGroup      = "/group/"
+
+    DefaultQueueLimit = 100
 )

 // HorizontalRunnerAutoscalerGitHubWebhook autoscales a HorizontalRunnerAutoscaler and the RunnerDeployment on each
@@ -68,6 +70,15 @@ type HorizontalRunnerAutoscalerGitHubWebhook struct {
     // Set to empty for letting it watch for all namespaces.
     Namespace string
     Name      string
+
+    // QueueLimit is the maximum length of the bounded queue of scale targets and their associated operations
+    // A scale target is enqueued on each retrieval of each eligible webhook event, so that it is processed asynchronously.
+    QueueLimit int
+
+    worker      *worker
+    workerInit  sync.Once
+    workerStart sync.Once
+    batchCh     chan *ScaleTarget
 }

 func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) Reconcile(_ context.Context, request reconcile.Request) (reconcile.Result, error) {
@@ -312,9 +323,19 @@ func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) Handle(w http.Respons
         return
     }

-    if err := autoscaler.tryScale(context.TODO(), target); err != nil {
-        log.Error(err, "could not scale up")
+    autoscaler.workerInit.Do(func() {
+        batchScaler := newBatchScaler(context.Background(), autoscaler.Client, autoscaler.Log)
+
+        queueLimit := autoscaler.QueueLimit
+        if queueLimit == 0 {
+            queueLimit = DefaultQueueLimit
+        }
+        autoscaler.worker = newWorker(context.Background(), queueLimit, batchScaler.Add)
+    })
+
+    target.log = &log
+    if ok := autoscaler.worker.Add(target); !ok {
+        log.Error(err, "Could not scale up due to queue full")
         return
     }

@@ -383,6 +404,8 @@ func matchTriggerConditionAgainstEvent(types []string, eventAction *string) bool
 type ScaleTarget struct {
     v1alpha1.HorizontalRunnerAutoscaler
     v1alpha1.ScaleUpTrigger
+
+    log *logr.Logger
 }

 func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) searchScaleTargets(hras []v1alpha1.HorizontalRunnerAutoscaler, f func(v1alpha1.ScaleUpTrigger) bool) []ScaleTarget {
@@ -501,6 +524,7 @@ func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) getScaleUpTargetWithF
     if autoscaler.GitHubClient != nil {
         simu := &simulator.Simulator{
             Client: autoscaler.GitHubClient,
+            Log:    log,
         }
         // Get available organization runner groups and enterprise runner groups for a repository
         // These are the sum of runner groups with repository access = All repositories and runner groups
@@ -770,63 +794,6 @@ HRA:
     return nil, nil
 }

-func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) tryScale(ctx context.Context, target *ScaleTarget) error {
-    if target == nil {
-        return nil
-    }
-
-    copy := target.HorizontalRunnerAutoscaler.DeepCopy()
-
-    amount := 1
-
-    if target.ScaleUpTrigger.Amount != 0 {
-        amount = target.ScaleUpTrigger.Amount
-    }
-
-    capacityReservations := getValidCapacityReservations(copy)
-
-    if amount > 0 {
-        now := time.Now()
-        copy.Spec.CapacityReservations = append(capacityReservations, v1alpha1.CapacityReservation{
-            EffectiveTime:  metav1.Time{Time: now},
-            ExpirationTime: metav1.Time{Time: now.Add(target.ScaleUpTrigger.Duration.Duration)},
-            Replicas:       amount,
-        })
-    } else if amount < 0 {
-        var reservations []v1alpha1.CapacityReservation
-
-        var found bool
-
-        for _, r := range capacityReservations {
-            if !found && r.Replicas+amount == 0 {
-                found = true
-            } else {
-                reservations = append(reservations, r)
-            }
-        }
-
-        copy.Spec.CapacityReservations = reservations
-    }
-
-    before := len(target.HorizontalRunnerAutoscaler.Spec.CapacityReservations)
-    expired := before - len(capacityReservations)
-    after := len(copy.Spec.CapacityReservations)
-
-    autoscaler.Log.V(1).Info(
-        fmt.Sprintf("Patching hra %s for capacityReservations update", target.HorizontalRunnerAutoscaler.Name),
-        "before", before,
-        "expired", expired,
-        "amount", amount,
-        "after", after,
-    )
-
-    if err := autoscaler.Client.Patch(ctx, copy, client.MergeFrom(&target.HorizontalRunnerAutoscaler)); err != nil {
-        return fmt.Errorf("patching horizontalrunnerautoscaler to add capacity reservation: %w", err)
-    }
-
-    return nil
-}
-
 func getValidCapacityReservations(autoscaler *v1alpha1.HorizontalRunnerAutoscaler) []v1alpha1.CapacityReservation {
     var capacityReservations []v1alpha1.CapacityReservation

@@ -3,7 +3,7 @@ package controllers
 import (
     "github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
     "github.com/actions-runner-controller/actions-runner-controller/pkg/actionsglob"
-    "github.com/google/go-github/v39/github"
+    "github.com/google/go-github/v45/github"
 )

 func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) MatchCheckRunEvent(event *github.CheckRunEvent) func(scaleUpTrigger v1alpha1.ScaleUpTrigger) bool {
@@ -2,7 +2,7 @@ package controllers

 import (
     "github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
-    "github.com/google/go-github/v39/github"
+    "github.com/google/go-github/v45/github"
 )

 func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) MatchPullRequestEvent(event *github.PullRequestEvent) func(scaleUpTrigger v1alpha1.ScaleUpTrigger) bool {
@@ -2,7 +2,7 @@ package controllers

 import (
     "github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
-    "github.com/google/go-github/v39/github"
+    "github.com/google/go-github/v45/github"
 )

 func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) MatchPushEvent(event *github.PushEvent) func(scaleUpTrigger v1alpha1.ScaleUpTrigger) bool {
@@ -15,10 +15,6 @@ func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) MatchPushEvent(event

         push := g.Push

-        if push == nil {
-            return false
-        }
-
-        return true
+        return push != nil
     }
 }
@@ -15,7 +15,7 @@ import (
|
|||||||
|
|
||||||
actionsv1alpha1 "github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
|
actionsv1alpha1 "github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
|
||||||
"github.com/go-logr/logr"
|
"github.com/go-logr/logr"
|
||||||
"github.com/google/go-github/v39/github"
|
"github.com/google/go-github/v45/github"
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
"k8s.io/apimachinery/pkg/runtime"
|
"k8s.io/apimachinery/pkg/runtime"
|
||||||
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
|
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
|
||||||
|
|||||||
controllers/horizontal_runner_autoscaler_webhook_worker.go (new file, 55 lines)
@@ -0,0 +1,55 @@
+package controllers
+
+import (
+    "context"
+)
+
+// worker is a worker that has a non-blocking bounded queue of scale targets, dequeues scale target and executes the scale operation one by one.
+type worker struct {
+    scaleTargetQueue chan *ScaleTarget
+    work             func(*ScaleTarget)
+    done             chan struct{}
+}
+
+func newWorker(ctx context.Context, queueLimit int, work func(*ScaleTarget)) *worker {
+    w := &worker{
+        scaleTargetQueue: make(chan *ScaleTarget, queueLimit),
+        work:             work,
+        done:             make(chan struct{}),
+    }
+
+    go func() {
+        defer close(w.done)
+
+        for {
+            select {
+            case <-ctx.Done():
+                return
+            case t := <-w.scaleTargetQueue:
+                work(t)
+            }
+        }
+    }()
+
+    return w
+}
+
+// Add the scale target to the bounded queue, returning the result as a bool value. It returns true on successful enqueue, and returns false otherwise.
+// When returned false, the queue is already full so the enqueue operation must be retried later.
+// If the enqueue was triggered by an external source and there's no intermediate queue that we can use,
+// you must instruct the source to resend the original request later.
+// In case you're building a webhook server around this worker, this means that you must return a http error to the webhook server,
+// so that (hopefully) the sender can resend the webhook event later, or at least the human operator can notice or be notified about the
+// webhook develiery failure so that a manual retry can be done later.
+func (w *worker) Add(st *ScaleTarget) bool {
+    select {
+    case w.scaleTargetQueue <- st:
+        return true
+    default:
+        return false
+    }
+}
+
+func (w *worker) Done() chan struct{} {
+    return w.done
+}
@@ -0,0 +1,36 @@
+package controllers
+
+import (
+    "context"
+    "testing"
+
+    "github.com/stretchr/testify/require"
+)
+
+func TestWorker_Add(t *testing.T) {
+    ctx, cancel := context.WithCancel(context.Background())
+    defer cancel()
+
+    w := newWorker(ctx, 2, func(st *ScaleTarget) {})
+    require.True(t, w.Add(&ScaleTarget{}))
+    require.True(t, w.Add(&ScaleTarget{}))
+    require.False(t, w.Add(&ScaleTarget{}))
+}
+
+func TestWorker_Work(t *testing.T) {
+    ctx, cancel := context.WithCancel(context.Background())
+    defer cancel()
+
+    var count int
+
+    w := newWorker(ctx, 1, func(st *ScaleTarget) {
+        count++
+        cancel()
+    })
+    require.True(t, w.Add(&ScaleTarget{}))
+    require.False(t, w.Add(&ScaleTarget{}))
+
+    <-w.Done()
+
+    require.Equal(t, count, 1)
+}
@@ -8,7 +8,7 @@ import (
     "time"

     github2 "github.com/actions-runner-controller/actions-runner-controller/github"
-    "github.com/google/go-github/v39/github"
+    "github.com/google/go-github/v45/github"

     "github.com/actions-runner-controller/actions-runner-controller/github/fake"

@@ -1367,7 +1367,7 @@ func (env *testEnvironment) ExpectRegisteredNumberCountEventuallyEquals(want int

         return len(rs)
     },
-    time.Second*5, time.Millisecond*500).Should(Equal(want), optionalDescriptions...)
+    time.Second*10, time.Millisecond*500).Should(Equal(want), optionalDescriptions...)
 }

 func (env *testEnvironment) SendOrgPullRequestEvent(org, repo, branch, action string) {
|||||||
@@ -7,12 +7,43 @@ import (
 	"github.com/actions-runner-controller/actions-runner-controller/github"
 	"github.com/stretchr/testify/require"
 	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
 )

+func newWorkGenericEphemeralVolume(t *testing.T, storageReq string) corev1.Volume {
+	GBs, err := resource.ParseQuantity(storageReq)
+	if err != nil {
+		t.Fatalf("%v", err)
+	}
+
+	return corev1.Volume{
+		Name: "work",
+		VolumeSource: corev1.VolumeSource{
+			Ephemeral: &corev1.EphemeralVolumeSource{
+				VolumeClaimTemplate: &corev1.PersistentVolumeClaimTemplate{
+					Spec: corev1.PersistentVolumeClaimSpec{
+						AccessModes: []corev1.PersistentVolumeAccessMode{
+							corev1.ReadWriteOnce,
+						},
+						StorageClassName: strPtr("runner-work-dir"),
+						Resources: corev1.ResourceRequirements{
+							Requests: corev1.ResourceList{
+								corev1.ResourceStorage: GBs,
+							},
+						},
+					},
+				},
+			},
+		},
+	}
+}
+
 func TestNewRunnerPod(t *testing.T) {
+	workGenericEphemeralVolume := newWorkGenericEphemeralVolume(t, "10Gi")
+
 	type testcase struct {
 		description string

@@ -25,7 +56,7 @@ func TestNewRunnerPod(t *testing.T) {
 			ObjectMeta: metav1.ObjectMeta{
 				Labels: map[string]string{
 					"actions-runner-controller/inject-registration-token": "true",
-					"runnerset-name": "runner",
+					"actions-runner": "",
 				},
 			},
 			Spec: corev1.PodSpec{

@@ -106,10 +137,6 @@ func TestNewRunnerPod(t *testing.T) {
 						Name:  "DOCKER_CERT_PATH",
 						Value: "/certs/client",
 					},
-					{
-						Name:  "RUNNER_FEATURE_FLAG_EPHEMERAL",
-						Value: "true",
-					},
 				},
 				VolumeMounts: []corev1.VolumeMount{
 					{

@@ -159,7 +186,7 @@ func TestNewRunnerPod(t *testing.T) {
 					},
 				},
 			},
-			RestartPolicy: corev1.RestartPolicyOnFailure,
+			RestartPolicy: corev1.RestartPolicyNever,
 		},
 	}

@@ -171,7 +198,7 @@ func TestNewRunnerPod(t *testing.T) {
 			ObjectMeta: metav1.ObjectMeta{
 				Labels: map[string]string{
 					"actions-runner-controller/inject-registration-token": "true",
-					"runnerset-name": "runner",
+					"actions-runner": "",
 				},
 			},
 			Spec: corev1.PodSpec{

@@ -228,10 +255,6 @@ func TestNewRunnerPod(t *testing.T) {
 						Name:  "RUNNER_EPHEMERAL",
 						Value: "true",
 					},
-					{
-						Name:  "RUNNER_FEATURE_FLAG_EPHEMERAL",
-						Value: "true",
-					},
 				},
 				VolumeMounts: []corev1.VolumeMount{
 					{

@@ -245,7 +268,7 @@ func TestNewRunnerPod(t *testing.T) {
 					},
 				},
 			},
-			RestartPolicy: corev1.RestartPolicyOnFailure,
+			RestartPolicy: corev1.RestartPolicyNever,
 		},
 	}

@@ -253,7 +276,7 @@ func TestNewRunnerPod(t *testing.T) {
 			ObjectMeta: metav1.ObjectMeta{
 				Labels: map[string]string{
 					"actions-runner-controller/inject-registration-token": "true",
-					"runnerset-name": "runner",
+					"actions-runner": "",
 				},
 			},
 			Spec: corev1.PodSpec{

@@ -310,10 +333,6 @@ func TestNewRunnerPod(t *testing.T) {
 						Name:  "RUNNER_EPHEMERAL",
 						Value: "true",
 					},
-					{
-						Name:  "RUNNER_FEATURE_FLAG_EPHEMERAL",
-						Value: "true",
-					},
 				},
 				VolumeMounts: []corev1.VolumeMount{
 					{

@@ -327,7 +346,7 @@ func TestNewRunnerPod(t *testing.T) {
 					},
 				},
 			},
-			RestartPolicy: corev1.RestartPolicyOnFailure,
+			RestartPolicy: corev1.RestartPolicyNever,
 		},
 	}

@@ -400,8 +419,87 @@ func TestNewRunnerPod(t *testing.T) {
 				DockerEnabled: boolPtr(false),
 			},
 			want: newTestPod(dockerDisabled, func(p *corev1.Pod) {
-				// TODO
-				// p.Spec.Containers[0].SecurityContext.Privileged = boolPtr(true)
+				p.Spec.Containers[0].SecurityContext.Privileged = boolPtr(true)
+			}),
+		},
+		{
+			description: "Mount generic ephemeral volume onto work (with explicit volumeMount)",
+			template: corev1.Pod{
+				Spec: corev1.PodSpec{
+					Containers: []corev1.Container{
+						{
+							Name: "runner",
+							VolumeMounts: []corev1.VolumeMount{
+								{
+									Name:      "work",
+									MountPath: "/runner/_work",
+								},
+							},
+						},
+					},
+					Volumes: []corev1.Volume{
+						workGenericEphemeralVolume,
+					},
+				},
+			},
+			want: newTestPod(base, func(p *corev1.Pod) {
+				p.Spec.Volumes = []corev1.Volume{
+					workGenericEphemeralVolume,
+					{
+						Name: "runner",
+						VolumeSource: corev1.VolumeSource{
+							EmptyDir: &corev1.EmptyDirVolumeSource{},
+						},
+					},
+					{
+						Name: "certs-client",
+						VolumeSource: corev1.VolumeSource{
+							EmptyDir: &corev1.EmptyDirVolumeSource{},
+						},
+					},
+				}
+				p.Spec.Containers[0].VolumeMounts = []corev1.VolumeMount{
+					{
+						Name:      "work",
+						MountPath: "/runner/_work",
+					},
+					{
+						Name:      "runner",
+						MountPath: "/runner",
+					},
+					{
+						Name:      "certs-client",
+						MountPath: "/certs/client",
+						ReadOnly:  true,
+					},
+				}
+			}),
+		},
+		{
+			description: "Mount generic ephemeral volume onto work (without explicit volumeMount)",
+			template: corev1.Pod{
+				Spec: corev1.PodSpec{
+					Volumes: []corev1.Volume{
+						workGenericEphemeralVolume,
+					},
+				},
+			},
+			want: newTestPod(base, func(p *corev1.Pod) {
+				p.Spec.Volumes = []corev1.Volume{
+					workGenericEphemeralVolume,
+					{
+						Name: "runner",
+						VolumeSource: corev1.VolumeSource{
+							EmptyDir: &corev1.EmptyDirVolumeSource{},
+						},
+					},
+					{
+						Name: "certs-client",
+						VolumeSource: corev1.VolumeSource{
+							EmptyDir: &corev1.EmptyDirVolumeSource{},
+						},
+					},
+				}
 			}),
 		},
 	}
@@ -417,14 +515,20 @@ func TestNewRunnerPod(t *testing.T) {
 	for i := range testcases {
 		tc := testcases[i]
 		t.Run(tc.description, func(t *testing.T) {
-			got, err := newRunnerPod("runner", tc.template, tc.config, defaultRunnerImage, defaultRunnerImagePullSecrets, defaultDockerImage, defaultDockerRegistryMirror, githubBaseURL, false)
+			got, err := newRunnerPod(tc.template, tc.config, defaultRunnerImage, defaultRunnerImagePullSecrets, defaultDockerImage, defaultDockerRegistryMirror, githubBaseURL)
 			require.NoError(t, err)
 			require.Equal(t, tc.want, got)
 		})
 	}
 }

+func strPtr(s string) *string {
+	return &s
+}
+
 func TestNewRunnerPodFromRunnerController(t *testing.T) {
+	workGenericEphemeralVolume := newWorkGenericEphemeralVolume(t, "10Gi")
+
 	type testcase struct {
 		description string

@@ -442,7 +546,7 @@ func TestNewRunnerPodFromRunnerController(t *testing.T) {
 				Labels: map[string]string{
 					"actions-runner-controller/inject-registration-token": "true",
 					"pod-template-hash": "8857b86c7",
-					"runnerset-name": "runner",
+					"actions-runner": "",
 				},
 				OwnerReferences: []metav1.OwnerReference{
 					{

@@ -532,10 +636,6 @@ func TestNewRunnerPodFromRunnerController(t *testing.T) {
 						Name:  "DOCKER_CERT_PATH",
 						Value: "/certs/client",
 					},
-					{
-						Name:  "RUNNER_FEATURE_FLAG_EPHEMERAL",
-						Value: "true",
-					},
 					{
 						Name:  "RUNNER_NAME",
 						Value: "runner",

@@ -593,7 +693,7 @@ func TestNewRunnerPodFromRunnerController(t *testing.T) {
 					},
 				},
 			},
-			RestartPolicy: corev1.RestartPolicyOnFailure,
+			RestartPolicy: corev1.RestartPolicyNever,
 		},
 	}

@@ -603,7 +703,7 @@ func TestNewRunnerPodFromRunnerController(t *testing.T) {
 				Labels: map[string]string{
 					"actions-runner-controller/inject-registration-token": "true",
 					"pod-template-hash": "8857b86c7",
-					"runnerset-name": "runner",
+					"actions-runner": "",
 				},
 				OwnerReferences: []metav1.OwnerReference{
 					{

@@ -669,10 +769,6 @@ func TestNewRunnerPodFromRunnerController(t *testing.T) {
 						Name:  "RUNNER_EPHEMERAL",
 						Value: "true",
 					},
-					{
-						Name:  "RUNNER_FEATURE_FLAG_EPHEMERAL",
-						Value: "true",
-					},
 					{
 						Name:  "RUNNER_NAME",
 						Value: "runner",

@@ -694,7 +790,7 @@ func TestNewRunnerPodFromRunnerController(t *testing.T) {
 					},
 				},
 			},
-			RestartPolicy: corev1.RestartPolicyOnFailure,
+			RestartPolicy: corev1.RestartPolicyNever,
 		},
 	}

@@ -704,7 +800,7 @@ func TestNewRunnerPodFromRunnerController(t *testing.T) {
 				Labels: map[string]string{
 					"actions-runner-controller/inject-registration-token": "true",
 					"pod-template-hash": "8857b86c7",
-					"runnerset-name": "runner",
+					"actions-runner": "",
 				},
 				OwnerReferences: []metav1.OwnerReference{
 					{

@@ -770,10 +866,6 @@ func TestNewRunnerPodFromRunnerController(t *testing.T) {
 						Name:  "RUNNER_EPHEMERAL",
 						Value: "true",
 					},
-					{
-						Name:  "RUNNER_FEATURE_FLAG_EPHEMERAL",
-						Value: "true",
-					},
 					{
 						Name:  "RUNNER_NAME",
 						Value: "runner",

@@ -795,7 +887,7 @@ func TestNewRunnerPodFromRunnerController(t *testing.T) {
 					},
 				},
 			},
-			RestartPolicy: corev1.RestartPolicyOnFailure,
+			RestartPolicy: corev1.RestartPolicyNever,
 		},
 	}

@@ -904,7 +996,97 @@ func TestNewRunnerPodFromRunnerController(t *testing.T) {
 			},

 			want: newTestPod(dockerDisabled, func(p *corev1.Pod) {
-				// p.Spec.Containers[0].SecurityContext.Privileged = boolPtr(true)
+				p.Spec.Containers[0].SecurityContext.Privileged = boolPtr(true)
+			}),
+		},
+		{
+			description: "Mount generic ephemeral volume onto work (with explicit volumeMount)",
+			runner: arcv1alpha1.Runner{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: "runner",
+				},
+				Spec: arcv1alpha1.RunnerSpec{
+					RunnerPodSpec: arcv1alpha1.RunnerPodSpec{
+						Containers: []corev1.Container{
+							{
+								Name: "runner",
+								VolumeMounts: []corev1.VolumeMount{
+									{
+										Name:      "work",
+										MountPath: "/runner/_work",
+									},
+								},
+							},
+						},
+						Volumes: []corev1.Volume{
+							workGenericEphemeralVolume,
+						},
+					},
+				},
+			},
+			want: newTestPod(base, func(p *corev1.Pod) {
+				p.Spec.Volumes = []corev1.Volume{
+					{
+						Name: "runner",
+						VolumeSource: corev1.VolumeSource{
+							EmptyDir: &corev1.EmptyDirVolumeSource{},
+						},
+					},
+					{
+						Name: "certs-client",
+						VolumeSource: corev1.VolumeSource{
+							EmptyDir: &corev1.EmptyDirVolumeSource{},
+						},
+					},
+					workGenericEphemeralVolume,
+				}
+				p.Spec.Containers[0].VolumeMounts = []corev1.VolumeMount{
+					{
+						Name:      "work",
+						MountPath: "/runner/_work",
+					},
+					{
+						Name:      "runner",
+						MountPath: "/runner",
+					},
+					{
+						Name:      "certs-client",
+						MountPath: "/certs/client",
+						ReadOnly:  true,
+					},
+				}
+			}),
+		},
+		{
+			description: "Mount generic ephemeral volume onto work (without explicit volumeMount)",
+			runner: arcv1alpha1.Runner{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: "runner",
+				},
+				Spec: arcv1alpha1.RunnerSpec{
+					RunnerPodSpec: arcv1alpha1.RunnerPodSpec{
+						Volumes: []corev1.Volume{
+							workGenericEphemeralVolume,
+						},
+					},
+				},
+			},
+			want: newTestPod(base, func(p *corev1.Pod) {
+				p.Spec.Volumes = []corev1.Volume{
+					{
+						Name: "runner",
+						VolumeSource: corev1.VolumeSource{
+							EmptyDir: &corev1.EmptyDirVolumeSource{},
+						},
+					},
+					{
+						Name: "certs-client",
+						VolumeSource: corev1.VolumeSource{
+							EmptyDir: &corev1.EmptyDirVolumeSource{},
+						},
+					},
+					workGenericEphemeralVolume,
+				}
 			}),
 		},
 	}
74 controllers/persistent_volume_claim_controller.go (new file)
@@ -0,0 +1,74 @@
+/*
+Copyright 2022 The actions-runner-controller authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package controllers
+
+import (
+	"context"
+
+	"github.com/go-logr/logr"
+
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/client-go/tools/record"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+
+	corev1 "k8s.io/api/core/v1"
+)
+
+// RunnerPersistentVolumeClaimReconciler reconciles a PersistentVolumeClaim object
+type RunnerPersistentVolumeClaimReconciler struct {
+	client.Client
+	Log      logr.Logger
+	Recorder record.EventRecorder
+	Scheme   *runtime.Scheme
+	Name     string
+}
+
+// +kubebuilder:rbac:groups=core,resources=persistentvolumeclaims,verbs=get;list;watch;update;patch;delete
+// +kubebuilder:rbac:groups=core,resources=persistentvolumes,verbs=get;list;watch;update;patch;delete
+// +kubebuilder:rbac:groups=core,resources=events,verbs=create;patch
+
+func (r *RunnerPersistentVolumeClaimReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
+	log := r.Log.WithValues("pvc", req.NamespacedName)
+
+	var pvc corev1.PersistentVolumeClaim
+	if err := r.Get(ctx, req.NamespacedName, &pvc); err != nil {
+		return ctrl.Result{}, client.IgnoreNotFound(err)
+	}
+
+	res, err := syncPVC(ctx, r.Client, log, req.Namespace, &pvc)
+
+	if res == nil {
+		res = &ctrl.Result{}
+	}
+
+	return *res, err
+}
+
+func (r *RunnerPersistentVolumeClaimReconciler) SetupWithManager(mgr ctrl.Manager) error {
+	name := "runnerpersistentvolumeclaim-controller"
+	if r.Name != "" {
+		name = r.Name
+	}
+
+	r.Recorder = mgr.GetEventRecorderFor(name)
+
+	return ctrl.NewControllerManagedBy(mgr).
+		For(&corev1.PersistentVolumeClaim{}).
+		Named(name).
+		Complete(r)
+}
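Both of the new reconcilers follow the standard controller-runtime shape. As a rough sketch of how one might be wired into a manager (the surrounding main.go is not part of this diff, so the function and logger names here are assumptions):

package main

import (
	ctrl "sigs.k8s.io/controller-runtime"

	"github.com/actions-runner-controller/actions-runner-controller/controllers"
)

// setupPVCReconciler is hypothetical wiring; the field names match the
// reconciler struct added above, and SetupWithManager registers the
// controller with the manager's cache and event recorder.
func setupPVCReconciler(mgr ctrl.Manager) error {
	return (&controllers.RunnerPersistentVolumeClaimReconciler{
		Client: mgr.GetClient(),
		Log:    ctrl.Log.WithName("controllers").WithName("RunnerPersistentVolumeClaim"),
		Scheme: mgr.GetScheme(),
	}).SetupWithManager(mgr)
}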
72 controllers/persistent_volume_controller.go (new file)
@@ -0,0 +1,72 @@
+/*
+Copyright 2022 The actions-runner-controller authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package controllers
+
+import (
+	"context"
+
+	"github.com/go-logr/logr"
+
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/client-go/tools/record"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+
+	corev1 "k8s.io/api/core/v1"
+)
+
+// RunnerPersistentVolumeReconciler reconciles a PersistentVolume object
+type RunnerPersistentVolumeReconciler struct {
+	client.Client
+	Log      logr.Logger
+	Recorder record.EventRecorder
+	Scheme   *runtime.Scheme
+	Name     string
+}
+
+// +kubebuilder:rbac:groups=core,resources=persistentvolumes,verbs=get;list;watch;update;patch;delete
+// +kubebuilder:rbac:groups=core,resources=events,verbs=create;patch
+
+func (r *RunnerPersistentVolumeReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
+	log := r.Log.WithValues("pv", req.NamespacedName)
+
+	var pv corev1.PersistentVolume
+	if err := r.Get(ctx, req.NamespacedName, &pv); err != nil {
+		return ctrl.Result{}, client.IgnoreNotFound(err)
+	}
+
+	res, err := syncPV(ctx, r.Client, log, req.Namespace, &pv)
+	if res == nil {
+		res = &ctrl.Result{}
+	}
+
+	return *res, err
+}
+
+func (r *RunnerPersistentVolumeReconciler) SetupWithManager(mgr ctrl.Manager) error {
+	name := "runnerpersistentvolume-controller"
+	if r.Name != "" {
+		name = r.Name
+	}
+
+	r.Recorder = mgr.GetEventRecorderFor(name)
+
+	return ctrl.NewControllerManagedBy(mgr).
+		For(&corev1.PersistentVolume{}).
+		Named(name).
+		Complete(r)
+}
@@ -78,9 +78,7 @@ func (t *PodRunnerTokenInjector) Handle(ctx context.Context, req admission.Reque

 	updated.Annotations[AnnotationKeyTokenExpirationDate] = ts

-	if pod.Spec.RestartPolicy != corev1.RestartPolicyOnFailure {
-		updated.Spec.RestartPolicy = corev1.RestartPolicyOnFailure
-	}
+	forceRunnerPodRestartPolicyNever(updated)

 	buf, err := json.Marshal(updated)
 	if err != nil {
@@ -18,7 +18,9 @@ package controllers

 import (
 	"context"
+	"errors"
 	"fmt"
+	"strconv"
 	"strings"
 	"time"

@@ -47,14 +49,10 @@ const (

 	retryDelayOnGitHubAPIRateLimitError = 30 * time.Second

-	// This is an annotation internal to actions-runner-controller and can change in backward-incompatible ways
-	annotationKeyRegistrationOnly = "actions-runner-controller/registration-only"
-
 	EnvVarOrg        = "RUNNER_ORG"
 	EnvVarRepo       = "RUNNER_REPO"
 	EnvVarEnterprise = "RUNNER_ENTERPRISE"
 	EnvVarEphemeral  = "RUNNER_EPHEMERAL"
-	EnvVarRunnerFeatureFlagEphemeral = "RUNNER_FEATURE_FLAG_EPHEMERAL"
 	EnvVarTrue       = "true"
 )

@@ -80,6 +78,7 @@ type RunnerReconciler struct {
 // +kubebuilder:rbac:groups=actions.summerwind.dev,resources=runners/finalizers,verbs=get;list;watch;create;update;patch;delete
 // +kubebuilder:rbac:groups=actions.summerwind.dev,resources=runners/status,verbs=get;update;patch
 // +kubebuilder:rbac:groups=core,resources=pods,verbs=get;list;watch;create;update;patch;delete
+// +kubebuilder:rbac:groups=core,resources=secrets,verbs=get;list;watch;delete
 // +kubebuilder:rbac:groups=core,resources=pods/finalizers,verbs=get;list;watch;create;update;patch;delete
 // +kubebuilder:rbac:groups=core,resources=events,verbs=create;patch

@@ -116,6 +115,7 @@ func (r *RunnerReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr
 			// Pod was not found
 			return r.processRunnerDeletion(runner, ctx, log, nil)
 		}
+
 		return r.processRunnerDeletion(runner, ctx, log, &pod)
 	}

@@ -207,6 +207,24 @@ func runnerPodOrContainerIsStopped(pod *corev1.Pod) bool {
 	return stopped
 }

+func ephemeralRunnerContainerStatus(pod *corev1.Pod) *corev1.ContainerStatus {
+	if getRunnerEnv(pod, "RUNNER_EPHEMERAL") != "true" {
+		return nil
+	}
+
+	for _, status := range pod.Status.ContainerStatuses {
+		if status.Name != containerName {
+			continue
+		}
+
+		status := status
+
+		return &status
+	}
+
+	return nil
+}
+
 func (r *RunnerReconciler) processRunnerDeletion(runner v1alpha1.Runner, ctx context.Context, log logr.Logger, pod *corev1.Pod) (reconcile.Result, error) {
 	finalizers, removed := removeFinalizer(runner.ObjectMeta.Finalizers, finalizerName)

@@ -350,30 +368,65 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) {
 	if len(runner.Spec.Containers) == 0 {
 		template.Spec.Containers = append(template.Spec.Containers, corev1.Container{
 			Name: "runner",
-			ImagePullPolicy: runner.Spec.ImagePullPolicy,
-			EnvFrom:         runner.Spec.EnvFrom,
-			Env:             runner.Spec.Env,
-			Resources:       runner.Spec.Resources,
 		})

 		if (runner.Spec.DockerEnabled == nil || *runner.Spec.DockerEnabled) && (runner.Spec.DockerdWithinRunnerContainer == nil || !*runner.Spec.DockerdWithinRunnerContainer) {
 			template.Spec.Containers = append(template.Spec.Containers, corev1.Container{
 				Name: "docker",
-				VolumeMounts: runner.Spec.DockerVolumeMounts,
-				Resources:    runner.Spec.DockerdContainerResources,
-				Env:          runner.Spec.DockerEnv,
 			})
 		}
 	} else {
 		template.Spec.Containers = runner.Spec.Containers
 	}

+	for i, c := range template.Spec.Containers {
+		switch c.Name {
+		case "runner":
+			if c.ImagePullPolicy == "" {
+				template.Spec.Containers[i].ImagePullPolicy = runner.Spec.ImagePullPolicy
+			}
+			if len(c.EnvFrom) == 0 {
+				template.Spec.Containers[i].EnvFrom = runner.Spec.EnvFrom
+			}
+			if len(c.Env) == 0 {
+				template.Spec.Containers[i].Env = runner.Spec.Env
+			}
+			if len(c.Resources.Requests) == 0 {
+				template.Spec.Containers[i].Resources.Requests = runner.Spec.Resources.Requests
+			}
+			if len(c.Resources.Limits) == 0 {
+				template.Spec.Containers[i].Resources.Limits = runner.Spec.Resources.Limits
+			}
+		case "docker":
+			if len(c.VolumeMounts) == 0 {
+				template.Spec.Containers[i].VolumeMounts = runner.Spec.DockerVolumeMounts
+			}
+			if len(c.Resources.Limits) == 0 {
+				template.Spec.Containers[i].Resources.Limits = runner.Spec.DockerdContainerResources.Limits
+			}
+			if len(c.Resources.Requests) == 0 {
+				template.Spec.Containers[i].Resources.Requests = runner.Spec.DockerdContainerResources.Requests
+			}
+			if len(c.Env) == 0 {
+				template.Spec.Containers[i].Env = runner.Spec.DockerEnv
+			}
+		}
+	}
+
 	template.Spec.SecurityContext = runner.Spec.SecurityContext
 	template.Spec.EnableServiceLinks = runner.Spec.EnableServiceLinks

-	registrationOnly := metav1.HasAnnotation(runner.ObjectMeta, annotationKeyRegistrationOnly)
-
-	pod, err := newRunnerPod(runner.Name, template, runner.Spec.RunnerConfig, r.RunnerImage, r.RunnerImagePullSecrets, r.DockerImage, r.DockerRegistryMirror, r.GitHubClient.GithubBaseURL, registrationOnly)
+	if runner.Spec.ContainerMode == "kubernetes" {
+		workDir := runner.Spec.WorkDir
+		if workDir == "" {
+			workDir = "/runner/_work"
+		}
+		if err := applyWorkVolumeClaimTemplateToPod(&template, runner.Spec.WorkVolumeClaimTemplate, workDir); err != nil {
+			return corev1.Pod{}, err
+		}
+	}
+
+	pod, err := newRunnerPodWithContainerMode(runner.Spec.ContainerMode, template, runner.Spec.RunnerConfig, r.RunnerImage, r.RunnerImagePullSecrets, r.DockerImage, r.DockerRegistryMirror, r.GitHubClient.GithubBaseURL)
 	if err != nil {
 		return pod, err
 	}
@@ -385,6 +438,9 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) {
 	// if operator provides a work volume mount, use that
 	isPresent, _ := workVolumeMountPresent(runnerSpec.VolumeMounts)
 	if isPresent {
+		if runnerSpec.ContainerMode == "kubernetes" {
+			return pod, errors.New("volume mount \"work\" should be specified by workVolumeClaimTemplate in container mode kubernetes")
+		}
 		// remove work volume since it will be provided from runnerSpec.Volumes
 		// if we don't remove it here we would get a duplicate key error, i.e. two volumes named work
 		_, index := workVolumeMountPresent(pod.Spec.Containers[0].VolumeMounts)

@@ -398,6 +454,9 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) {
 	// if operator provides a work volume, use that
 	isPresent, _ := workVolumePresent(runnerSpec.Volumes)
 	if isPresent {
+		if runnerSpec.ContainerMode == "kubernetes" {
+			return pod, errors.New("volume \"work\" should be specified by workVolumeClaimTemplate in container mode kubernetes")
+		}
 		_, index := workVolumePresent(pod.Spec.Volumes)

 		// remove work volume since it will be provided from runnerSpec.Volumes
@@ -407,6 +466,7 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) {

 		pod.Spec.Volumes = append(pod.Spec.Volumes, runnerSpec.Volumes...)
 	}
+
 	if len(runnerSpec.InitContainers) != 0 {
 		pod.Spec.InitContainers = append(pod.Spec.InitContainers, runnerSpec.InitContainers...)
 	}

@@ -437,6 +497,10 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) {
 		pod.Spec.Tolerations = runnerSpec.Tolerations
 	}

+	if runnerSpec.PriorityClassName != "" {
+		pod.Spec.PriorityClassName = runnerSpec.PriorityClassName
+	}
+
 	if len(runnerSpec.TopologySpreadConstraints) != 0 {
 		pod.Spec.TopologySpreadConstraints = runnerSpec.TopologySpreadConstraints
 	}
@@ -487,7 +551,45 @@ func mutatePod(pod *corev1.Pod, token string) *corev1.Pod {
 	return updated
 }

-func newRunnerPod(runnerName string, template corev1.Pod, runnerSpec v1alpha1.RunnerConfig, defaultRunnerImage string, defaultRunnerImagePullSecrets []string, defaultDockerImage, defaultDockerRegistryMirror string, githubBaseURL string, registrationOnly bool) (corev1.Pod, error) {
+func runnerHookEnvs(pod *corev1.Pod) ([]corev1.EnvVar, error) {
+	isRequireSameNode, err := isRequireSameNode(pod)
+	if err != nil {
+		return nil, err
+	}
+
+	return []corev1.EnvVar{
+		{
+			Name:  "ACTIONS_RUNNER_CONTAINER_HOOKS",
+			Value: defaultRunnerHookPath,
+		},
+		{
+			Name:  "ACTIONS_RUNNER_REQUIRE_JOB_CONTAINER",
+			Value: "true",
+		},
+		{
+			Name: "ACTIONS_RUNNER_POD_NAME",
+			ValueFrom: &corev1.EnvVarSource{
+				FieldRef: &corev1.ObjectFieldSelector{
+					FieldPath: "metadata.name",
+				},
+			},
+		},
+		{
+			Name: "ACTIONS_RUNNER_JOB_NAMESPACE",
+			ValueFrom: &corev1.EnvVarSource{
+				FieldRef: &corev1.ObjectFieldSelector{
+					FieldPath: "metadata.namespace",
+				},
+			},
+		},
+		{
+			Name:  "ACTIONS_RUNNER_REQUIRE_SAME_NODE",
+			Value: strconv.FormatBool(isRequireSameNode),
+		},
+	}, nil
+}
+
+func newRunnerPodWithContainerMode(containerMode string, template corev1.Pod, runnerSpec v1alpha1.RunnerConfig, defaultRunnerImage string, defaultRunnerImagePullSecrets []string, defaultDockerImage, defaultDockerRegistryMirror string, githubBaseURL string) (corev1.Pod, error) {
 	var (
 		privileged      bool = true
 		dockerdInRunner bool = runnerSpec.DockerdWithinRunnerContainer != nil && *runnerSpec.DockerdWithinRunnerContainer
@@ -496,10 +598,16 @@ func newRunnerPod(runnerName string, template corev1.Pod, runnerSpec v1alpha1.Ru
 		dockerdInRunnerPrivileged bool = dockerdInRunner
 	)

+	if containerMode == "kubernetes" {
+		dockerdInRunner = false
+		dockerEnabled = false
+		dockerdInRunnerPrivileged = false
+	}
+
 	template = *template.DeepCopy()

 	// This label selector is used by default when rd.Spec.Selector is empty.
-	template.ObjectMeta.Labels = CloneAndAddLabel(template.ObjectMeta.Labels, LabelKeyRunnerSetName, runnerName)
+	template.ObjectMeta.Labels = CloneAndAddLabel(template.ObjectMeta.Labels, LabelKeyRunner, "")
 	template.ObjectMeta.Labels = CloneAndAddLabel(template.ObjectMeta.Labels, LabelKeyPodMutation, LabelValuePodMutation)

 	workDir := runnerSpec.WorkDir

@@ -559,14 +667,6 @@ func newRunnerPod(runnerName string, template corev1.Pod, runnerSpec v1alpha1.Ru
 		},
 	}

-	if registrationOnly {
-		env = append(env, corev1.EnvVar{
-			Name:  "RUNNER_REGISTRATION_ONLY",
-			Value: "true",
-		},
-		)
-	}
-
 	var seLinuxOptions *corev1.SELinuxOptions
 	if template.Spec.SecurityContext != nil {
 		seLinuxOptions = template.Spec.SecurityContext.SELinuxOptions

@@ -590,6 +690,17 @@ func newRunnerPod(runnerName string, template corev1.Pod, runnerSpec v1alpha1.Ru
 		}
 	}

+	if containerMode == "kubernetes" {
+		if dockerdContainer != nil {
+			template.Spec.Containers = append(template.Spec.Containers[:dockerdContainerIndex], template.Spec.Containers[dockerdContainerIndex+1:]...)
+			// Removing the dockerd container shifts any later containers down by
+			// one, so keep the runner container index valid.
+			if dockerdContainerIndex < runnerContainerIndex {
+				runnerContainerIndex--
+			}
+		}
+		dockerdContainer = nil
+		dockerdContainerIndex = -1
+	}
+
 	if runnerContainer == nil {
 		runnerContainerIndex = -1
 		runnerContainer = &corev1.Container{
@@ -620,18 +731,26 @@ func newRunnerPod(runnerName string, template corev1.Pod, runnerSpec v1alpha1.Ru
 	}

 	runnerContainer.Env = append(runnerContainer.Env, env...)
+	if containerMode == "kubernetes" {
+		hookEnvs, err := runnerHookEnvs(&template)
+		if err != nil {
+			return corev1.Pod{}, err
+		}
+		runnerContainer.Env = append(runnerContainer.Env, hookEnvs...)
+	}

 	if runnerContainer.SecurityContext == nil {
 		runnerContainer.SecurityContext = &corev1.SecurityContext{}
 	}

+	if runnerContainer.SecurityContext.Privileged == nil {
 		// Runner needs to run privileged if it contains DinD
 		runnerContainer.SecurityContext.Privileged = &dockerdInRunnerPrivileged
+	}

 	pod := template.DeepCopy()

-	if pod.Spec.RestartPolicy == "" {
-		pod.Spec.RestartPolicy = "OnFailure"
-	}
+	forceRunnerPodRestartPolicyNever(pod)

 	if mtu := runnerSpec.DockerMTU; mtu != nil && dockerdInRunner {
 		runnerContainer.Env = append(runnerContainer.Env, []corev1.EnvVar{

@@ -709,6 +828,7 @@ func newRunnerPod(runnerName string, template corev1.Pod, runnerSpec v1alpha1.Ru
 		)
 	}

+	if ok, _ := workVolumePresent(pod.Spec.Volumes); !ok {
 		pod.Spec.Volumes = append(pod.Spec.Volumes,
 			corev1.Volume{
 				Name: "work",

@@ -716,6 +836,10 @@ func newRunnerPod(runnerName string, template corev1.Pod, runnerSpec v1alpha1.Ru
 					EmptyDir: &corev1.EmptyDirVolumeSource{},
 				},
 			},
+		)
+	}
+
+	pod.Spec.Volumes = append(pod.Spec.Volumes,
 		corev1.Volume{
 			Name: "certs-client",
 			VolumeSource: corev1.VolumeSource{

@@ -724,11 +848,16 @@ func newRunnerPod(runnerName string, template corev1.Pod, runnerSpec v1alpha1.Ru
 		},
 	)

+	if ok, _ := workVolumeMountPresent(runnerContainer.VolumeMounts); !ok {
 		runnerContainer.VolumeMounts = append(runnerContainer.VolumeMounts,
 			corev1.VolumeMount{
 				Name:      "work",
 				MountPath: workDir,
 			},
+		)
+	}
+
+	runnerContainer.VolumeMounts = append(runnerContainer.VolumeMounts,
 		corev1.VolumeMount{
 			Name:      "certs-client",
 			MountPath: "/certs/client",
@@ -830,15 +959,13 @@ func newRunnerPod(runnerName string, template corev1.Pod, runnerSpec v1alpha1.Ru
 		}
 	}

-	// TODO Remove this once we remove RUNNER_FEATURE_FLAG_EPHEMERAL from runner's entrypoint.sh
-	// and make --ephemeral the default option.
-	if getRunnerEnv(pod, EnvVarRunnerFeatureFlagEphemeral) == "" {
-		setRunnerEnv(pod, EnvVarRunnerFeatureFlagEphemeral, EnvVarTrue)
-	}
-
 	return *pod, nil
 }

+func newRunnerPod(template corev1.Pod, runnerSpec v1alpha1.RunnerConfig, defaultRunnerImage string, defaultRunnerImagePullSecrets []string, defaultDockerImage, defaultDockerRegistryMirror string, githubBaseURL string) (corev1.Pod, error) {
+	return newRunnerPodWithContainerMode("", template, runnerSpec, defaultRunnerImage, defaultRunnerImagePullSecrets, defaultDockerImage, defaultDockerRegistryMirror, githubBaseURL)
+}
+
 func (r *RunnerReconciler) SetupWithManager(mgr ctrl.Manager) error {
 	name := "runner-controller"
 	if r.Name != "" {

@@ -901,3 +1028,71 @@ func workVolumeMountPresent(items []corev1.VolumeMount) (bool, int) {
 	}
 	return false, 0
 }
+
+func applyWorkVolumeClaimTemplateToPod(pod *corev1.Pod, workVolumeClaimTemplate *v1alpha1.WorkVolumeClaimTemplate, workDir string) error {
+	if workVolumeClaimTemplate == nil {
+		return errors.New("work volume claim template must be specified in container mode kubernetes")
+	}
+	for i := range pod.Spec.Volumes {
+		if pod.Spec.Volumes[i].Name == "work" {
+			return fmt.Errorf("Work volume should not be specified in container mode kubernetes. workVolumeClaimTemplate field should be used instead.")
+		}
+	}
+	pod.Spec.Volumes = append(pod.Spec.Volumes, workVolumeClaimTemplate.V1Volume())
+
+	var runnerContainer *corev1.Container
+	for i := range pod.Spec.Containers {
+		if pod.Spec.Containers[i].Name == "runner" {
+			runnerContainer = &pod.Spec.Containers[i]
+			break
+		}
+	}
+
+	if runnerContainer == nil {
+		return fmt.Errorf("runner container is not present when applying work volume claim template")
+	}
+
+	if isPresent, _ := workVolumeMountPresent(runnerContainer.VolumeMounts); isPresent {
+		return fmt.Errorf("volume mount \"work\" should not be present on the runner container in container mode kubernetes")
+	}
+
+	runnerContainer.VolumeMounts = append(runnerContainer.VolumeMounts, workVolumeClaimTemplate.V1VolumeMount(workDir))
+
+	return nil
+}
+
+// isRequireSameNode reports, for a runner in kubernetes container mode, whether
+// the runner should schedule jobs to the same node it is running on.
+//
+// This function should only be called in containerMode: kubernetes
+func isRequireSameNode(pod *corev1.Pod) (bool, error) {
+	isPresent, index := workVolumePresent(pod.Spec.Volumes)
+	if !isPresent {
+		return true, errors.New("internal error: work volume mount must exist in containerMode: kubernetes")
+	}
+
+	if pod.Spec.Volumes[index].Ephemeral == nil || pod.Spec.Volumes[index].Ephemeral.VolumeClaimTemplate == nil {
+		return true, errors.New("containerMode: kubernetes should have pod.Spec.Volumes[].Ephemeral.VolumeClaimTemplate set")
+	}
+
+	for _, accessMode := range pod.Spec.Volumes[index].Ephemeral.VolumeClaimTemplate.Spec.AccessModes {
+		switch accessMode {
+		case corev1.ReadWriteOnce:
+			return true, nil
+		case corev1.ReadWriteMany:
+		default:
+			return true, errors.New("actions-runner-controller supports ReadWriteOnce and ReadWriteMany modes only")
+		}
+	}
+	return false, nil
+}
+
+func overwriteRunnerEnv(runner *v1alpha1.Runner, key string, value string) {
+	for i := range runner.Spec.Env {
+		if runner.Spec.Env[i].Name == key {
+			runner.Spec.Env[i].Value = value
+			return
+		}
+	}
+	runner.Spec.Env = append(runner.Spec.Env, corev1.EnvVar{Name: key, Value: value})
+}
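To make the access-mode rules above concrete, here is a small sketch (not part of the diff) of how isRequireSameNode evaluates a work volume. It assumes the helpers from this file are in scope, and the pod literal is illustrative only:

package controllers

import corev1 "k8s.io/api/core/v1"

// exampleSameNode shows isRequireSameNode's behavior: a ReadWriteMany work
// volume lets hook-created job pods land on any node (false), while
// ReadWriteOnce pins them to the runner's node (true).
func exampleSameNode() (bool, error) {
	pod := &corev1.Pod{
		Spec: corev1.PodSpec{
			Volumes: []corev1.Volume{
				{
					Name: "work",
					VolumeSource: corev1.VolumeSource{
						Ephemeral: &corev1.EphemeralVolumeSource{
							VolumeClaimTemplate: &corev1.PersistentVolumeClaimTemplate{
								Spec: corev1.PersistentVolumeClaimSpec{
									AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteMany},
								},
							},
						},
					},
				},
			},
		},
	}

	return isRequireSameNode(pod) // returns false, nil for ReadWriteMany
}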
@@ -9,7 +9,7 @@ import (

 	"github.com/actions-runner-controller/actions-runner-controller/github"
 	"github.com/go-logr/logr"
-	gogithub "github.com/google/go-github/v39/github"
+	gogithub "github.com/google/go-github/v45/github"
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	ctrl "sigs.k8s.io/controller-runtime"
@@ -67,6 +67,25 @@ func annotatePodOnce(ctx context.Context, c client.Client, log logr.Logger, pod

 	return updated, nil
 }
+
+func labelPod(ctx context.Context, c client.Client, log logr.Logger, pod *corev1.Pod, k, v string) (*corev1.Pod, error) {
+	if pod == nil {
+		return nil, nil
+	}
+
+	updated := pod.DeepCopy()
+	if updated.Labels == nil {
+		updated.Labels = map[string]string{}
+	}
+	updated.Labels[k] = v
+	if err := c.Patch(ctx, updated, client.MergeFrom(pod)); err != nil {
+		log.Error(err, fmt.Sprintf("Failed to patch pod to have the %s label", k))
+		return nil, err
+	}
+
+	log.V(2).Info("Labeled pod", "key", k, "value", v)
+
+	return updated, nil
+}

 // If the first return value is nil, it's safe to delete the runner pod.
 func ensureRunnerUnregistration(ctx context.Context, retryDelay time.Duration, log logr.Logger, ghClient *github.Client, c client.Client, enterprise, organization, repository, runner string, pod *corev1.Pod) (*ctrl.Result, error) {
@@ -113,10 +132,28 @@ func ensureRunnerUnregistration(ctx context.Context, retryDelay time.Duration, l
 		// Happens e.g. when dind is in runner and run completes
 		log.Info("Runner pod has been stopped with a successful status.")
 	} else if pod != nil && pod.Annotations[AnnotationKeyRunnerCompletionWaitStartTimestamp] != "" {
-		log.Info("Runner pod is annotated to wait for completion")
-
-		return &ctrl.Result{RequeueAfter: retryDelay}, nil
-	} else if ok, err := unregisterRunner(ctx, ghClient, enterprise, organization, repository, runner, *runnerID); err != nil {
+		ct := ephemeralRunnerContainerStatus(pod)
+		if ct == nil {
+			log.Info("Runner pod is annotated to wait for completion, and the runner container is not ephemeral")
+
+			return &ctrl.Result{RequeueAfter: retryDelay}, nil
+		}
+
+		lts := ct.LastTerminationState.Terminated
+		if lts == nil {
+			log.Info("Runner pod is annotated to wait for completion, and the runner container is not restarting")
+
+			return &ctrl.Result{RequeueAfter: retryDelay}, nil
+		}
+
+		// Prevent the runner pod from getting stuck in Terminating.
+		// See https://github.com/actions-runner-controller/actions-runner-controller/issues/1369
+		log.Info("Deleting runner pod anyway because it has stopped prematurely. This may leave a dangling runner resource in GitHub Actions",
+			"lastState.exitCode", lts.ExitCode,
+			"lastState.message", lts.Message,
+			"pod.phase", pod.Status.Phase,
+		)
+	} else if ok, err := unregisterRunner(ctx, ghClient, enterprise, organization, repository, *runnerID); err != nil {
 		if errors.Is(err, &gogithub.RateLimitError{}) {
 			// We log the underlying error when we failed calling GitHub API to list or unregister runners,
 			// or when the runner is still busy.

@@ -133,7 +170,10 @@ func ensureRunnerUnregistration(ctx context.Context, retryDelay time.Duration, l

 	log.V(1).Info("Failed to unregister runner before deleting the pod.", "error", err)

-	var runnerBusy bool
+	var (
+		runnerBusy                         bool
+		runnerUnregistrationFailureMessage string
+	)

 	errRes := &gogithub.ErrorResponse{}
 	if errors.As(err, &errRes) {

@@ -155,6 +195,7 @@ func ensureRunnerUnregistration(ctx context.Context, retryDelay time.Duration, l
 		}

 		runnerBusy = errRes.Response.StatusCode == 422
+		runnerUnregistrationFailureMessage = errRes.Message

 		if runnerBusy && code != nil {
 			log.V(2).Info("Runner container has already stopped but the unregistration attempt failed. "+

@@ -169,13 +210,18 @@ func ensureRunnerUnregistration(ctx context.Context, retryDelay time.Duration, l
 	}

 	if runnerBusy {
+		_, err := labelPod(ctx, c, log, pod, AnnotationKeyUnregistrationFailureMessage, runnerUnregistrationFailureMessage)
+		if err != nil {
+			return &ctrl.Result{}, err
+		}
+
 		// We want to prevent spamming the deletion attempts but returning ctrl.Result with RequeueAfter doesn't
 		// work as the reconciliation can happen earlier due to a pod status update.
 		// For ephemeral runners, we can expect it to stop and unregister itself on completion.
 		// So we can just wait for the completion without actively retrying unregistration.
 		ephemeral := getRunnerEnv(pod, EnvVarEphemeral)
 		if ephemeral == "true" {
-			pod, err = annotatePodOnce(ctx, c, log, pod, AnnotationKeyRunnerCompletionWaitStartTimestamp, time.Now().Format(time.RFC3339))
+			_, err = annotatePodOnce(ctx, c, log, pod, AnnotationKeyRunnerCompletionWaitStartTimestamp, time.Now().Format(time.RFC3339))
 			if err != nil {
 				return &ctrl.Result{}, err
 			}

@@ -352,7 +398,7 @@ func setRunnerEnv(pod *corev1.Pod, key, value string) {
 // There isn't a single right grace period that works for everyone.
 // The longer the grace period is, the earlier a cluster resource shortage can occur due to throttled runner pod deletions,
 // while the shorter the grace period is, the more likely you may encounter the race issue.
-func unregisterRunner(ctx context.Context, client *github.Client, enterprise, org, repo, name string, id int64) (bool, error) {
+func unregisterRunner(ctx context.Context, client *github.Client, enterprise, org, repo string, id int64) (bool, error) {
 	// For the record, historically ARC did not try to call RemoveRunner on a busy runner, but that's no longer true.
 	// The reason ARC did so was to let a runner running a job not stop prematurely.
 	//
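The runnerBusy branch above records the unregistration failure message on the pod via the new labelPod helper. A compact sketch of the call shape (not part of the diff; the value passed here is illustrative, and in the actual change it comes from errRes.Message):

package controllers

import (
	"context"

	"github.com/go-logr/logr"
	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// exampleRecordUnregistrationFailure is a hypothetical caller of labelPod.
// labelPod patches the label onto a deep copy and returns the patched copy,
// leaving the pod argument itself unmodified.
func exampleRecordUnregistrationFailure(ctx context.Context, c client.Client, log logr.Logger, pod *corev1.Pod) (*corev1.Pod, error) {
	return labelPod(ctx, c, log, pod, AnnotationKeyUnregistrationFailureMessage, "runner-busy")
}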
22 controllers/runner_pod.go (new file)
@@ -0,0 +1,22 @@
+package controllers
+
+import corev1 "k8s.io/api/core/v1"
+
+// Force the runner pod managed by either a RunnerDeployment or a RunnerSet to have restartPolicy=Never.
+// See https://github.com/actions-runner-controller/actions-runner-controller/issues/1369 for more context.
+//
+// This is to prevent runner pods from getting stuck in Terminating when a K8s node disappears along with the runner pod and the runner container within it.
+//
+// Previously we used a restartPolicy of OnFailure, which later turned out to be wrong, and therefore we now set Never.
+//
+// When the restartPolicy is OnFailure and the node disappeared, runner pods on the node seem to get stuck with state.terminated==nil, state.waiting!=nil, and state.lastTerminationState!=nil,
+// and will never become Running.
+// It's probably because the node onto which the pods had been scheduled will never come back, hence the container restart attempts will never succeed,
+// and the pods are stuck waiting for successful restarts forever.
+//
+// By forcing runner pods to never restart, we hope there will be no chance of pods being stuck waiting.
+func forceRunnerPodRestartPolicyNever(pod *corev1.Pod) {
+	if pod.Spec.RestartPolicy != corev1.RestartPolicyNever {
+		pod.Spec.RestartPolicy = corev1.RestartPolicyNever
+	}
+}
@@ -20,6 +20,7 @@ import (
	"context"
	"errors"
	"fmt"
+	"sync"
	"time"

	"github.com/go-logr/logr"
@@ -50,6 +51,7 @@ type RunnerPodReconciler struct {
}

// +kubebuilder:rbac:groups=core,resources=pods,verbs=get;list;watch;update;patch;delete
+// +kubebuilder:rbac:groups=core,resources=secrets,verbs=get;list;watch
// +kubebuilder:rbac:groups=core,resources=events,verbs=create;patch

func (r *RunnerPodReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
@@ -60,8 +62,11 @@ func (r *RunnerPodReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
		return ctrl.Result{}, client.IgnoreNotFound(err)
	}

-	_, isRunnerPod := runnerPod.Labels[LabelKeyRunnerSetName]
-	if !isRunnerPod {
+	_, isRunnerPod := runnerPod.Labels[LabelKeyRunner]
+	_, isRunnerSetPod := runnerPod.Labels[LabelKeyRunnerSetName]
+	_, isRunnerDeploymentPod := runnerPod.Labels[LabelKeyRunnerDeploymentName]
+
+	if !isRunnerPod && !isRunnerSetPod && !isRunnerDeploymentPod {
		return ctrl.Result{}, nil
	}

@@ -77,6 +82,7 @@ func (r *RunnerPodReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
	}

	var enterprise, org, repo string
+	var isContainerMode bool

	for _, e := range envvars {
		switch e.Name {
@@ -86,13 +92,20 @@ func (r *RunnerPodReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
			org = e.Value
		case EnvVarRepo:
			repo = e.Value
+		case "ACTIONS_RUNNER_CONTAINER_HOOKS":
+			isContainerMode = true
		}
	}

	if runnerPod.ObjectMeta.DeletionTimestamp.IsZero() {
		finalizers, added := addFinalizer(runnerPod.ObjectMeta.Finalizers, runnerPodFinalizerName)

-		if added {
+		var cleanupFinalizersAdded bool
+		if isContainerMode {
+			finalizers, cleanupFinalizersAdded = addFinalizer(finalizers, runnerLinkedResourcesFinalizerName)
+		}
+
+		if added || cleanupFinalizersAdded {
			newRunner := runnerPod.DeepCopy()
			newRunner.ObjectMeta.Finalizers = finalizers

@@ -108,6 +121,27 @@ func (r *RunnerPodReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
	} else {
		log.V(2).Info("Seen deletion-timestamp is already set")

+		if finalizers, removed := removeFinalizer(runnerPod.ObjectMeta.Finalizers, runnerLinkedResourcesFinalizerName); removed {
+			if err := r.cleanupRunnerLinkedPods(ctx, &runnerPod, log); err != nil {
+				log.Info("Failed to clean up runner-linked pods due to an error. If this persists, please manually remove the runner-linked pods to unblock ARC", "err", err.Error())
+				return ctrl.Result{Requeue: true, RequeueAfter: 30 * time.Second}, nil
+			}
+			if err := r.cleanupRunnerLinkedSecrets(ctx, &runnerPod, log); err != nil {
+				log.Info("Failed to clean up runner-linked secrets due to an error. If this persists, please manually remove the runner-linked secrets to unblock ARC", "err", err.Error())
+				return ctrl.Result{Requeue: true, RequeueAfter: 30 * time.Second}, nil
+			}
+			patchedPod := runnerPod.DeepCopy()
+			patchedPod.ObjectMeta.Finalizers = finalizers
+
+			if err := r.Patch(ctx, patchedPod, client.MergeFrom(&runnerPod)); err != nil {
+				log.Error(err, "Failed to update runner for finalizer linked resources removal")
+				return ctrl.Result{}, err
+			}
+
+			// Keep the in-memory copy in sync; otherwise the subsequent patch request can revive the removed finalizer and trigger an unnecessary reconciliation
+			runnerPod = *patchedPod
+		}
+
		finalizers, removed := removeFinalizer(runnerPod.ObjectMeta.Finalizers, runnerPodFinalizerName)

		if removed {
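The `addFinalizer`/`removeFinalizer` helpers used throughout these hunks are not part of the diff. A sketch of what they plausibly do, inferred from the call sites (each returns the updated slice plus a flag saying whether anything changed, letting callers skip no-op API updates); not the actual ARC helpers:

```go
package controllers

// Sketch only, inferred from call sites in this diff.
func addFinalizer(finalizers []string, name string) ([]string, bool) {
	for _, f := range finalizers {
		if f == name {
			return finalizers, false // already present; nothing to do
		}
	}
	return append(finalizers, name), true
}

func removeFinalizer(finalizers []string, name string) ([]string, bool) {
	var result []string
	removed := false
	for _, f := range finalizers {
		if f == name {
			removed = true
			continue
		}
		result = append(result, f)
	}
	return result, removed
}
```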
@@ -222,3 +256,101 @@ func (r *RunnerPodReconciler) SetupWithManager(mgr ctrl.Manager) error {
		Named(name).
		Complete(r)
}
+
+func (r *RunnerPodReconciler) cleanupRunnerLinkedPods(ctx context.Context, pod *corev1.Pod, log logr.Logger) error {
+	var runnerLinkedPodList corev1.PodList
+	if err := r.List(ctx, &runnerLinkedPodList, client.InNamespace(pod.Namespace), client.MatchingLabels(
+		map[string]string{
+			"runner-pod": pod.ObjectMeta.Name,
+		},
+	)); err != nil {
+		return fmt.Errorf("failed to list runner-linked pods: %w", err)
+	}
+
+	var (
+		mu   sync.Mutex
+		wg   sync.WaitGroup
+		errs []error
+	)
+	for _, p := range runnerLinkedPodList.Items {
+		if !p.ObjectMeta.DeletionTimestamp.IsZero() {
+			continue
+		}
+
+		p := p
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			if err := r.Delete(ctx, &p); err != nil {
+				if kerrors.IsNotFound(err) || kerrors.IsGone(err) {
+					return
+				}
+				// The deletions run concurrently, so guard the shared errs slice.
+				mu.Lock()
+				errs = append(errs, fmt.Errorf("delete pod %q error: %v", p.ObjectMeta.Name, err))
+				mu.Unlock()
+			}
+		}()
+	}
+
+	wg.Wait()
+
+	if len(errs) > 0 {
+		for _, err := range errs {
+			log.Error(err, "failed to remove runner-linked pod")
+		}
+		return errors.New("failed to remove some runner linked pods")
+	}
+
+	return nil
+}
+
+func (r *RunnerPodReconciler) cleanupRunnerLinkedSecrets(ctx context.Context, pod *corev1.Pod, log logr.Logger) error {
+	log.V(2).Info("Listing runner-linked secrets to be deleted", "ns", pod.Namespace)
+
+	var runnerLinkedSecretList corev1.SecretList
+	if err := r.List(ctx, &runnerLinkedSecretList, client.InNamespace(pod.Namespace), client.MatchingLabels(
+		map[string]string{
+			"runner-pod": pod.ObjectMeta.Name,
+		},
+	)); err != nil {
+		return fmt.Errorf("failed to list runner-linked secrets: %w", err)
+	}
+
+	var (
+		mu   sync.Mutex
+		wg   sync.WaitGroup
+		errs []error
+	)
+	for _, s := range runnerLinkedSecretList.Items {
+		if !s.ObjectMeta.DeletionTimestamp.IsZero() {
+			continue
+		}
+
+		s := s
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			if err := r.Delete(ctx, &s); err != nil {
+				if kerrors.IsNotFound(err) || kerrors.IsGone(err) {
+					return
+				}
+				// Same as above: append under the mutex to avoid a data race.
+				mu.Lock()
+				errs = append(errs, fmt.Errorf("delete secret %q error: %v", s.ObjectMeta.Name, err))
+				mu.Unlock()
+			}
+		}()
+	}
+
+	wg.Wait()
+
+	if len(errs) > 0 {
+		for _, err := range errs {
+			log.Error(err, "failed to remove runner-linked secret")
+		}
+		return errors.New("failed to remove some runner linked secrets")
+	}
+
+	return nil
+}
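The two cleanup functions above share a fan-out-and-collect pattern. For comparison, here is a hedged sketch of the same pattern written with `golang.org/x/sync/errgroup`, which collects the first error without a hand-rolled WaitGroup; this is an alternative illustration, not part of the PR:

```go
package controllers

import (
	"context"

	"golang.org/x/sync/errgroup"
	corev1 "k8s.io/api/core/v1"
	kerrors "k8s.io/apimachinery/pkg/api/errors"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// deleteAllConcurrently deletes the given pods in parallel and returns the first
// deletion error, treating NotFound/Gone as success.
func deleteAllConcurrently(ctx context.Context, c client.Client, pods []corev1.Pod) error {
	var g errgroup.Group
	for _, p := range pods {
		if !p.ObjectMeta.DeletionTimestamp.IsZero() {
			continue // already being deleted
		}
		p := p // capture the loop variable (pre-Go 1.22 semantics)
		g.Go(func() error {
			if err := c.Delete(ctx, &p); err != nil && !kerrors.IsNotFound(err) && !kerrors.IsGone(err) {
				return err
			}
			return nil
		})
	}
	return g.Wait()
}
```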
@@ -179,7 +179,10 @@ func (r *RunnerDeploymentReconciler) Reconcile(ctx context.Context, req ctrl.Req
	newDesiredReplicas := getIntOrDefault(desiredRS.Spec.Replicas, defaultReplicas)

	// Please add more conditions under which we can in-place update the newest runnerreplicaset without disruption
-	if currentDesiredReplicas != newDesiredReplicas {
+	//
+	// If we missed taking the EffectiveTime diff into account, you might end up experiencing scale-ups being delayed by scale-downs.
+	// See https://github.com/actions-runner-controller/actions-runner-controller/pull/1477#issuecomment-1164154496
+	if currentDesiredReplicas != newDesiredReplicas || newestSet.Spec.EffectiveTime != rd.Spec.EffectiveTime {
		newestSet.Spec.Replicas = &newDesiredReplicas
		newestSet.Spec.EffectiveTime = rd.Spec.EffectiveTime

@@ -421,9 +424,7 @@ func getSelector(rd *v1alpha1.RunnerDeployment) *metav1.LabelSelector {
func newRunnerReplicaSet(rd *v1alpha1.RunnerDeployment, commonRunnerLabels []string, scheme *runtime.Scheme) (*v1alpha1.RunnerReplicaSet, error) {
	newRSTemplate := *rd.Spec.Template.DeepCopy()

-	for _, l := range commonRunnerLabels {
-		newRSTemplate.Spec.Labels = append(newRSTemplate.Spec.Labels, l)
-	}
+	newRSTemplate.Spec.Labels = append(newRSTemplate.Spec.Labels, commonRunnerLabels...)

	templateHash := ComputeHash(&newRSTemplate)

@@ -205,7 +205,3 @@ func (r *RunnerReplicaSetReconciler) SetupWithManager(mgr ctrl.Manager) error {
		Named(name).
		Complete(r)
}
-
-func registrationOnlyRunnerNameFor(rsName string) string {
-	return rsName + "-registration-only"
-}
@@ -58,6 +58,7 @@ type RunnerSetReconciler struct {
// +kubebuilder:rbac:groups=actions.summerwind.dev,resources=runnersets/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=apps,resources=statefulsets,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=apps,resources=statefulsets/status,verbs=get;update;patch
+// +kubebuilder:rbac:groups=core,resources=persistentvolumeclaims,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=core,resources=events,verbs=create;patch
// +kubebuilder:rbac:groups=coordination.k8s.io,resources=leases,verbs=get;list;create;update

@@ -129,6 +130,12 @@ func (r *RunnerSetReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
		owners = append(owners, &ss)
	}

+	if res, err := syncVolumes(ctx, r.Client, log, req.Namespace, runnerSet, statefulsets); err != nil {
+		return ctrl.Result{}, err
+	} else if res != nil {
+		return *res, nil
+	}
+
	res, err := syncRunnerPodsOwners(ctx, r.Client, log, effectiveTime, newDesiredReplicas, func() client.Object { return create.DeepCopy() }, ephemeral, owners)
	if err != nil || res == nil {
		return ctrl.Result{}, err
@@ -181,16 +188,40 @@ var LabelValuePodMutation = "true"
func (r *RunnerSetReconciler) newStatefulSet(runnerSet *v1alpha1.RunnerSet) (*appsv1.StatefulSet, error) {
	runnerSetWithOverrides := *runnerSet.Spec.DeepCopy()

-	for _, l := range r.CommonRunnerLabels {
-		runnerSetWithOverrides.Labels = append(runnerSetWithOverrides.Labels, l)
-	}
+	runnerSetWithOverrides.Labels = append(runnerSetWithOverrides.Labels, r.CommonRunnerLabels...)

	template := corev1.Pod{
		ObjectMeta: runnerSetWithOverrides.StatefulSetSpec.Template.ObjectMeta,
		Spec:       runnerSetWithOverrides.StatefulSetSpec.Template.Spec,
	}

-	pod, err := newRunnerPod(runnerSet.Name, template, runnerSet.Spec.RunnerConfig, r.RunnerImage, r.RunnerImagePullSecrets, r.DockerImage, r.DockerRegistryMirror, r.GitHubBaseURL, false)
+	if runnerSet.Spec.RunnerConfig.ContainerMode == "kubernetes" {
+		found := false
+		for i := range template.Spec.Containers {
+			if template.Spec.Containers[i].Name == containerName {
+				found = true
+			}
+		}
+		if !found {
+			template.Spec.Containers = append(template.Spec.Containers, corev1.Container{
+				Name: "runner",
+			})
+		}
+
+		workDir := runnerSet.Spec.RunnerConfig.WorkDir
+		if workDir == "" {
+			workDir = "/runner/_work"
+		}
+		if err := applyWorkVolumeClaimTemplateToPod(&template, runnerSet.Spec.WorkVolumeClaimTemplate, workDir); err != nil {
+			return nil, err
+		}
+
+		template.Spec.ServiceAccountName = runnerSet.Spec.ServiceAccountName
+	}
+
+	template.ObjectMeta.Labels = CloneAndAddLabel(template.ObjectMeta.Labels, LabelKeyRunnerSetName, runnerSet.Name)
+
+	pod, err := newRunnerPodWithContainerMode(runnerSet.Spec.RunnerConfig.ContainerMode, template, runnerSet.Spec.RunnerConfig, r.RunnerImage, r.RunnerImagePullSecrets, r.DockerImage, r.DockerRegistryMirror, r.GitHubBaseURL)
	if err != nil {
		return nil, err
	}
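`applyWorkVolumeClaimTemplateToPod` is referenced above but not shown in this diff. Judging from the call site and from the `V1Volume`/`V1VolumeMount` tests later in this compare view, it plausibly wires the claim template into the pod roughly like this; a sketch under those assumptions, not the actual implementation:

```go
package controllers

import (
	"errors"

	"github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
	corev1 "k8s.io/api/core/v1"
)

// applyWorkVolumeClaimTemplateToPodSketch adds the ephemeral "work" volume derived
// from the claim template and mounts it at the runner's work directory.
func applyWorkVolumeClaimTemplateToPodSketch(pod *corev1.Pod, wvct *v1alpha1.WorkVolumeClaimTemplate, workDir string) error {
	if wvct == nil {
		return errors.New("a workVolumeClaimTemplate is required in the kubernetes container mode")
	}
	pod.Spec.Volumes = append(pod.Spec.Volumes, wvct.V1Volume())
	for i := range pod.Spec.Containers {
		if pod.Spec.Containers[i].Name == "runner" {
			pod.Spec.Containers[i].VolumeMounts = append(pod.Spec.Containers[i].VolumeMounts, wvct.V1VolumeMount(workDir))
		}
	}
	return nil
}
```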
@@ -605,3 +605,13 @@ func parseAndMatchRecurringPeriod(now time.Time, start, end, frequency, until st

	return MatchSchedule(now, startTime, endTime, RecurrenceRule{Frequency: frequency, UntilTime: untilTime})
}
+
+func FuzzMatchSchedule(f *testing.F) {
+	start := time.Now()
+	end := time.Now()
+	now := time.Now()
+	f.Fuzz(func(t *testing.T, freq string) {
+		// Verify that it never panics
+		_, _, _ = MatchSchedule(now, start, end, RecurrenceRule{Frequency: freq})
+	})
+}
controllers/sync_volumes.go (new file, 184 lines)
@@ -0,0 +1,184 @@
+package controllers
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
+	"github.com/go-logr/logr"
+	appsv1 "k8s.io/api/apps/v1"
+	corev1 "k8s.io/api/core/v1"
+	kerrors "k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/types"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+const (
+	labelKeyCleanup               = "pending-cleanup"
+	labelKeyRunnerStatefulSetName = "runner-statefulset-name"
+)
+
+func syncVolumes(ctx context.Context, c client.Client, log logr.Logger, ns string, runnerSet *v1alpha1.RunnerSet, statefulsets []appsv1.StatefulSet) (*ctrl.Result, error) {
+	log = log.WithValues("ns", ns)
+
+	for _, t := range runnerSet.Spec.StatefulSetSpec.VolumeClaimTemplates {
+		for _, sts := range statefulsets {
+			pvcName := fmt.Sprintf("%s-%s-0", t.Name, sts.Name)
+
+			var pvc corev1.PersistentVolumeClaim
+			if err := c.Get(ctx, types.NamespacedName{Namespace: ns, Name: pvcName}, &pvc); err != nil {
+				if !kerrors.IsNotFound(err) {
+					return nil, err
+				}
+				continue
+			}
+
+			// TODO move this to the statefulset reconciler so that we spam this less,
+			// by starting the loop only after the statefulset got its deletionTimestamp set.
+			// Perhaps you can just wrap this in a finalizer here.
+			if pvc.Labels[labelKeyRunnerStatefulSetName] == "" {
+				updated := pvc.DeepCopy()
+				if updated.Labels == nil {
+					updated.Labels = map[string]string{}
+				}
+				updated.Labels[labelKeyRunnerStatefulSetName] = sts.Name
+				if err := c.Update(ctx, updated); err != nil {
+					return nil, err
+				}
+				log.V(1).Info("Added runner-statefulset-name label to PVC", "sts", sts.Name, "pvc", pvcName)
+			}
+		}
+	}
+
+	// PVs are not namespaced, hence we don't need client.InNamespace(ns).
+	// If we added that, c.List would silently return zero items.
+	//
+	// This `List` needs to be done in a dedicated reconciler that is registered to the manager via the `For` func.
+	// Otherwise the List func might return outdated contents (I saw status.phase remain Bound even after K8s updated it to Released, and it lasted minutes).
+	//
+	// cleanupLabels := map[string]string{
+	// 	labelKeyCleanup: runnerSet.Name,
+	// }
+	// pvList := &corev1.PersistentVolumeList{}
+	// if err := c.List(ctx, pvList, client.MatchingLabels(cleanupLabels)); err != nil {
+	// 	log.Info("retrying pv listing", "ns", ns, "err", err)
+	// 	return nil, err
+	// }
+
+	return nil, nil
+}
+
+func syncPVC(ctx context.Context, c client.Client, log logr.Logger, ns string, pvc *corev1.PersistentVolumeClaim) (*ctrl.Result, error) {
+	stsName := pvc.Labels[labelKeyRunnerStatefulSetName]
+	if stsName == "" {
+		return nil, nil
+	}
+
+	log.V(2).Info("Reconciling runner PVC")
+
+	var sts appsv1.StatefulSet
+	if err := c.Get(ctx, types.NamespacedName{Namespace: ns, Name: stsName}, &sts); err != nil {
+		if !kerrors.IsNotFound(err) {
+			return nil, err
+		}
+	} else {
+		// We assume that the statefulset is shortly terminated, hence retry forever until it gets removed.
+		retry := 10 * time.Second
+		log.V(1).Info("Retrying sync until statefulset gets removed", "requeueAfter", retry)
+		return &ctrl.Result{RequeueAfter: retry}, nil
+	}
+
+	log = log.WithValues("sts", stsName)
+
+	pvName := pvc.Spec.VolumeName
+
+	if pvName != "" {
+		// If we deleted the PVC before unsetting pv.spec.claimRef,
+		// K8s seems to revive the claimRef :thinking:
+		// So we need to mark the PV for claimRef unset first, then delete the PVC, and finally unset claimRef on the PV.
+
+		var pv corev1.PersistentVolume
+		if err := c.Get(ctx, types.NamespacedName{Namespace: ns, Name: pvName}, &pv); err != nil {
+			if !kerrors.IsNotFound(err) {
+				return nil, err
+			}
+			return nil, nil
+		}
+
+		pvCopy := pv.DeepCopy()
+		if pvCopy.Labels == nil {
+			pvCopy.Labels = map[string]string{}
+		}
+		pvCopy.Labels[labelKeyCleanup] = stsName
+
+		log.V(2).Info("Scheduling to unset PV's claimRef", "pv", pv.Name)
+
+		// Apparently K8s doesn't reconcile a PV immediately after PVC deletion.
+		// So we start a relatively busy loop of PV reconciliation slightly before the PVC deletion,
+		// so that the PV can be unbound as soon as possible after the PVC got deleted.
+		if err := c.Update(ctx, pvCopy); err != nil {
+			return nil, err
+		}
+
+		log.Info("Updated PV to unset claimRef")
+
+		// At this point, the PV is still Bound
+
+		log.V(2).Info("Deleting unused PVC")
+
+		if err := c.Delete(ctx, pvc); err != nil {
+			return nil, err
+		}
+
+		log.Info("Deleted unused PVC")
+
+		// At this point, the PV is still "Bound", but we are ready to unset pv.spec.claimRef in the pv controller.
+		// Once the pv controller unsets claimRef, the PV becomes "Released", hence available for reuse by another eligible PVC.
+	}
+
+	return nil, nil
+}
+
+func syncPV(ctx context.Context, c client.Client, log logr.Logger, ns string, pv *corev1.PersistentVolume) (*ctrl.Result, error) {
+	if pv.Spec.ClaimRef == nil {
+		return nil, nil
+	}
+
+	log.V(2).Info("Reconciling PV")
+
+	if pv.Labels[labelKeyCleanup] == "" {
+		// We assume that the pvc is shortly terminated, hence retry forever until it gets removed.
+		retry := 10 * time.Second
+		log.V(2).Info("Retrying sync to see if this PV needs to be managed by ARC", "requeueAfter", retry)
+		return &ctrl.Result{RequeueAfter: retry}, nil
+	}
+
+	log.V(2).Info("checking pv phase", "phase", pv.Status.Phase)
+
+	if pv.Status.Phase != corev1.VolumeReleased {
+		// We assume that the pvc is shortly terminated, hence retry forever until it gets removed.
+		retry := 10 * time.Second
+		log.V(1).Info("Retrying sync until pvc gets released", "requeueAfter", retry)
+		return &ctrl.Result{RequeueAfter: retry}, nil
+	}
+
+	// At this point, the PV is still Released
+
+	pvCopy := pv.DeepCopy()
+	delete(pvCopy.Labels, labelKeyCleanup)
+	pvCopy.Spec.ClaimRef = nil
+	log.V(2).Info("Unsetting PV's claimRef", "pv", pv.Name)
+	if err := c.Update(ctx, pvCopy); err != nil {
+		return nil, err
+	}
+
+	log.Info("PV should be Available now")
+
+	// At this point, the PV becomes Available, if its reclaim policy is "Retain".
+	// I have not yet tested it with "Delete", but perhaps it's deleted automatically after the update?
+	// https://kubernetes.io/docs/concepts/storage/persistent-volumes/#retain
+
+	return nil, nil
+}
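The `pvcName := fmt.Sprintf("%s-%s-0", t.Name, sts.Name)` line in `syncVolumes` above relies on the standard StatefulSet PVC naming scheme, `<volumeClaimTemplate>-<statefulset>-<ordinal>`, with ordinal 0 for a single-replica statefulset. A tiny illustration with hypothetical names:

```go
package main

import "fmt"

func main() {
	templateName := "var-lib-docker"     // hypothetical volumeClaimTemplate name
	stsName := "example-runnerset-bk4x5" // hypothetical RunnerSet-owned statefulset name
	fmt.Printf("%s-%s-0\n", templateName, stsName)
	// Output: var-lib-docker-example-runnerset-bk4x5-0
}
```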
@@ -3,6 +3,9 @@ package controllers
import (
	"reflect"
	"testing"
+
+	"github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
+	corev1 "k8s.io/api/core/v1"
)

func Test_filterLabels(t *testing.T) {
@@ -32,3 +35,94 @@ func Test_filterLabels(t *testing.T) {
		})
	}
}
+
+func Test_workVolumeClaimTemplateVolumeV1VolumeTransformation(t *testing.T) {
+	storageClassName := "local-storage"
+	workVolumeClaimTemplate := v1alpha1.WorkVolumeClaimTemplate{
+		StorageClassName: storageClassName,
+		AccessModes:      []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce, corev1.ReadWriteMany},
+		Resources:        corev1.ResourceRequirements{},
+	}
+	want := corev1.Volume{
+		Name: "work",
+		VolumeSource: corev1.VolumeSource{
+			Ephemeral: &corev1.EphemeralVolumeSource{
+				VolumeClaimTemplate: &corev1.PersistentVolumeClaimTemplate{
+					Spec: corev1.PersistentVolumeClaimSpec{
+						AccessModes:      []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce, corev1.ReadWriteMany},
+						StorageClassName: &storageClassName,
+						Resources:        corev1.ResourceRequirements{},
+					},
+				},
+			},
+		},
+	}
+
+	got := workVolumeClaimTemplate.V1Volume()
+
+	if got.Name != want.Name {
+		t.Errorf("want name %q, got %q\n", want.Name, got.Name)
+	}
+
+	if got.VolumeSource.Ephemeral == nil {
+		t.Fatal("work volume claim template should transform itself into an Ephemeral volume source\n")
+	}
+
+	if got.VolumeSource.Ephemeral.VolumeClaimTemplate == nil {
+		t.Fatal("work volume claim template should have the ephemeral volume claim template set\n")
+	}
+
+	gotClassName := *got.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.StorageClassName
+	wantClassName := *want.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.StorageClassName
+	if gotClassName != wantClassName {
+		t.Errorf("expected storage class name %q, got %q\n", wantClassName, gotClassName)
+	}
+
+	gotAccessModes := got.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.AccessModes
+	wantAccessModes := want.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.AccessModes
+	if len(gotAccessModes) != len(wantAccessModes) {
+		t.Fatalf("access modes lengths mismatch: got %v, expected %v\n", gotAccessModes, wantAccessModes)
+	}
+
+	diff := make(map[corev1.PersistentVolumeAccessMode]int, len(wantAccessModes))
+	for _, am := range wantAccessModes {
+		diff[am]++
+	}
+
+	for _, am := range gotAccessModes {
+		_, ok := diff[am]
+		if !ok {
+			t.Errorf("got access mode %v that is not in the wanted access modes\n", am)
+		}
+
+		diff[am]--
+		if diff[am] == 0 {
+			delete(diff, am)
+		}
+	}
+
+	if len(diff) != 0 {
+		t.Fatalf("got access modes did not take every access mode into account\nactual: %v expected: %v\n", gotAccessModes, wantAccessModes)
+	}
+}
+
+func Test_workVolumeClaimTemplateV1VolumeMount(t *testing.T) {
+
+	workVolumeClaimTemplate := v1alpha1.WorkVolumeClaimTemplate{
+		StorageClassName: "local-storage",
+		AccessModes:      []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce, corev1.ReadWriteMany},
+		Resources:        corev1.ResourceRequirements{},
+	}
+
+	mountPath := "/test/_work"
+	want := corev1.VolumeMount{
+		MountPath: mountPath,
+		Name:      "work",
+	}
+
+	got := workVolumeClaimTemplate.V1VolumeMount(mountPath)
+
+	if want != got {
+		t.Fatalf("expected volume mount %+v, actual %+v\n", want, got)
+	}
+}
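The two tests above pin down the observable behavior of `WorkVolumeClaimTemplate.V1Volume` and `V1VolumeMount`. For readers who don't have the v1alpha1 package at hand, here is a self-contained sketch inferred from those expectations; a standalone re-implementation for illustration, not the actual ARC code:

```go
package v1alpha1sketch

import corev1 "k8s.io/api/core/v1"

// WorkVolumeClaimTemplate mirrors the fields exercised by the tests above.
type WorkVolumeClaimTemplate struct {
	StorageClassName string
	AccessModes      []corev1.PersistentVolumeAccessMode
	Resources        corev1.ResourceRequirements
}

// V1Volume maps the claim template onto a generic ephemeral volume named "work".
func (w WorkVolumeClaimTemplate) V1Volume() corev1.Volume {
	return corev1.Volume{
		Name: "work",
		VolumeSource: corev1.VolumeSource{
			Ephemeral: &corev1.EphemeralVolumeSource{
				VolumeClaimTemplate: &corev1.PersistentVolumeClaimTemplate{
					Spec: corev1.PersistentVolumeClaimSpec{
						AccessModes:      w.AccessModes,
						StorageClassName: &w.StorageClassName,
						Resources:        w.Resources,
					},
				},
			},
		},
	}
}

// V1VolumeMount mounts the "work" volume at the given path.
func (w WorkVolumeClaimTemplate) V1VolumeMount(mountPath string) corev1.VolumeMount {
	return corev1.VolumeMount{Name: "work", MountPath: mountPath}
}
```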
docs/releasenotes/0.24.md (new file, 54 lines)
@@ -0,0 +1,54 @@
+# actions-runner-controller v0.24.0
+
+All changes in this release can be found in the milestone https://github.com/actions-runner-controller/actions-runner-controller/milestone/4
+
+This log documents breaking changes and major enhancements.
+
+## Upgrading
+
+In case you're using our Helm chart to deploy ARC, use chart 0.19.0 or greater. Don't miss upgrading CRDs as usual! Helm doesn't upgrade CRDs.
+
+## BREAKING CHANGE : Support for `--once` is being dropped
+
+> **Warning**: If you're using ARC's official runner image, make sure to update the image tag to `v2.292.0` BEFORE upgrading ARC
+
+In #1385 we changed ARC to NOT automatically set the feature flag `RUNNER_FEATURE_FLAG_EPHEMERAL=true`. If you're using ARC's official runner image, make sure to update the image tag to `v2.292.0` before upgrading ARC, because that's the first runner image release since we changed the default to `--ephemeral`. If you keep using an older runner image after upgrading ARC, you will end up using `--once`, which is unreliable and has been deprecated for almost a year.
+
+> **Warning**: If you're using a custom runner image, incorporate the changes made in #1384 into your runner image's Dockerfile
+
+If you're building a custom runner image on your own and it still requires the user to specify `RUNNER_FEATURE_FLAG_EPHEMERAL=true` to use `--ephemeral`, check #1384 and update your custom runner image's Dockerfile accordingly. Otherwise, you may unexpectedly end up using `--once` after upgrading ARC, because that was the previous default.
+
+Relevant PR(s): #1384, #1385
+
+## FIX : Prevent runners from getting stuck in Terminating when the container disappeared
+
+We occasionally heard about runner pods stuck in Terminating after the node and the containers running on it disappeared due to, for example, the machine terminating prematurely.
+
+We now set runner pods' restartPolicy to `Never` and remove runner pods stuck in `Waiting` after restarting, so that the pods are much less likely to get stuck forever.
+
+Relevant PR(s): #1395, #1420
+
+## ENHANCEMENT : Support arbitrarily setting `privileged: true` for the runner container
+
+This is a frequently requested feature that allows you to force `privileged: true` in case you don't need docker but still need privileged tasks to be run in a job step.
+
+In combination with a container runtime like `sysbox`, this should enable you to run docker builds within the dind sidecar, all without privileges. See [the discussion related to Sysbox](https://github.com/actions-runner-controller/actions-runner-controller/discussions/977) for more information.
+
+Note that we ARC maintainers still have no bandwidth to provide a complete description of how to make ARC work with `sysbox` yet, but we'd almost certainly welcome contributions to the documentation if you manage to make it work.
+
+Relevant PR(s): #1383
+
+## ENHANCEMENT : RunnerSet can now retain PVs across restarts
+
+This enhancement makes it more practical to use RunnerSet in combination with `volumeClaimTemplates` to make your workflow jobs faster.
+
+Please see our updated ["Custom Volume Mounts" section in the documentation](https://github.com/actions-runner-controller/actions-runner-controller#custom-volume-mounts) for more information. Currently, we cover caching Docker image layers, go mod/build caches, and a PV-backed runner work directory (although that one is backed by another feature unrelated to this enhancement under the hood).
+
+Relevant PR(s): #1340
+
+## ENHANCEMENT : OpenSSF scorecard adoption
+
+We assessed the project's security by following the OpenSSF scorecard checks and adopting OpenSSF best practices.
+It should help you judge the security of ARC's development and release processes.
+
+Relevant PR(s): #1461
docs/releasenotes/0.25.md (new file, 43 lines)
@@ -0,0 +1,43 @@
+# actions-runner-controller v0.25.0
+
+All planned changes in this release can be found in the milestone https://github.com/actions-runner-controller/actions-runner-controller/milestone/8.
+
+Also see https://github.com/actions-runner-controller/actions-runner-controller/compare/v0.24.1...v0.25.0 for the full changelog.
+
+This log documents breaking changes and major enhancements.
+
+## Upgrading
+
+In case you're using our Helm chart to deploy ARC, use chart 0.20.0 or greater. Don't miss upgrading CRDs as usual! Helm doesn't upgrade CRDs.
+
+## BREAKING CHANGE : Support for `--once` has been dropped
+
+In case you're still on ARC v0.23.0 or earlier, please also read [the relevant part of the v0.24.0 release notes for more information](https://github.com/actions-runner-controller/actions-runner-controller/blob/master/docs/releasenotes/0.24.md#breaking-change--support-for---once-is-being-dropped).
+
+Relevant PR(s): #1580, #1590
+
+## ENHANCEMENT : Support for the new Kubernetes container mode of the Actions runner
+
+The GitHub Actions team has recently added to `actions/runner` the ability to use [runner container hooks](https://github.com/actions/runner-container-hooks) to run job steps on Kubernetes pods instead of docker containers created by the `docker` command. It allows us to avoid the use of privileged containers while still being able to run container-backed job steps.
+
+To use the new container mode, you set `.spec.template.spec.containerMode` in `RunnerDeployment` to `"kubernetes"` while defining `.spec.template.spec.workVolumeClaimTemplate`. The volume claim template is used for provisioning and assigning persistent volumes mounted across the runner pod and the job pods for sharing the job workspace. (A hedged sketch follows this section.)
+
+Before using this feature, we highly recommend reading [the detailed explanation in the original pull request](https://github.com/actions-runner-controller/actions-runner-controller/pull/1546) and [the new section in ARC's documentation](https://github.com/actions-runner-controller/actions-runner-controller#runner-with-k8s-jobs).
+
+Big kudos to @thboop and the GitHub Actions team for implementing and contributing this feature!
+
+Relevant PR(s): #1546
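A hedged sketch for the section above, expressed with the Go API types this compare view touches rather than YAML. The exact struct layout (for example, whether `workVolumeClaimTemplate` lives on `RunnerConfig`) is assumed here; the linked PR and documentation are authoritative:

```go
package sketch

import (
	"github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// k8sModeRunnerDeployment opts a RunnerDeployment into the Kubernetes container mode.
// All names below are hypothetical.
func k8sModeRunnerDeployment() *v1alpha1.RunnerDeployment {
	return &v1alpha1.RunnerDeployment{
		ObjectMeta: metav1.ObjectMeta{Name: "k8s-mode-runners"},
		Spec: v1alpha1.RunnerDeploymentSpec{
			Template: v1alpha1.RunnerTemplate{
				Spec: v1alpha1.RunnerSpec{
					RunnerConfig: v1alpha1.RunnerConfig{
						Repository:    "myorg/myrepo",
						ContainerMode: "kubernetes",
						// The volume claim template provisions the PV shared between
						// the runner pod and the job pods for the workspace.
						WorkVolumeClaimTemplate: &v1alpha1.WorkVolumeClaimTemplate{
							StorageClassName: "local-storage",
							AccessModes:      []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
						},
					},
				},
			},
		},
	}
}
```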
+
+## FIX : Webhook-based scaling is even more reliable
+
+We fixed a race condition in the webhook-based autoscaler that resulted in it not adding a runner when necessary.
+
+The race condition occurred when the autoscaler received a webhook event while still processing another, and both ended up scaling up the same horizontal runner autoscaler at the same time.
+
+To mitigate that, ARC now uses Kubernetes' Update API instead of Patch to update `HRA.spec.capacityReservations`, the underlying data structure that lets the webhook-based scaler add replicas to a RunnerDeployment or RunnerSet on demand.
+
+Because we were also worried about stressing the Kubernetes apiserver when the webhook-based autoscaler receives many concurrent webhook events, we also enhanced it to batch the Update API calls over 3-second windows, which basically means it calls the Update API at most once every 3 seconds per webhook-based autoscaler instance. (A generic sketch of this batching idea follows below.)
+
+Lastly, we fixed a bug in the autoscaler that caused it to stop adding replicas for newly received webhook events once the desired replicas reached `maxReplicas`.
+
+Relevant PR(s): #1477, #1568
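A generic sketch of the batching idea described in the FIX section above, not ARC's actual code: coalesce bursts of scale-up requests and flush them with a single update call at most once per interval.

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// batcher accumulates capacity-reservation requests and flushes them periodically.
type batcher struct {
	mu      sync.Mutex
	pending int
}

func (b *batcher) Add(n int) {
	b.mu.Lock()
	b.pending += n
	b.mu.Unlock()
}

// Run calls update with the accumulated total at most once per interval.
func (b *batcher) Run(interval time.Duration, update func(delta int) error) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for range ticker.C {
		b.mu.Lock()
		delta := b.pending
		b.pending = 0
		b.mu.Unlock()
		if delta > 0 {
			_ = update(delta) // a real controller would retry on conflict
		}
	}
}

func main() {
	b := &batcher{}
	go b.Run(3*time.Second, func(delta int) error {
		// Here ARC would add `delta` capacity reservations with one Update call.
		fmt.Println("flushing", delta, "reservations")
		return nil
	})
	b.Add(1)
	b.Add(1)
	time.Sleep(4 * time.Second)
}
```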
@@ -8,7 +8,7 @@ import (

	"github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"

-	"github.com/google/go-github/v39/github"
+	"github.com/google/go-github/v45/github"
	"github.com/gorilla/mux"
)

@@ -12,9 +12,9 @@ import (

	"github.com/actions-runner-controller/actions-runner-controller/github/metrics"
	"github.com/actions-runner-controller/actions-runner-controller/logging"
-	"github.com/bradleyfalzon/ghinstallation"
+	"github.com/bradleyfalzon/ghinstallation/v2"
	"github.com/go-logr/logr"
-	"github.com/google/go-github/v39/github"
+	"github.com/google/go-github/v45/github"
	"github.com/gregjones/httpcache"
	"golang.org/x/oauth2"
)
@@ -248,7 +248,8 @@ func (c *Client) ListRunners(ctx context.Context, enterprise, org, repo string)
func (c *Client) ListOrganizationRunnerGroups(ctx context.Context, org string) ([]*github.RunnerGroup, error) {
	var runnerGroups []*github.RunnerGroup

-	opts := github.ListOptions{PerPage: 100}
+	opts := github.ListOrgRunnerGroupOptions{}
+	opts.PerPage = 100
	for {
		list, res, err := c.Client.Actions.ListOrganizationRunnerGroups(ctx, org, &opts)
		if err != nil {
@@ -271,9 +272,21 @@ func (c *Client) ListOrganizationRunnerGroups(ctx context.Context, org string) (
func (c *Client) ListOrganizationRunnerGroupsForRepository(ctx context.Context, org, repo string) ([]*github.RunnerGroup, error) {
	var runnerGroups []*github.RunnerGroup

-	opts := github.ListOptions{PerPage: 100}
+	var opts github.ListOrgRunnerGroupOptions
+
+	opts.PerPage = 100
+
+	repoName := repo
+	parts := strings.Split(repo, "/")
+	if len(parts) == 2 {
+		repoName = parts[1]
+	}
+	// This must be the repo name without the owner part, so in case the repo is "myorg/myrepo" the repo name
+	// passed to visible_to_repository must be "myrepo".
+	opts.VisibleToRepository = repoName
+
	for {
-		list, res, err := c.listOrganizationRunnerGroupsVisibleToRepo(ctx, org, repo, &opts)
+		list, res, err := c.Actions.ListOrganizationRunnerGroups(ctx, org, &opts)
		if err != nil {
			return runnerGroups, fmt.Errorf("failed to list organization runner groups: %w", err)
		}
@@ -309,42 +322,6 @@ func (c *Client) ListRunnerGroupRepositoryAccesses(ctx context.Context, org stri
	return repos, nil
}

-// listOrganizationRunnerGroupsVisibleToRepo lists all self-hosted runner groups configured in an organization which can be used by the repository.
-//
-// GitHub API docs: https://docs.github.com/en/rest/reference/actions#list-self-hosted-runner-groups-for-an-organization
-func (c *Client) listOrganizationRunnerGroupsVisibleToRepo(ctx context.Context, org, repo string, opts *github.ListOptions) (*github.RunnerGroups, *github.Response, error) {
-	repoName := repo
-	parts := strings.Split(repo, "/")
-	if len(parts) == 2 {
-		repoName = parts[1]
-	}
-
-	u := fmt.Sprintf("orgs/%v/actions/runner-groups?visible_to_repository=%v", org, repoName)
-
-	if opts != nil {
-		if opts.PerPage > 0 {
-			u = fmt.Sprintf("%v&per_page=%v", u, opts.PerPage)
-		}
-
-		if opts.Page > 0 {
-			u = fmt.Sprintf("%v&page=%v", u, opts.Page)
-		}
-	}
-
-	req, err := c.Client.NewRequest("GET", u, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	groups := &github.RunnerGroups{}
-	resp, err := c.Client.Do(ctx, req, &groups)
-	if err != nil {
-		return nil, resp, err
-	}
-
-	return groups, resp, nil
-}
-
// cleanup removes expired registration tokens.
func (c *Client) cleanup() {
	c.mu.Lock()
@@ -8,7 +8,7 @@ import (
	"time"

	"github.com/actions-runner-controller/actions-runner-controller/github/fake"
-	"github.com/google/go-github/v39/github"
+	"github.com/google/go-github/v45/github"
)

var server *httptest.Server
go.mod (62 changed lines)
@@ -1,52 +1,62 @@
module github.com/actions-runner-controller/actions-runner-controller

-go 1.17
+go 1.18

require (
-	github.com/bradleyfalzon/ghinstallation v1.1.1
+	github.com/bradleyfalzon/ghinstallation/v2 v2.0.4
	github.com/davecgh/go-spew v1.1.1
-	github.com/go-logr/logr v1.2.0
+	github.com/go-logr/logr v1.2.3
	github.com/google/go-cmp v0.5.8
-	github.com/google/go-github/v39 v39.2.0
+	github.com/google/go-github/v45 v45.2.0
	github.com/gorilla/mux v1.8.0
	github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79
	github.com/kelseyhightower/envconfig v1.4.0
	github.com/onsi/ginkgo v1.16.5
-	github.com/onsi/gomega v1.17.0
+	github.com/onsi/gomega v1.19.0
-	github.com/prometheus/client_golang v1.12.1
+	github.com/prometheus/client_golang v1.12.2
-	github.com/stretchr/testify v1.7.0
+	github.com/stretchr/testify v1.8.0
	github.com/teambition/rrule-go v1.8.0
	go.uber.org/zap v1.21.0
-	golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a
+	golang.org/x/oauth2 v0.0.0-20220630143837-2104d58473e0
	gomodules.xyz/jsonpatch/v2 v2.2.0
-	k8s.io/api v0.23.5
+	k8s.io/api v0.24.2
-	k8s.io/apimachinery v0.23.5
+	k8s.io/apimachinery v0.24.2
-	k8s.io/client-go v0.23.5
+	k8s.io/client-go v0.24.2
-	sigs.k8s.io/controller-runtime v0.11.2
+	sigs.k8s.io/controller-runtime v0.12.2
	sigs.k8s.io/yaml v1.3.0
)

require (
	cloud.google.com/go v0.81.0 // indirect
+	github.com/PuerkitoBio/purell v1.1.1 // indirect
+	github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect
	github.com/beorn7/perks v1.0.1 // indirect
	github.com/cespare/xxhash/v2 v2.1.2 // indirect
-	github.com/dgrijalva/jwt-go v3.2.0+incompatible // indirect
+	github.com/emicklei/go-restful v2.9.5+incompatible // indirect
	github.com/evanphx/json-patch v4.12.0+incompatible // indirect
	github.com/fsnotify/fsnotify v1.5.1 // indirect
	github.com/go-logr/zapr v1.2.0 // indirect
+	github.com/go-openapi/jsonpointer v0.19.5 // indirect
+	github.com/go-openapi/jsonreference v0.19.5 // indirect
+	github.com/go-openapi/swag v0.19.14 // indirect
	github.com/gogo/protobuf v1.3.2 // indirect
+	github.com/golang-jwt/jwt/v4 v4.0.0 // indirect
	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
	github.com/golang/protobuf v1.5.2 // indirect
-	github.com/google/go-github/v29 v29.0.2 // indirect
+	github.com/google/gnostic v0.5.7-v3refs // indirect
+	github.com/google/go-github/v41 v41.0.0 // indirect
	github.com/google/go-querystring v1.1.0 // indirect
	github.com/google/gofuzz v1.1.0 // indirect
	github.com/google/uuid v1.1.2 // indirect
	github.com/googleapis/gnostic v0.5.5 // indirect
	github.com/imdario/mergo v0.3.12 // indirect
+	github.com/josharian/intern v1.0.0 // indirect
	github.com/json-iterator/go v1.1.12 // indirect
+	github.com/mailru/easyjson v0.7.6 // indirect
	github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
	github.com/modern-go/reflect2 v1.0.2 // indirect
+	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
	github.com/nxadm/tail v1.4.8 // indirect
	github.com/pkg/errors v0.9.1 // indirect
	github.com/pmezard/go-difflib v1.0.0 // indirect
@@ -56,24 +66,24 @@ require (
	github.com/spf13/pflag v1.0.5 // indirect
	go.uber.org/atomic v1.7.0 // indirect
	go.uber.org/multierr v1.6.0 // indirect
-	golang.org/x/crypto v0.0.0-20210817164053-32db794688a5 // indirect
+	golang.org/x/crypto v0.0.0-20220214200702-86341886e292 // indirect
-	golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd // indirect
+	golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e // indirect
-	golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect
+	golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a // indirect
	golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect
	golang.org/x/text v0.3.7 // indirect
-	golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect
+	golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect
	google.golang.org/appengine v1.6.7 // indirect
-	google.golang.org/protobuf v1.27.1 // indirect
+	google.golang.org/protobuf v1.28.0 // indirect
	gopkg.in/inf.v0 v0.9.1 // indirect
	gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
	gopkg.in/yaml.v2 v2.4.0 // indirect
-	gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
+	gopkg.in/yaml.v3 v3.0.1 // indirect
-	k8s.io/apiextensions-apiserver v0.23.5 // indirect
+	k8s.io/apiextensions-apiserver v0.24.2 // indirect
-	k8s.io/component-base v0.23.5 // indirect
+	k8s.io/component-base v0.24.2 // indirect
-	k8s.io/klog/v2 v2.30.0 // indirect
+	k8s.io/klog/v2 v2.60.1 // indirect
-	k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 // indirect
+	k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42 // indirect
-	k8s.io/utils v0.0.0-20211116205334-6203023598ed // indirect
+	k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 // indirect
-	sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 // indirect
+	sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 // indirect
	sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect
)

146
go.sum
146
go.sum
@@ -52,7 +52,9 @@ github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym
|
|||||||
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
|
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
|
||||||
github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
|
github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
|
||||||
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
|
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
|
||||||
|
github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI=
|
||||||
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
|
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
|
||||||
|
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M=
|
||||||
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
|
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
|
||||||
github.com/actions-runner-controller/httpcache v0.2.0 h1:hCNvYuVPJ2xxYBymqBvH0hSiQpqz4PHF/LbU3XghGNI=
|
github.com/actions-runner-controller/httpcache v0.2.0 h1:hCNvYuVPJ2xxYBymqBvH0hSiQpqz4PHF/LbU3XghGNI=
|
||||||
github.com/actions-runner-controller/httpcache v0.2.0/go.mod h1:JLu9/2M/btPz1Zu/vTZ71XzukQHn2YeISPmJoM5exBI=
|
github.com/actions-runner-controller/httpcache v0.2.0/go.mod h1:JLu9/2M/btPz1Zu/vTZ71XzukQHn2YeISPmJoM5exBI=
|
||||||
@@ -66,6 +68,7 @@ github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20210826220005-b48c857c3a0e/go.m
|
|||||||
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
|
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
|
||||||
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
|
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
|
||||||
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
|
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
|
||||||
|
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
|
||||||
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
|
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
|
||||||
github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
|
github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
|
||||||
github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
|
github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
|
||||||
@@ -78,12 +81,12 @@ github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kB
github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM=
github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
-github.com/bradleyfalzon/ghinstallation v1.1.1 h1:pmBXkxgM1WeF8QYvDLT5kuQiHMcmf+X015GI0KM/E3I=
-github.com/bradleyfalzon/ghinstallation v1.1.1/go.mod h1:vyCmHTciHx/uuyN82Zc3rXN3X2KTK8nUTCrTMwAhcug=
+github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
+github.com/bradleyfalzon/ghinstallation/v2 v2.0.4 h1:tXKVfhE7FcSkhkv0UwkLvPDeZ4kz6OXd0PKPlFqf81M=
+github.com/bradleyfalzon/ghinstallation/v2 v2.0.4/go.mod h1:B40qPqJxWE0jDZgOR1JmaMy+4AY1eBP+IByOvqyAKp0=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
-github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
@@ -107,18 +110,19 @@ github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7
github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
+github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
+github.com/emicklei/go-restful v2.9.5+incompatible h1:spTtZBk5DYEvbxMVutUuTyh1Ao2r4iyvLdACqsl/Ljk=
github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
@@ -153,15 +157,19 @@ github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
-github.com/go-logr/logr v1.2.0 h1:QK40JKJyMdUDz+h+xvCsru/bJhvG0UxvePV0ufL/AcE=
github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
+github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/zapr v1.2.0 h1:n4JnPI1T3Qq1SFEi/F8rwLrZERp2bso19PJZDB9dayk=
github.com/go-logr/zapr v1.2.0/go.mod h1:Qa4Bsj2Vb+FAVeAKsLD8RLQ+YRJB8YDmOAKxaBQf7Ro=
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
+github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY=
github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
+github.com/go-openapi/jsonreference v0.19.5 h1:1WJP/wi4OjB4iV8KVbH73rQaoialJrqv8gitZLxGLtM=
github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg=
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
+github.com/go-openapi/swag v0.19.14 h1:gm3vOOXfiuw5i9p5N9xJvfjvuofpyvLA9Wr6QfK5Fng=
github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
@@ -171,6 +179,8 @@ github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zV
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
+github.com/golang-jwt/jwt/v4 v4.0.0 h1:RAqyYixv1p7uEnocuy8P1nru5wprCh/MH2BIlW5z5/o=
+github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4=
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
@@ -209,7 +219,10 @@ github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Z
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
github.com/google/cel-go v0.9.0/go.mod h1:U7ayypeSkw23szu4GaQTPJGx66c20mx8JklMSxrmI1w=
+github.com/google/cel-go v0.10.1/go.mod h1:U7ayypeSkw23szu4GaQTPJGx66c20mx8JklMSxrmI1w=
github.com/google/cel-spec v0.6.0/go.mod h1:Nwjgxy5CbjlPrtCWjeDjUyKMl8w41YBYGjsyDdqk0xA=
+github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54=
+github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
@@ -222,15 +235,12 @@ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o=
-github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
-github.com/google/go-github/v29 v29.0.2 h1:opYN6Wc7DOz7Ku3Oh4l7prmkOMwEcQxpFtxdU8N8Pts=
-github.com/google/go-github/v29 v29.0.2/go.mod h1:CHKiKKPHJ0REzfwc14QMklvtHwCveD0PxlMjLlzAM5E=
-github.com/google/go-github/v39 v39.2.0 h1:rNNM311XtPOz5rDdsJXAp2o8F67X9FnROXTvto3aSnQ=
-github.com/google/go-github/v39 v39.2.0/go.mod h1:C1s8C5aCC9L+JXIYpJM5GYytdX52vC1bLvHEF1IhBrE=
-github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
+github.com/google/go-github/v41 v41.0.0 h1:HseJrM2JFf2vfiZJ8anY2hqBjdfY1Vlj/K27ueww4gg=
+github.com/google/go-github/v41 v41.0.0/go.mod h1:XgmCA5H323A9rtgExdTcnDkcqp6S30AVACCBDOonIxg=
+github.com/google/go-github/v45 v45.2.0 h1:5oRLszbrkvxDDqBCNj2hjDZMKmvexaZ1xw/FCD+K3FI=
+github.com/google/go-github/v45 v45.2.0/go.mod h1:FObaZJEDSTa/WGCzZ2Z3eoCDXWJKMenWWTrd8jrta28=
github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8=
github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@@ -297,6 +307,7 @@ github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANyt
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
+github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
@@ -329,6 +340,7 @@ github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czP
github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA=
github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
@@ -347,6 +359,7 @@ github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
github.com/moby/term v0.0.0-20210610120745-9d4ed1856297/go.mod h1:vgPCkQMyxTZ7IDy8SXRufE172gr8+K/JE/7hHFxHW3A=
+github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -355,6 +368,7 @@ github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3Rllmb
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
@@ -369,14 +383,14 @@ github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
-github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
+github.com/onsi/ginkgo/v2 v2.1.3 h1:e/3Cwtogj0HA+25nMP1jCMDIf8RtRYbGwGGuBIFztkc=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
-github.com/onsi/gomega v1.17.0 h1:9Luw4uT5HTjHTN8+aNcSThgH1vdXnmdJ8xIfZ4wyTRE=
-github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
+github.com/onsi/gomega v1.19.0 h1:4ieX6qQjPP/BfC3mpsAtIGGlxTWPeA3Inl/7DtXw1tw=
+github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
@@ -396,8 +410,9 @@ github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDf
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
-github.com/prometheus/client_golang v1.12.1 h1:ZiaPsmm9uiBeaSMRznKsCDNtPCS0T3JVDGF+06gjBzk=
github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
+github.com/prometheus/client_golang v1.12.2 h1:51L9cDoUHVrXx4zWYlcLQIZ+d+VXHgqnYKkIuq4g/34=
+github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
@@ -423,6 +438,7 @@ github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6So
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
@@ -443,6 +459,7 @@ github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkU
github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo=
github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk=
+github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
@@ -453,16 +470,20 @@ github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH
github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.5 h1:s5PTfem8p8EbKQOctVV53k6jCJt3UX4IEJzwh+C324Q=
+github.com/stretchr/testify v1.7.5/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
-github.com/teambition/rrule-go v1.7.2 h1:goEajFWYydfCgavn2m/3w5U+1b3PGqPUHx/fFSVfTy0=
-github.com/teambition/rrule-go v1.7.2/go.mod h1:mBJ1Ht5uboJ6jexKdNUJg2NcwP8uUMNvStWXlJD3MvU=
github.com/teambition/rrule-go v1.8.0 h1:a/IX5s56hGkFF+nRlJUooZU/45OTeeldBGL29nDKIHw=
github.com/teambition/rrule-go v1.8.0/go.mod h1:Ieq5AbrKGciP1V//Wq8ktsTXwSwJHDD5mD/wLBGl3p4=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
@@ -474,12 +495,16 @@ github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4=
go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
+go.etcd.io/etcd/api/v3 v3.5.1/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
+go.etcd.io/etcd/client/pkg/v3 v3.5.1/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ=
go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0=
+go.etcd.io/etcd/client/v3 v3.5.1/go.mod h1:OnjH4M8OnAotwaB2l9bVgZzRFKru7/ZMoS46OtKyd3Q=
go.etcd.io/etcd/pkg/v3 v3.5.0/go.mod h1:UzJGatBQ1lXChBkQF0AuAtkRQMYnHubxAEYIrC3MSsE=
go.etcd.io/etcd/raft/v3 v3.5.0/go.mod h1:UFOHSIvO/nKwd4lhkwabrTD3cqW5yVyYYf/KlD00Szc=
go.etcd.io/etcd/server/v3 v3.5.0/go.mod h1:3Ah5ruV+M+7RZr0+Y/5mNLwC+eQlni+mQmOVdCRJoS4=
@@ -506,17 +531,14 @@ go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
-go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA=
-go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4=
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI=
-go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI=
go.uber.org/zap v1.21.0 h1:WefMeulhovoZ2sYXz7st6K0sLj7bBhpiFaud4r4zST8=
go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
@@ -530,6 +552,9 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210817164053-32db794688a5 h1:HWj/xjIHfjYU5nVXpTM0s39J9CbLn7Cc5a7IC5rwsMQ=
golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.0.0-20220214200702-86341886e292 h1:f+lwQ+GtmgoY+A2YaQxlSOnDjXcQ7ZRLWOHbC6HtRqE=
+golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -565,6 +590,7 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -608,13 +634,17 @@ golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
-golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd h1:O7DYs+zxREGLKzKoMQrtrEacpb0ZVXA5rIwylE2Xchk=
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc=
+golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e h1:TsQ7F31D3bUCLeqPT0u+yjp1guoArKaNKmCr22PYgTQ=
+golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -629,8 +659,13 @@ golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ
golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a h1:qfl7ob3DIEs3Ml9oLuPwY2N04gymzAW04WsUQHIClgM=
-golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
+golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5 h1:OSnWWcOd/CtWQC2cYSBgbTSJv3ciqd8r54ySIW2y3RE=
+golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
+golang.org/x/oauth2 v0.0.0-20220628200809-02e64fa58f26 h1:uBgVQYJLi/m8M0wzp+aGwBWt90gMRoOVf+aWTW10QHI=
+golang.org/x/oauth2 v0.0.0-20220628200809-02e64fa58f26/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE=
+golang.org/x/oauth2 v0.0.0-20220630143837-2104d58473e0 h1:VnGaRqoLmqZH/3TMLJwYCEWkR4j1nuIU1U9TvbqsDUw=
+golang.org/x/oauth2 v0.0.0-20220630143837-2104d58473e0/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -707,10 +742,14 @@ golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211029165221-6e7872819dc8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0=
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220209214540-3681064d5158 h1:rm+CHSpPEEW2IsXUib1ThaHIjuBVZjxNgSKmBLFfD4c=
+golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a h1:dGzPydgVsqGcTRVwiLJ1jVbufYwmzD3LfVPLKsKg+0k=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY=
@@ -731,6 +770,8 @@ golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxb
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac h1:7zkz7BUtwNFFqcowJ+RIgu2MaV/MapERkDIy+mwPyjs=
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 h1:vVKdlvoWBphwdxWKrFZEuM0kGgGLxUOYcY4U/2Vjg44=
+golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -790,10 +831,10 @@ golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.6-0.20210820212750-d4cc65f0b2ff/go.mod h1:YD9qOF0M9xpSpdWTBbzEl5e/RnCefISl8E5Noe10jFM=
+golang.org/x/tools v0.1.10-0.20220218145154-897bd77cd717/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gomodules.xyz/jsonpatch/v2 v2.2.0 h1:4pT439QV83L+G9FkcCriY6EkpcK6r6bK+A5FBUMI7qY=
gomodules.xyz/jsonpatch/v2 v2.2.0/go.mod h1:WXp+iVDkoLQqPudfQ9GBlwB2eZ5DKOnjQZCYdOS8GPY=
@@ -872,6 +913,7 @@ google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6D
google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
@@ -908,6 +950,8 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ=
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw=
+google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -939,6 +983,8 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
@@ -948,58 +994,60 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh
|
|||||||
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
|
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
|
||||||
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
||||||
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
||||||
k8s.io/api v0.23.0/go.mod h1:8wmDdLBHBNxtOIytwLstXt5E9PddnZb0GaMcqsvDBpg=
|
|
||||||
k8s.io/api v0.23.4 h1:85gnfXQOWbJa1SiWGpE9EEtHs0UVvDyIsSMpEtl2D4E=
|
|
||||||
k8s.io/api v0.23.4/go.mod h1:i77F4JfyNNrhOjZF7OwwNJS5Y1S9dpwvb9iYRYRczfI=
|
|
||||||
k8s.io/api v0.23.5 h1:zno3LUiMubxD/V1Zw3ijyKO3wxrhbUF1Ck+VjBvfaoA=
|
k8s.io/api v0.23.5 h1:zno3LUiMubxD/V1Zw3ijyKO3wxrhbUF1Ck+VjBvfaoA=
|
||||||
k8s.io/api v0.23.5/go.mod h1:Na4XuKng8PXJ2JsploYYrivXrINeTaycCGcYgF91Xm8=
|
k8s.io/api v0.23.5/go.mod h1:Na4XuKng8PXJ2JsploYYrivXrINeTaycCGcYgF91Xm8=
|
||||||
k8s.io/apiextensions-apiserver v0.23.0 h1:uii8BYmHYiT2ZTAJxmvc3X8UhNYMxl2A0z0Xq3Pm+WY=
|
k8s.io/api v0.24.2 h1:g518dPU/L7VRLxWfcadQn2OnsiGWVOadTLpdnqgY2OI=
|
||||||
k8s.io/apiextensions-apiserver v0.23.0/go.mod h1:xIFAEEDlAZgpVBl/1VSjGDmLoXAWRG40+GsWhKhAxY4=
|
k8s.io/api v0.24.2/go.mod h1:AHqbSkTm6YrQ0ObxjO3Pmp/ubFF/KuM7jU+3khoBsOg=
|
||||||
k8s.io/apiextensions-apiserver v0.23.5 h1:5SKzdXyvIJKu+zbfPc3kCbWpbxi+O+zdmAJBm26UJqI=
|
k8s.io/apiextensions-apiserver v0.23.5 h1:5SKzdXyvIJKu+zbfPc3kCbWpbxi+O+zdmAJBm26UJqI=
|
||||||
k8s.io/apiextensions-apiserver v0.23.5/go.mod h1:ntcPWNXS8ZPKN+zTXuzYMeg731CP0heCTl6gYBxLcuQ=
|
k8s.io/apiextensions-apiserver v0.23.5/go.mod h1:ntcPWNXS8ZPKN+zTXuzYMeg731CP0heCTl6gYBxLcuQ=
|
||||||
k8s.io/apimachinery v0.23.0/go.mod h1:fFCTTBKvKcwTPFzjlcxp91uPFZr+JA0FubU4fLzzFYc=
|
k8s.io/apiextensions-apiserver v0.24.2 h1:/4NEQHKlEz1MlaK/wHT5KMKC9UKYz6NZz6JE6ov4G6k=
|
||||||
k8s.io/apimachinery v0.23.4 h1:fhnuMd/xUL3Cjfl64j5ULKZ1/J9n8NuQEgNL+WXWfdM=
|
k8s.io/apiextensions-apiserver v0.24.2/go.mod h1:e5t2GMFVngUEHUd0wuCJzw8YDwZoqZfJiGOW6mm2hLQ=
|
||||||
k8s.io/apimachinery v0.23.4/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM=
|
|
||||||
k8s.io/apimachinery v0.23.5 h1:Va7dwhp8wgkUPWsEXk6XglXWU4IKYLKNlv8VkX7SDM0=
|
k8s.io/apimachinery v0.23.5 h1:Va7dwhp8wgkUPWsEXk6XglXWU4IKYLKNlv8VkX7SDM0=
|
||||||
k8s.io/apimachinery v0.23.5/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM=
|
k8s.io/apimachinery v0.23.5/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM=
|
||||||
k8s.io/apiserver v0.23.0/go.mod h1:Cec35u/9zAepDPPFyT+UMrgqOCjgJ5qtfVJDxjZYmt4=
|
k8s.io/apimachinery v0.24.2 h1:5QlH9SL2C8KMcrNJPor+LbXVTaZRReml7svPEh4OKDM=
|
||||||
|
k8s.io/apimachinery v0.24.2/go.mod h1:82Bi4sCzVBdpYjyI4jY6aHX+YCUchUIrZrXKedjd2UM=
|
||||||
k8s.io/apiserver v0.23.5/go.mod h1:7wvMtGJ42VRxzgVI7jkbKvMbuCbVbgsWFT7RyXiRNTw=
|
k8s.io/apiserver v0.23.5/go.mod h1:7wvMtGJ42VRxzgVI7jkbKvMbuCbVbgsWFT7RyXiRNTw=
|
||||||
k8s.io/client-go v0.23.0/go.mod h1:hrDnpnK1mSr65lHHcUuIZIXDgEbzc7/683c6hyG4jTA=
|
k8s.io/apiserver v0.24.2/go.mod h1:pSuKzr3zV+L+MWqsEo0kHHYwCo77AT5qXbFXP2jbvFI=
|
||||||
k8s.io/client-go v0.23.4 h1:YVWvPeerA2gpUudLelvsolzH7c2sFoXXR5wM/sWqNFU=
|
|
||||||
k8s.io/client-go v0.23.4/go.mod h1:PKnIL4pqLuvYUK1WU7RLTMYKPiIh7MYShLshtRY9cj0=
|
|
||||||
k8s.io/client-go v0.23.5 h1:zUXHmEuqx0RY4+CsnkOn5l0GU+skkRXKGJrhmE2SLd8=
|
k8s.io/client-go v0.23.5 h1:zUXHmEuqx0RY4+CsnkOn5l0GU+skkRXKGJrhmE2SLd8=
|
||||||
k8s.io/client-go v0.23.5/go.mod h1:flkeinTO1CirYgzMPRWxUCnV0G4Fbu2vLhYCObnt/r4=
|
k8s.io/client-go v0.23.5/go.mod h1:flkeinTO1CirYgzMPRWxUCnV0G4Fbu2vLhYCObnt/r4=
|
||||||
k8s.io/code-generator v0.23.0/go.mod h1:vQvOhDXhuzqiVfM/YHp+dmg10WDZCchJVObc9MvowsE=
|
k8s.io/client-go v0.24.2 h1:CoXFSf8if+bLEbinDqN9ePIDGzcLtqhfd6jpfnwGOFA=
|
||||||
|
k8s.io/client-go v0.24.2/go.mod h1:zg4Xaoo+umDsfCWr4fCnmLEtQXyCNXCvJuSsglNcV30=
|
||||||
k8s.io/code-generator v0.23.5/go.mod h1:S0Q1JVA+kSzTI1oUvbKAxZY/DYbA/ZUb4Uknog12ETk=
|
k8s.io/code-generator v0.23.5/go.mod h1:S0Q1JVA+kSzTI1oUvbKAxZY/DYbA/ZUb4Uknog12ETk=
|
||||||
k8s.io/component-base v0.23.0 h1:UAnyzjvVZ2ZR1lF35YwtNY6VMN94WtOnArcXBu34es8=
|
k8s.io/code-generator v0.24.2/go.mod h1:dpVhs00hTuTdTY6jvVxvTFCk6gSMrtfRydbhZwHI15w=
|
||||||
k8s.io/component-base v0.23.0/go.mod h1:DHH5uiFvLC1edCpvcTDV++NKULdYYU6pR9Tt3HIKMKI=
|
|
||||||
k8s.io/component-base v0.23.5 h1:8qgP5R6jG1BBSXmRYW+dsmitIrpk8F/fPEvgDenMCCE=
|
k8s.io/component-base v0.23.5 h1:8qgP5R6jG1BBSXmRYW+dsmitIrpk8F/fPEvgDenMCCE=
|
||||||
k8s.io/component-base v0.23.5/go.mod h1:c5Nq44KZyt1aLl0IpHX82fhsn84Sb0jjzwjpcA42bY0=
|
k8s.io/component-base v0.23.5/go.mod h1:c5Nq44KZyt1aLl0IpHX82fhsn84Sb0jjzwjpcA42bY0=
|
||||||
|
k8s.io/component-base v0.24.2 h1:kwpQdoSfbcH+8MPN4tALtajLDfSfYxBDYlXobNWI6OU=
|
||||||
|
k8s.io/component-base v0.24.2/go.mod h1:ucHwW76dajvQ9B7+zecZAP3BVqvrHoOxm8olHEg0nmM=
|
||||||
k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
|
k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
|
||||||
|
k8s.io/gengo v0.0.0-20211129171323-c02415ce4185/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
|
||||||
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
|
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
|
||||||
k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
|
k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
|
||||||
k8s.io/klog/v2 v2.30.0 h1:bUO6drIvCIsvZ/XFgfxoGFQU/a4Qkh0iAlvUR7vlHJw=
|
k8s.io/klog/v2 v2.30.0 h1:bUO6drIvCIsvZ/XFgfxoGFQU/a4Qkh0iAlvUR7vlHJw=
|
||||||
k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
|
k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
|
||||||
|
k8s.io/klog/v2 v2.60.1 h1:VW25q3bZx9uE3vvdL6M8ezOX79vA2Aq1nEWLqNQclHc=
|
||||||
|
k8s.io/klog/v2 v2.60.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
|
||||||
k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 h1:E3J9oCLlaobFUqsjG9DfKbP2BmgwBL2p7pn0A3dG9W4=
|
k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 h1:E3J9oCLlaobFUqsjG9DfKbP2BmgwBL2p7pn0A3dG9W4=
|
||||||
k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk=
|
k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk=
|
||||||
|
k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42 h1:Gii5eqf+GmIEwGNKQYQClCayuJCe2/4fZUvF7VG99sU=
|
||||||
|
k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42/go.mod h1:Z/45zLw8lUo4wdiUkI+v/ImEGAvu3WatcZl3lPMR4Rk=
|
||||||
k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
|
k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
|
||||||
k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
|
|
||||||
k8s.io/utils v0.0.0-20211116205334-6203023598ed h1:ck1fRPWPJWsMd8ZRFsWc6mh/zHp5fZ/shhbrgPUxDAE=
|
k8s.io/utils v0.0.0-20211116205334-6203023598ed h1:ck1fRPWPJWsMd8ZRFsWc6mh/zHp5fZ/shhbrgPUxDAE=
|
||||||
k8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
|
k8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
|
||||||
|
k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 h1:HNSDgDCrr/6Ly3WEGKZftiE7IY19Vz2GdbOCyI4qqhc=
|
||||||
|
k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
|
||||||
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
|
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
|
||||||
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
|
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
|
||||||
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
|
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
|
||||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.25/go.mod h1:Mlj9PNLmG9bZ6BHFwFKDo5afkpWyUISkb9Me0GnK66I=
|
|
||||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.30/go.mod h1:fEO7lRTdivWO2qYVCVG7dEADOMo/MLDCVr8So2g88Uw=
|
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.30/go.mod h1:fEO7lRTdivWO2qYVCVG7dEADOMo/MLDCVr8So2g88Uw=
|
||||||
sigs.k8s.io/controller-runtime v0.11.1 h1:7YIHT2QnHJArj/dk9aUkYhfqfK5cIxPOX5gPECfdZLU=
|
|
||||||
sigs.k8s.io/controller-runtime v0.11.1/go.mod h1:KKwLiTooNGu+JmLZGn9Sl3Gjmfj66eMbCQznLP5zcqA=
|
|
||||||
sigs.k8s.io/controller-runtime v0.11.2 h1:H5GTxQl0Mc9UjRJhORusqfJCIjBO8UtUxGggCwL1rLA=
|
sigs.k8s.io/controller-runtime v0.11.2 h1:H5GTxQl0Mc9UjRJhORusqfJCIjBO8UtUxGggCwL1rLA=
|
||||||
sigs.k8s.io/controller-runtime v0.11.2/go.mod h1:P6QCzrEjLaZGqHsfd+os7JQ+WFZhvB8MRFsn4dWF7O4=
|
sigs.k8s.io/controller-runtime v0.11.2/go.mod h1:P6QCzrEjLaZGqHsfd+os7JQ+WFZhvB8MRFsn4dWF7O4=
|
||||||
|
sigs.k8s.io/controller-runtime v0.12.2 h1:nqV02cvhbAj7tbt21bpPpTByrXGn2INHRsi39lXy9sE=
|
||||||
|
sigs.k8s.io/controller-runtime v0.12.2/go.mod h1:qKsk4WE6zW2Hfj0G4v10EnNB2jMG1C+NTb8h+DwCoU0=
|
||||||
sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 h1:fD1pz4yfdADVNfFmcP2aBEtudwUQ1AlLnRBALr33v3s=
|
sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 h1:fD1pz4yfdADVNfFmcP2aBEtudwUQ1AlLnRBALr33v3s=
|
||||||
sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs=
|
sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs=
|
||||||
|
sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 h1:kDi4JBNAsJWfz1aEXhO8Jg87JJaPNLh5tIzYHgStQ9Y=
|
||||||
|
sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2/go.mod h1:B+TnT182UBxE84DiCz4CVE26eOSDAeYCpfDnC2kdKMY=
|
||||||
sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
|
sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
|
||||||
sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
|
|
||||||
sigs.k8s.io/structured-merge-diff/v4 v4.2.0/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
|
|
||||||
sigs.k8s.io/structured-merge-diff/v4 v4.2.1 h1:bKCqE9GvQ5tiVHn5rfn1r+yao3aLQEaLzkkmAkf+A6Y=
|
sigs.k8s.io/structured-merge-diff/v4 v4.2.1 h1:bKCqE9GvQ5tiVHn5rfn1r+yao3aLQEaLzkkmAkf+A6Y=
|
||||||
sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
|
sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
|
||||||
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
|
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
|
||||||
|
|||||||
67	hack/signrel/README.md	Normal file
@@ -0,0 +1,67 @@
# signrel

`signrel` is the utility command for downloading `actions-runner-controller` release assets, signing them, and uploading the signature files.

## Verifying Release Assets

For users, browse https://keys.openpgp.org/search?q=D8078411E3D8400B574EDB0441B69B728F095A87 and download the public key, or refer to [the instructions](https://keys.openpgp.org/about/usage#gnupg-retrieve) to import the key onto your machine.

Next, you'll want to verify the signature of the downloaded asset.

With `gpg`, you would usually do that by downloading both the asset and the signature file from the specific release page, and running `gpg --verify` like:

```console
# Download the asset
curl -LO https://github.com/actions-runner-controller/actions-runner-controller/releases/download/v0.23.0/actions-runner-controller.yaml

# Download the signature file
curl -LO https://github.com/actions-runner-controller/actions-runner-controller/releases/download/v0.23.0/actions-runner-controller.yaml.asc

# Verify
gpg --verify actions-runner-controller.yaml{.asc,}
```

On successful verification, the gpg command outputs:

```
gpg: Signature made Tue 10 May 2022 04:15:32 AM UTC
gpg: using RSA key D8078411E3D8400B574EDB0441B69B728F095A87
gpg: checking the trustdb
gpg: marginals needed: 3 completes needed: 1 trust model: pgp
gpg: depth: 0 valid: 1 signed: 0 trust: 0-, 0q, 0n, 0m, 0f, 1u
gpg: next trustdb check due at 2024-05-09
gpg: Good signature from "Yusuke Kuoka <ykuoka@gmail.com>" [ultimate]
```

## Signing Release Assets

Assuming you are a maintainer of the project with admin permission, run a command like the below to sign assets and upload the signature files:

```console
$ cd hack/signrel

$ for v in v0.23.0 actions-runner-controller-0.18.0 v0.22.3 v0.22.2 actions-runner-controller-0.17.2; do TAG=$v go run . sign; done

Downloading actions-runner-controller.yaml to downloads/v0.23.0/actions-runner-controller.yaml
Uploading downloads/v0.23.0/actions-runner-controller.yaml.asc
downloads/v0.23.0/actions-runner-controller.yaml.asc has been already uploaded
Downloading actions-runner-controller-0.18.0.tgz to downloads/actions-runner-controller-0.18.0/actions-runner-controller-0.18.0.tgz
Uploading downloads/actions-runner-controller-0.18.0/actions-runner-controller-0.18.0.tgz.asc
Upload completed: *snip*
Downloading actions-runner-controller.yaml to downloads/v0.22.3/actions-runner-controller.yaml
Uploading downloads/v0.22.3/actions-runner-controller.yaml.asc
Upload completed: *snip*
Downloading actions-runner-controller.yaml to downloads/v0.22.2/actions-runner-controller.yaml
Uploading downloads/v0.22.2/actions-runner-controller.yaml.asc
Upload completed: *snip*
Downloading actions-runner-controller-0.17.2.tgz to downloads/actions-runner-controller-0.17.2/actions-runner-controller-0.17.2.tgz
Uploading downloads/actions-runner-controller-0.17.2/actions-runner-controller-0.17.2.tgz.asc
Upload completed: *snip*
actions-runner-controller-0.17.2.tgz.asc"}
```

To retrieve all the available release tags, run:

```
$ go run . tags | jq -r .[].tag_name
```
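The same flow covers the Helm chart tarballs attached to chart releases; a minimal sketch, reusing the `actions-runner-controller-0.18.0` tag and `.tgz` asset name shown in the signing log above:

```console
curl -LO https://github.com/actions-runner-controller/actions-runner-controller/releases/download/actions-runner-controller-0.18.0/actions-runner-controller-0.18.0.tgz
curl -LO https://github.com/actions-runner-controller/actions-runner-controller/releases/download/actions-runner-controller-0.18.0/actions-runner-controller-0.18.0.tgz.asc
gpg --verify actions-runner-controller-0.18.0.tgz{.asc,}
```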
3	hack/signrel/go.mod	Normal file
@@ -0,0 +1,3 @@
module github.com/actions-runner-controller/actions-runner-controller/hack/sigrel

go 1.17
325	hack/signrel/main.go	Normal file
@@ -0,0 +1,325 @@
package main

import (
	"encoding/json"
	"fmt"
	"io"
	"log"
	"net/http"
	"os"
	"os/exec"
	"path/filepath"
	"strings"
)

func main() {
	a := &githubReleaseAsset{}

	if len(os.Args) != 2 {
		fmt.Fprintf(os.Stderr, "Invalid command: %v\n", os.Args)
		fmt.Fprintf(os.Stderr, "USAGE: signrel [tags|sign]\n")
		os.Exit(2)
	}

	switch cmd := os.Args[1]; cmd {
	case "tags":
		listTags(a)
	case "sign":
		tag := os.Getenv("TAG")
		sign(a, tag)
	default:
		fmt.Fprintf(os.Stderr, "Unknown command %s\n", cmd)
		os.Exit(2)
	}
}

func listTags(a *githubReleaseAsset) {
	_, err := a.getRecentReleases(owner, repo)
	if err != nil {
		fmt.Fprintf(os.Stderr, "%v\n", err)
		os.Exit(1)
	}
}

func sign(a *githubReleaseAsset, tag string) {
	if err := a.Download(tag, "downloads"); err != nil {
		fmt.Fprintf(os.Stderr, "%v\n", err)
		os.Exit(1)
	}
}

type Release struct {
	ID int64 `json:"id"`
}

type Asset struct {
	Name string `json:"name"`
	ID   int64  `json:"id"`
	URL  string `json:"url"`
}

type AssetsResponse struct {
	Assets []Asset
}

type githubReleaseAsset struct {
}

const (
	owner = "actions-runner-controller"
	repo  = "actions-runner-controller"
)

// Download fetches every non-signature asset of the release tagged `tag`
// into dstDir, creates a detached signature for each asset that doesn't
// have one locally yet, and uploads the signature files to the release.
func (a *githubReleaseAsset) Download(tag string, dstDir string) error {
	release, err := a.getReleaseByTag(owner, repo, tag)
	if err != nil {
		return err
	}

	assets, err := a.getAssetsByReleaseID(owner, repo, release.ID)
	if err != nil {
		return err
	}

	d := filepath.Join(dstDir, tag)
	if err := os.MkdirAll(d, 0755); err != nil {
		return err
	}

	for _, asset := range assets {
		// Skip signature files; we only download and sign the assets themselves
		if strings.HasSuffix(asset.Name, ".asc") {
			continue
		}

		p := filepath.Join(d, asset.Name)
		fmt.Fprintf(os.Stderr, "Downloading %s to %s\n", asset.Name, p)
		if err := a.getFile(p, owner, repo, asset.ID); err != nil {
			return err
		}

		// Sign only when no local signature file exists yet
		if info, _ := os.Stat(p + ".asc"); info == nil {
			_, err := a.sign(p)
			if err != nil {
				return err
			}
		}

		sig := p + ".asc"

		fmt.Fprintf(os.Stdout, "Uploading %s\n", sig)

		if err := a.upload(sig, release.ID); err != nil {
			return err
		}
	}

	return nil
}

func (a *githubReleaseAsset) getRecentReleases(owner, repo string) (*Release, error) {
	reqURL := fmt.Sprintf("https://api.github.com/repos/%s/%s/releases", owner, repo)

	req, err := http.NewRequest("GET", reqURL, nil)
	if err != nil {
		return nil, err
	}
	if gt := os.Getenv("GITHUB_TOKEN"); gt != "" {
		req.Header = make(http.Header)
		req.Header.Add("authorization", "token "+gt)
	}

	res, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, err
	}
	defer res.Body.Close()

	if res.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("GET %s: %s", reqURL, res.Status)
	}

	body, err := io.ReadAll(res.Body)
	if err != nil {
		return nil, err
	}

	fmt.Fprintf(os.Stdout, "%s\n", string(body))

	return nil, nil
}

func (a *githubReleaseAsset) getReleaseByTag(owner, repo, tag string) (*Release, error) {
	var release Release

	reqURL := fmt.Sprintf("https://api.github.com/repos/%s/%s/releases/tags/%s", owner, repo, tag)

	req, err := http.NewRequest("GET", reqURL, nil)
	if err != nil {
		return nil, err
	}
	if gt := os.Getenv("GITHUB_TOKEN"); gt != "" {
		req.Header = make(http.Header)
		req.Header.Add("authorization", "token "+gt)
	}

	res, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, err
	}
	defer res.Body.Close()

	if res.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("GET %s: %s", reqURL, res.Status)
	}

	d := json.NewDecoder(res.Body)

	if err := d.Decode(&release); err != nil {
		return nil, err
	}

	return &release, nil
}

func (a *githubReleaseAsset) getAssetsByReleaseID(owner, repo string, releaseID int64) ([]Asset, error) {
	var assets []Asset

	reqURL := fmt.Sprintf("https://api.github.com/repos/%s/%s/releases/%d/assets", owner, repo, releaseID)

	req, err := http.NewRequest("GET", reqURL, nil)
	if err != nil {
		return nil, err
	}
	if gt := os.Getenv("GITHUB_TOKEN"); gt != "" {
		req.Header = make(http.Header)
		req.Header.Add("authorization", "token "+gt)
	}

	res, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, err
	}
	defer res.Body.Close()

	if res.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("GET %s: %s", reqURL, res.Status)
	}

	d := json.NewDecoder(res.Body)

	if err := d.Decode(&assets); err != nil {
		return nil, err
	}

	return assets, nil
}

func (a *githubReleaseAsset) getFile(dst string, owner, repo string, assetID int64) error {
	// Create all the parent directories if needed
	dir := filepath.Dir(dst)
	if err := os.MkdirAll(dir, 0755); err != nil {
		return fmt.Errorf("mkdir %s: %w", dir, err)
	}

	req, err := http.NewRequest("GET", fmt.Sprintf("https://api.github.com/repos/%s/%s/releases/assets/%d", owner, repo, assetID), nil)
	if err != nil {
		log.Fatal(err)
	}
	req.Header = make(http.Header)
	if gt := os.Getenv("GITHUB_TOKEN"); gt != "" {
		req.Header.Add("authorization", "token "+gt)
	}
	req.Header.Add("accept", "application/octet-stream")

	res, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer res.Body.Close()

	f, err := os.OpenFile(dst, os.O_RDWR|os.O_CREATE, os.FileMode(0666))
	if err != nil {
		return fmt.Errorf("open file %s: %w", dst, err)
	}
	defer f.Close()

	if _, err := io.Copy(f, res.Body); err != nil {
		return err
	}

	return nil
}

func (a *githubReleaseAsset) sign(path string) (string, error) {
	pass := os.Getenv("SIGNREL_PASSWORD")
	cmd := exec.Command("gpg", "--armor", "--detach-sign", "--pinentry-mode", "loopback", "--passphrase", pass, path)
	cap, err := cmd.CombinedOutput()
	if err != nil {
		fmt.Fprintf(os.Stderr, "gpg: %s", string(cap))
		return "", err
	}
	return path + ".asc", nil
}

func (a *githubReleaseAsset) upload(sig string, releaseID int64) error {
	assetName := filepath.Base(sig)
	url := fmt.Sprintf("https://uploads.github.com/repos/%s/%s/releases/%d/assets?name=%s", owner, repo, releaseID, assetName)
	f, err := os.Open(sig)
	if err != nil {
		return err
	}
	defer f.Close()

	// Determine the file size by seeking to the end, then rewind for the upload
	size, err := f.Seek(0, 2)
	if err != nil {
		return err
	}

	_, err = f.Seek(0, 0)
	if err != nil {
		return err
	}

	req, err := http.NewRequest("POST", url, f)
	if err != nil {
		log.Fatal(err)
	}
	req.Header = make(http.Header)
	if gt := os.Getenv("GITHUB_TOKEN"); gt != "" {
		req.Header.Add("authorization", "token "+gt)
	}
	req.Header.Add("content-type", "application/octet-stream")

	req.ContentLength = size
	req.Header.Add("accept", "application/vnd.github.v3+json")

	// blob, _ := httputil.DumpRequestOut(req, true)
	// fmt.Printf("%s\n", blob)

	res, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer res.Body.Close()

	body, err := io.ReadAll(res.Body)
	if err != nil {
		return err
	}

	// GitHub returns 422 when an asset with the same name already exists
	if res.StatusCode == 422 {
		fmt.Fprintf(os.Stdout, "%s has been already uploaded\n", sig)
		return nil
	}

	if res.StatusCode >= 300 {
		return fmt.Errorf("unexpected http status %d: %s", res.StatusCode, body)
	}

	fmt.Fprintf(os.Stdout, "Upload completed: %s\n", body)

	return err
}
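Per the code above, `sign` takes the release tag from the `TAG` environment variable, authenticates with `GITHUB_TOKEN`, and hands `SIGNREL_PASSWORD` to `gpg` as the key passphrase; a minimal sketch of a single-tag run (the token and passphrase values are placeholders):

```console
$ cd hack/signrel
$ export GITHUB_TOKEN=ghp_...      # needs permission to upload release assets
$ export SIGNREL_PASSWORD=...      # passphrase of the signing key
$ TAG=v0.23.0 go run . sign
```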
36	main.go
@@ -71,6 +71,7 @@ func main() {
 		metricsAddr          string
 		enableLeaderElection bool
 		leaderElectionId     string
+		port                 int
 		syncPeriod           time.Duration

 		gitHubAPICacheDuration time.Duration
@@ -98,8 +99,8 @@ func main() {
 	flag.BoolVar(&enableLeaderElection, "enable-leader-election", false,
 		"Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager.")
 	flag.StringVar(&leaderElectionId, "leader-election-id", "actions-runner-controller", "Controller id for leader election.")
-	flag.StringVar(&runnerImage, "runner-image", defaultRunnerImage, "The image name of self-hosted runner container.")
-	flag.StringVar(&dockerImage, "docker-image", defaultDockerImage, "The image name of docker sidecar container.")
+	flag.StringVar(&runnerImage, "runner-image", defaultRunnerImage, "The image name of self-hosted runner container to use by default if one isn't defined in yaml.")
+	flag.StringVar(&dockerImage, "docker-image", defaultDockerImage, "The image name of docker sidecar container to use by default if one isn't defined in yaml.")
 	flag.Var(&runnerImagePullSecrets, "runner-image-pull-secret", "The default image-pull secret name for self-hosted runner container.")
 	flag.StringVar(&dockerRegistryMirror, "docker-registry-mirror", "", "The default Docker Registry Mirror used by runners.")
 	flag.StringVar(&c.Token, "github-token", c.Token, "The personal access token of GitHub.")
@@ -113,6 +114,7 @@ func main() {
 	flag.StringVar(&c.RunnerGitHubURL, "runner-github-url", c.RunnerGitHubURL, "GitHub URL to be used by runners during registration")
 	flag.DurationVar(&gitHubAPICacheDuration, "github-api-cache-duration", 0, "DEPRECATED: The duration until the GitHub API cache expires. Setting this to e.g. 10m results in the controller tries its best not to make the same API call within 10m to reduce the chance of being rate-limited. Defaults to mostly the same value as sync-period. If you're tweaking this in order to make autoscaling more responsive, you'll probably want to tweak sync-period, too")
 	flag.DurationVar(&defaultScaleDownDelay, "default-scale-down-delay", controllers.DefaultScaleDownDelay, "The approximate delay for a scale down followed by a scale up, used to prevent flapping (down->up->down->... loop)")
+	flag.IntVar(&port, "port", 9443, "The port to which the admission webhook endpoint should bind")
 	flag.DurationVar(&syncPeriod, "sync-period", 1*time.Minute, "Determines the minimum frequency at which K8s resources managed by this controller are reconciled.")
 	flag.Var(&commonRunnerLabels, "common-runner-labels", "Runner labels in the K1=V1,K2=V2,... format that are inherited all the runners created by the controller. See https://github.com/actions-runner-controller/actions-runner-controller/issues/321 for more information")
 	flag.StringVar(&namespace, "watch-namespace", "", "The namespace to watch for custom resources. Set to empty for letting it watch for all namespaces.")
@@ -136,7 +138,7 @@ func main() {
 		MetricsBindAddress: metricsAddr,
 		LeaderElection:     enableLeaderElection,
 		LeaderElectionID:   leaderElectionId,
-		Port:               9443,
+		Port:               port,
 		SyncPeriod:         &syncPeriod,
 		Namespace:          namespace,
 	})
@@ -216,9 +218,11 @@ func main() {
 		"github-api-cache-duration", gitHubAPICacheDuration,
 		"default-scale-down-delay", defaultScaleDownDelay,
 		"sync-period", syncPeriod,
-		"runner-image", runnerImage,
-		"docker-image", dockerImage,
+		"default-runner-image", runnerImage,
+		"default-docker-image", dockerImage,
 		"common-runnner-labels", commonRunnerLabels,
+		"leader-election-enabled", enableLeaderElection,
+		"leader-election-id", leaderElectionId,
 		"watch-namespace", namespace,
 	)

@@ -238,6 +242,18 @@ func main() {
 		GitHubClient: ghClient,
 	}

+	runnerPersistentVolumeReconciler := &controllers.RunnerPersistentVolumeReconciler{
+		Client: mgr.GetClient(),
+		Log:    log.WithName("runnerpersistentvolume"),
+		Scheme: mgr.GetScheme(),
+	}
+
+	runnerPersistentVolumeClaimReconciler := &controllers.RunnerPersistentVolumeClaimReconciler{
+		Client: mgr.GetClient(),
+		Log:    log.WithName("runnerpersistentvolumeclaim"),
+		Scheme: mgr.GetScheme(),
+	}
+
 	if err = runnerPodReconciler.SetupWithManager(mgr); err != nil {
 		log.Error(err, "unable to create controller", "controller", "RunnerPod")
 		os.Exit(1)
@@ -248,6 +264,16 @@ func main() {
 		os.Exit(1)
 	}

+	if err = runnerPersistentVolumeReconciler.SetupWithManager(mgr); err != nil {
+		log.Error(err, "unable to create controller", "controller", "RunnerPersistentVolume")
+		os.Exit(1)
+	}
+
+	if err = runnerPersistentVolumeClaimReconciler.SetupWithManager(mgr); err != nil {
+		log.Error(err, "unable to create controller", "controller", "RunnerPersistentVolumeClaim")
+		os.Exit(1)
+	}
+
 	if err = (&actionsv1alpha1.Runner{}).SetupWebhookWithManager(mgr); err != nil {
 		log.Error(err, "unable to create webhook", "webhook", "Runner")
 		os.Exit(1)
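With the `port` flag above wired into the manager options, the admission webhook bind port is no longer hard-coded to 9443; a hedged sketch of overriding it, assuming the controller binary is named `manager`:

```console
$ ./manager --port 8443 --sync-period 10m --enable-leader-election
```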
@@ -11,7 +11,7 @@ import (
 	"time"

 	"github.com/actions-runner-controller/actions-runner-controller/github"
-	gogithub "github.com/google/go-github/v39/github"
+	gogithub "github.com/google/go-github/v45/github"
 )

 type server struct {
@@ -12,7 +12,7 @@ import (
 	"time"

 	"github.com/actions-runner-controller/actions-runner-controller/github"
-	gogithub "github.com/google/go-github/v39/github"
+	gogithub "github.com/google/go-github/v45/github"
 )

 type Forwarder struct {
@@ -3,7 +3,7 @@ package hookdeliveryforwarder
 import (
 	"context"

-	gogithub "github.com/google/go-github/v39/github"
+	gogithub "github.com/google/go-github/v45/github"
 )

 type hooksAPI struct {
@@ -3,7 +3,7 @@ package hookdeliveryforwarder
 import (
 	"context"

-	gogithub "github.com/google/go-github/v39/github"
+	gogithub "github.com/google/go-github/v45/github"
 )

 type hookDeliveriesAPI struct {
@@ -8,7 +8,7 @@ import (
 	"sync"

 	"github.com/actions-runner-controller/actions-runner-controller/github"
-	gogithub "github.com/google/go-github/v39/github"
+	gogithub "github.com/google/go-github/v45/github"
 )

 type MultiForwarder struct {
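Because go-github encodes the major version in its module path, the v39 to v45 bump above has to touch `go.mod` and every import site at once; a minimal sketch of performing such a bump mechanically (the `sed` rewrite is illustrative):

```console
$ go get github.com/google/go-github/v45
$ grep -rl 'go-github/v39' --include='*.go' . | xargs sed -i 's|go-github/v39|go-github/v45|g'
$ go mod tidy
```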
2	runner/.dockerignore	Normal file
@@ -0,0 +1,2 @@
*.dockerfile
Makefile
@@ -4,7 +4,8 @@ DIND_RUNNER_NAME ?= ${DOCKER_USER}/actions-runner-dind
 TAG ?= latest
 TARGETPLATFORM ?= $(shell arch)

-RUNNER_VERSION ?= 2.290.1
+RUNNER_VERSION ?= 2.294.0
+RUNNER_CONTAINER_HOOKS_VERSION ?= 0.1.2
 DOCKER_VERSION ?= 20.10.12

 # default list of platforms for which multiarch image is built
@@ -25,8 +26,19 @@ else
 endif

 docker-build-ubuntu:
-	docker build --build-arg TARGETPLATFORM=${TARGETPLATFORM} --build-arg RUNNER_VERSION=${RUNNER_VERSION} --build-arg DOCKER_VERSION=${DOCKER_VERSION} -t ${NAME}:${TAG} .
-	docker build --build-arg TARGETPLATFORM=${TARGETPLATFORM} --build-arg RUNNER_VERSION=${RUNNER_VERSION} --build-arg DOCKER_VERSION=${DOCKER_VERSION} -t ${DIND_RUNNER_NAME}:${TAG} -f Dockerfile.dindrunner .
+	docker build \
+		--build-arg TARGETPLATFORM=${TARGETPLATFORM} \
+		--build-arg RUNNER_VERSION=${RUNNER_VERSION} \
+		--build-arg RUNNER_CONTAINER_HOOKS_VERSION=${RUNNER_CONTAINER_HOOKS_VERSION} \
+		--build-arg DOCKER_VERSION=${DOCKER_VERSION} \
+		-f actions-runner.dockerfile \
+		-t ${NAME}:${TAG} .
+	docker build \
+		--build-arg TARGETPLATFORM=${TARGETPLATFORM} \
+		--build-arg RUNNER_VERSION=${RUNNER_VERSION} \
+		--build-arg DOCKER_VERSION=${DOCKER_VERSION} \
+		-f actions-runner-dind.dockerfile \
+		-t ${DIND_RUNNER_NAME}:${TAG} .

 docker-push-ubuntu:
 	docker push ${NAME}:${TAG}
@@ -40,13 +52,15 @@ docker-buildx-ubuntu:
 	fi
 	docker buildx build --platform ${PLATFORMS} \
 		--build-arg RUNNER_VERSION=${RUNNER_VERSION} \
+		--build-arg RUNNER_CONTAINER_HOOKS_VERSION=${RUNNER_CONTAINER_HOOKS_VERSION} \
 		--build-arg DOCKER_VERSION=${DOCKER_VERSION} \
+		-f actions-runner.dockerfile \
 		-t "${NAME}:${TAG}" \
-		-f Dockerfile \
 		. ${PUSH_ARG}
 	docker buildx build --platform ${PLATFORMS} \
 		--build-arg RUNNER_VERSION=${RUNNER_VERSION} \
+		--build-arg RUNNER_CONTAINER_HOOKS_VERSION=${RUNNER_CONTAINER_HOOKS_VERSION} \
 		--build-arg DOCKER_VERSION=${DOCKER_VERSION} \
+		-f actions-runner-dind.dockerfile \
 		-t "${DIND_RUNNER_NAME}:${TAG}" \
-		-f Dockerfile.dindrunner \
 		. ${PUSH_ARG}
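With the renamed `*.dockerfile` files and the new `RUNNER_CONTAINER_HOOKS_VERSION` build argument above, a local build can pin all three component versions; a sketch using the Makefile variables defined above (the image name is a placeholder):

```console
$ make docker-build-ubuntu \
    NAME=example/actions-runner TAG=dev \
    RUNNER_VERSION=2.294.0 \
    RUNNER_CONTAINER_HOOKS_VERSION=0.1.2 \
    DOCKER_VERSION=20.10.12
```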
@@ -1,7 +1,7 @@
 FROM ubuntu:20.04

 ARG TARGETPLATFORM
-ARG RUNNER_VERSION=2.290.1
+ARG RUNNER_VERSION=2.294.0
 ARG DOCKER_CHANNEL=stable
 ARG DOCKER_VERSION=20.10.12
 ARG DUMB_INIT_VERSION=1.2.5
@@ -74,8 +74,6 @@ RUN export ARCH=$(echo ${TARGETPLATFORM} | cut -d / -f2) \
 	dockerd --version; \
 	docker --version

-ENV HOME=/home/runner
-
 # Runner download supports amd64 as x64
 #
 # libyaml-dev is required for ruby/setup-ruby action.
@@ -113,6 +111,7 @@ RUN export ARCH=$(echo ${TARGETPLATFORM} | cut -d / -f2) \

 VOLUME /var/lib/docker

+ENV HOME=/home/runner
 # Add the Python "User Script Directory" to the PATH
 ENV PATH="${PATH}:${HOME}/.local/bin"
 ENV ImageOS=ubuntu20
@@ -1,7 +1,8 @@
 FROM ubuntu:20.04

 ARG TARGETPLATFORM
-ARG RUNNER_VERSION=2.290.1
+ARG RUNNER_VERSION=2.294.0
+ARG RUNNER_CONTAINER_HOOKS_VERSION=0.1.2
 ARG DOCKER_CHANNEL=stable
 ARG DOCKER_VERSION=20.10.12
 ARG DUMB_INIT_VERSION=1.2.5
@@ -66,8 +67,6 @@ RUN set -vx; \
 	&& usermod -aG docker runner \
 	&& echo "%sudo ALL=(ALL:ALL) NOPASSWD:ALL" > /etc/sudoers

-ENV HOME=/home/runner
-
 # Uncomment the below COPY to use your own custom build of actions-runner.
 #
 # To build a custom runner:
@@ -105,6 +104,11 @@ RUN export ARCH=$(echo ${TARGETPLATFORM} | cut -d / -f2) \
 	&& apt-get install -y libyaml-dev \
 	&& rm -rf /var/lib/apt/lists/*

+RUN cd "$RUNNER_ASSETS_DIR" \
+	&& curl -f -L -o runner-container-hooks.zip https://github.com/actions/runner-container-hooks/releases/download/v${RUNNER_CONTAINER_HOOKS_VERSION}/actions-runner-hooks-k8s-${RUNNER_CONTAINER_HOOKS_VERSION}.zip \
+	&& unzip ./runner-container-hooks.zip -d ./k8s \
+	&& rm runner-container-hooks.zip
+
 ENV RUNNER_TOOL_CACHE=/opt/hostedtoolcache
 RUN mkdir /opt/hostedtoolcache \
 	&& chgrp docker /opt/hostedtoolcache \
@@ -114,6 +118,7 @@ RUN mkdir /opt/hostedtoolcache \
 # override them with scripts of the same name placed in `/usr/local/bin`.
 COPY entrypoint.sh logger.bash /usr/bin/

+ENV HOME=/home/runner
 # Add the Python "User Script Directory" to the PATH
 ENV PATH="${PATH}:${HOME}/.local/bin"
 ENV ImageOS=ubuntu20
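The new `RUN` layer above downloads the runner-container-hooks k8s bundle and unpacks it into `$RUNNER_ASSETS_DIR/k8s`. A quick sketch for checking that a pinned hooks version has a matching release asset before kicking off a build:

```console
$ V=0.1.2
$ curl -fsIL -o /dev/null \
    "https://github.com/actions/runner-container-hooks/releases/download/v${V}/actions-runner-hooks-k8s-${V}.zip" \
  && echo "hooks v${V}: asset found"
```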
@@ -9,14 +9,6 @@ if [ ! -z "${STARTUP_DELAY_IN_SECONDS}" ]; then
 	sleep ${STARTUP_DELAY_IN_SECONDS}
 fi

-if [[ "${DISABLE_WAIT_FOR_DOCKER}" != "true" ]] && [[ "${DOCKER_ENABLED}" == "true" ]]; then
-	log.debug 'Docker enabled runner detected and Docker daemon wait is enabled'
-	log.debug 'Waiting until Docker is available or the timeout is reached'
-	timeout 120s bash -c 'until docker ps ;do sleep 1; done'
-else
-	log.notice 'Docker wait check skipped. Either Docker is disabled or the wait is disabled, continuing with entrypoint'
-fi
-
 if [ -z "${GITHUB_URL}" ]; then
 	log.debug 'Working with public GitHub'
 	GITHUB_URL="https://github.com/"
@@ -76,7 +68,7 @@ cd ${RUNNER_HOME}
 # past that point, it's all relative pathes from /runner

 config_args=()
-if [ "${RUNNER_FEATURE_FLAG_EPHEMERAL:-}" == "true" -a "${RUNNER_EPHEMERAL}" == "true" ]; then
+if [ "${RUNNER_FEATURE_FLAG_ONCE:-}" != "true" -a "${RUNNER_EPHEMERAL}" == "true" ]; then
 	config_args+=(--ephemeral)
 	log.debug 'Passing --ephemeral to config.sh to enable the ephemeral runner.'
 fi
@@ -134,19 +126,18 @@ cat .runner
 # -H "Authorization: bearer ${GITHUB_TOKEN}"
 # https://api.github.com/repos/USER/REPO/actions/runners/171

-if [ -z "${UNITTEST:-}" ]; then
+# Hack due to the DinD volumes
+if [ -z "${UNITTEST:-}" ] && [ -e ./externalstmp ]; then
 	mkdir -p ./externals
-	# Hack due to the DinD volumes
 	mv ./externalstmp/* ./externals/
 fi

-args=()
-if [ "${RUNNER_FEATURE_FLAG_EPHEMERAL:-}" != "true" -a "${RUNNER_EPHEMERAL}" == "true" ]; then
-	args+=(--once)
-	log.warning 'Passing --once is deprecated and will be removed as an option' \
-		'from the image and actions-runner-controller at the release of 0.24.0.' \
-		'Upgrade to GHES => 3.3 to continue using actions-runner-controller. If' \
-		'you are using github.com ignore this warning.'
+if [[ "${DISABLE_WAIT_FOR_DOCKER}" != "true" ]] && [[ "${DOCKER_ENABLED}" == "true" ]]; then
+	log.debug 'Docker enabled runner detected and Docker daemon wait is enabled'
+	log.debug 'Waiting until Docker is available or the timeout is reached'
+	timeout 120s bash -c 'until docker ps ;do sleep 1; done'
+else
+	log.notice 'Docker wait check skipped. Either Docker is disabled or the wait is disabled, continuing with entrypoint'
 fi

 # Unset entrypoint environment variables so they don't leak into the runner environment
@@ -164,4 +155,4 @@ unset RUNNER_NAME RUNNER_REPO RUNNER_TOKEN STARTUP_DELAY_IN_SECONDS DISABLE_WAIT
 if [ -z "${UNITTEST:-}" ]; then
 	mapfile -t env </etc/environment
 fi
-exec env -- "${env[@]}" ./run.sh "${args[@]}"
+exec env -- "${env[@]}" ./run.sh
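The entrypoint changes above invert the ephemeral gate: `--ephemeral` is now passed by default whenever `RUNNER_EPHEMERAL` is true, the legacy `--once` path and its deprecation warning are gone, and `RUNNER_FEATURE_FLAG_ONCE=true` remains as the opt-out. A condensed sketch of the resulting logic:

```bash
# Condensed from the new entrypoint: --ephemeral is the default for
# ephemeral runners unless the legacy once-based flow is requested.
config_args=()
if [ "${RUNNER_FEATURE_FLAG_ONCE:-}" != "true" ] && [ "${RUNNER_EPHEMERAL}" == "true" ]; then
  config_args+=(--ephemeral)
fi
```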
@@ -20,7 +20,9 @@ function wait_for_process () {
 sudo /bin/bash <<SCRIPT
 mkdir -p /etc/docker

-echo "{}" > /etc/docker/daemon.json
+if [ ! -f /etc/docker/daemon.json ]; then
+  echo "{}" > /etc/docker/daemon.json
+fi

 if [ -n "${MTU}" ]; then
 jq ".\"mtu\" = ${MTU}" /etc/docker/daemon.json > /tmp/.daemon.json && mv /tmp/.daemon.json /etc/docker/daemon.json
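The guard above keeps the startup script from clobbering a pre-baked `/etc/docker/daemon.json`, and the `jq` line then only merges in the `mtu` key; a sketch of the merge semantics (the registry entry is a hypothetical pre-existing value):

```console
$ cat /etc/docker/daemon.json
{"insecure-registries":["registry.local:5000"]}
$ MTU=1400
$ jq ".\"mtu\" = ${MTU}" /etc/docker/daemon.json
{
  "insecure-registries": [
    "registry.local:5000"
  ],
  "mtu": 1400
}
```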
@@ -5,10 +5,12 @@ import (
 	"fmt"

 	"github.com/actions-runner-controller/actions-runner-controller/github"
+	"github.com/go-logr/logr"
 )

 type Simulator struct {
 	Client *github.Client
+	Log    logr.Logger
 }

 func (c *Simulator) GetRunnerGroupsVisibleToRepository(ctx context.Context, org, repo string, managed *VisibleRunnerGroups) (*VisibleRunnerGroups, error) {
@@ -24,6 +26,10 @@ func (c *Simulator) GetRunnerGroupsVisibleToRepository(ctx context.Context, org,
 		return visible, err
 	}

+	if c.Log.V(3).Enabled() {
+		c.Log.V(3).Info("ListOrganizationRunnerGroupsForRepository succeeded", "runerGroups", runnerGroups)
+	}
+
 	for _, runnerGroup := range runnerGroups {
 		ref := NewRunnerGroupFromGitHub(runnerGroup)

@@ -5,7 +5,7 @@ import (
 	"sort"
 	"strings"

-	"github.com/google/go-github/v39/github"
+	"github.com/google/go-github/v45/github"
 )

 type RunnerGroupScope int
@@ -3,6 +3,7 @@ package e2e
 import (
 	"context"
 	"fmt"
+	"os"
 	"path/filepath"
 	"strconv"
 	"time"
@@ -12,6 +13,13 @@ import (
 	"sigs.k8s.io/yaml"
 )

+type DeployKind int
+
+const (
+	RunnerSets DeployKind = iota
+	RunnerDeployments
+)
+
 var (
 	controllerImageRepo = "actionsrunnercontrollere2e/actions-runner-controller"
 	controllerImageTag  = "e2e"
@@ -36,24 +44,26 @@ var (
 			EnableBuildX: true,
 		},
 		{
-			Dockerfile: "../../runner/Dockerfile",
+			Dockerfile: "../../runner/actions-runner.dockerfile",
 			Args: []testing.BuildArg{
 				{
 					Name:  "RUNNER_VERSION",
-					Value: "2.289.2",
+					Value: "2.294.0",
 				},
 			},
 			Image: runnerImage,
+			EnableBuildX: true,
 		},
 		{
-			Dockerfile: "../../runner/Dockerfile.dindrunner",
+			Dockerfile: "../../runner/actions-runner-dind.dockerfile",
 			Args: []testing.BuildArg{
 				{
 					Name:  "RUNNER_VERSION",
-					Value: "2.289.2",
+					Value: "2.294.0",
 				},
 			},
 			Image: runnerDindImage,
+			EnableBuildX: true,
 		},
 	}
@@ -68,9 +78,7 @@ var (
 	}

 	commonScriptEnv = []string{
-		"SYNC_PERIOD=" + "30m",
-		"NAME=" + controllerImageRepo,
-		"VERSION=" + controllerImageTag,
+		"SYNC_PERIOD=" + "30s",
 		"RUNNER_TAG=" + runnerImageTag,
 	}
@@ -98,13 +106,27 @@ var (
 // whenever the whole test failed, so that you can immediately start fixing issues and rerun inidividual tests.
 // See the below link for how terratest handles this:
 // https://terratest.gruntwork.io/docs/testing-best-practices/iterating-locally-using-test-stages/
+//
+// This function leaves PVs undeleted. To delete PVs, run:
+// kubectl get pv -ojson | jq -rMc '.items[] | select(.status.phase == "Available") | {name:.metadata.name, status:.status.phase} | .name' | xargs kubectl delete pv
+//
+// If your disk fills up after dozens of test runs, try:
+// docker system prune
+// and
+// kind delete cluster --name teste2e
+//
+// The former tends to release 200MB-3GB, and the latter can release on the order of 100GB, because a kind node contains the loaded container images and
+// (in case you use it) the local provisioner's disk image (which is implemented as a directory within the kind node).
 func TestE2E(t *testing.T) {
 	if testing.Short() {
 		t.Skip("Skipped as -short is set")
 	}
+
+	skipRunnerCleanUp := os.Getenv("ARC_E2E_SKIP_RUNNER_CLEANUP") != ""
+	retainCluster := os.Getenv("ARC_E2E_RETAIN_CLUSTER") != ""
+	skipTestIDCleanUp := os.Getenv("ARC_E2E_SKIP_TEST_ID_CLEANUP") != ""
+
 	env := initTestEnv(t)
-	env.useRunnerSet = true

 	t.Run("build and load images", func(t *testing.T) {
 		env.buildAndLoadImages(t)
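The hunks below wire the three `ARC_E2E_*` toggles read above into per-stage cleanup, so a failed run can keep its runners, test ID, and kind cluster around for inspection; a hedged sketch of a local invocation (the package path and timeout are assumptions):

```console
$ GITHUB_TOKEN=ghp_... \
  ARC_E2E_SKIP_RUNNER_CLEANUP=1 \
  ARC_E2E_RETAIN_CLUSTER=1 \
  go test -v -count=1 -timeout 90m ./test/e2e
```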
@@ -118,8 +140,37 @@ func TestE2E(t *testing.T) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
t.Run("install actions-runner-controller and runners", func(t *testing.T) {
|
t.Run("RunnerSets", func(t *testing.T) {
|
||||||
env.installActionsRunnerController(t)
|
var (
|
||||||
|
testID string
|
||||||
|
)
|
||||||
|
|
||||||
|
t.Run("get or generate test ID", func(t *testing.T) {
|
||||||
|
testID = env.GetOrGenerateTestID(t)
|
||||||
|
})
|
||||||
|
|
||||||
|
if !skipTestIDCleanUp {
|
||||||
|
t.Cleanup(func() {
|
||||||
|
env.DeleteTestID(t)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Run("install actions-runner-controller v0.24.1", func(t *testing.T) {
|
||||||
|
env.installActionsRunnerController(t, "summerwind/actions-runner-controller", "v0.24.1", testID)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("deploy runners", func(t *testing.T) {
|
||||||
|
env.deploy(t, RunnerSets, testID)
|
||||||
|
})
|
||||||
|
|
||||||
|
if !skipRunnerCleanUp {
|
||||||
|
t.Cleanup(func() {
|
||||||
|
env.undeploy(t, RunnerSets, testID)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Run("install edge actions-runner-controller", func(t *testing.T) {
|
||||||
|
env.installActionsRunnerController(t, controllerImageRepo, controllerImageTag, testID)
|
||||||
})
|
})
|
||||||
|
|
||||||
if t.Failed() {
|
if t.Failed() {
|
||||||
@@ -127,7 +178,7 @@ func TestE2E(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
t.Run("Install workflow", func(t *testing.T) {
|
t.Run("Install workflow", func(t *testing.T) {
|
||||||
env.installActionsWorkflow(t)
|
env.installActionsWorkflow(t, RunnerSets, testID)
|
||||||
})
|
})
|
||||||
|
|
||||||
if t.Failed() {
|
if t.Failed() {
|
||||||
@@ -135,32 +186,41 @@ func TestE2E(t *testing.T) {
     }

     t.Run("Verify workflow run result", func(t *testing.T) {
-        env.verifyActionsWorkflowRun(t)
+        env.verifyActionsWorkflowRun(t, testID)
+        })
     })
-}

-func TestE2ERunnerDeploy(t *testing.T) {
-    if testing.Short() {
-        t.Skip("Skipped as -short is set")
+    t.Run("RunnerDeployments", func(t *testing.T) {
+        var (
+            testID string
+        )
+
+        t.Run("get or generate test ID", func(t *testing.T) {
+            testID = env.GetOrGenerateTestID(t)
+        })
+
+        if !skipTestIDCleanUp {
+            t.Cleanup(func() {
+                env.DeleteTestID(t)
+            })
     }

-    env := initTestEnv(t)
-    env.useApp = true
+        t.Run("install actions-runner-controller v0.24.1", func(t *testing.T) {
+            env.installActionsRunnerController(t, "summerwind/actions-runner-controller", "v0.24.1", testID)

-    t.Run("build and load images", func(t *testing.T) {
-        env.buildAndLoadImages(t)
     })

-    t.Run("install cert-manager", func(t *testing.T) {
-        env.installCertManager(t)
+        t.Run("deploy runners", func(t *testing.T) {
+            env.deploy(t, RunnerDeployments, testID)
     })

-    if t.Failed() {
-        return
+        if !skipRunnerCleanUp {
+            t.Cleanup(func() {
+                env.undeploy(t, RunnerDeployments, testID)
+            })
     }

-    t.Run("install actions-runner-controller and runners", func(t *testing.T) {
-        env.installActionsRunnerController(t)
+        t.Run("install edge actions-runner-controller", func(t *testing.T) {
+            env.installActionsRunnerController(t, controllerImageRepo, controllerImageTag, testID)
     })

     if t.Failed() {
@@ -168,7 +228,7 @@ func TestE2ERunnerDeploy(t *testing.T) {
     }

     t.Run("Install workflow", func(t *testing.T) {
-        env.installActionsWorkflow(t)
+        env.installActionsWorkflow(t, RunnerDeployments, testID)
     })

     if t.Failed() {
@@ -176,33 +236,32 @@ func TestE2ERunnerDeploy(t *testing.T) {
     }

     t.Run("Verify workflow run result", func(t *testing.T) {
-        env.verifyActionsWorkflowRun(t)
+        env.verifyActionsWorkflowRun(t, testID)
+    })
     })

+    if retainCluster {
         t.FailNow()
+    }
 }
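The restructure above folds the old TestE2ERunnerDeploy into TestE2E as two sibling subtests (RunnerSets and RunnerDeployments) and gates every destructive step behind an ARC_E2E_* environment flag. The pattern leans on t.Cleanup callbacks running only after the test and all of its subtests have finished. A minimal sketch of that gating, assuming only the standard library; the flag name is taken from the diff, while deployAll/undeployAll are hypothetical stand-ins for env.deploy/env.undeploy:

    // cleanup_gating_test.go - illustrative only, not part of the ARC test suite.
    package e2e_test

    import (
        "os"
        "testing"
    )

    func TestCleanupGating(t *testing.T) {
        skipRunnerCleanUp := os.Getenv("ARC_E2E_SKIP_RUNNER_CLEANUP") != ""

        deployAll(t)

        if !skipRunnerCleanUp {
            // t.Cleanup callbacks fire once the test and all subtests finish,
            // so runners survive for post-mortem debugging only when the
            // skip flag is exported.
            t.Cleanup(func() { undeployAll(t) })
        }
    }

    func deployAll(t *testing.T)   { t.Log("deploy runners") }
    func undeployAll(t *testing.T) { t.Log("undeploy runners") }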

 type env struct {
     *testing.Env

-    useRunnerSet bool
     // Uses GITHUB_APP_ID, GITHUB_APP_INSTALLATION_ID, and GITHUB_APP_PRIVATE_KEY
     // to let ARC authenticate as a GitHub App
     useApp bool

-    testID string
     testName string
     repoToCommit string
     appID, appInstallationID, appPrivateKeyFile string
-    runnerLabel, githubToken, testRepo, testOrg, testOrgRepo string
+    githubToken, testRepo, testOrg, testOrgRepo string
     githubTokenWebhook string
     testEnterprise string
     testEphemeral string
-    featureFlagEphemeral *bool
     scaleDownDelaySecondsAfterScaleOut int64
     minReplicas int64
     dockerdWithinRunnerContainer bool
-    testJobs []job
 }

 func initTestEnv(t *testing.T) *env {
@@ -212,15 +271,11 @@ func initTestEnv(t *testing.T) *env {

     e := &env{Env: testingEnv}

-    id := e.ID()
+    testName := t.Name()

-    testName := t.Name() + " " + id
-
     t.Logf("Initializing test with name %s", testName)

-    e.testID = id
     e.testName = testName
-    e.runnerLabel = "test-" + id
     e.githubToken = testing.Getenv(t, "GITHUB_TOKEN")
     e.appID = testing.Getenv(t, "GITHUB_APP_ID")
     e.appInstallationID = testing.Getenv(t, "GITHUB_APP_INSTALLATION_ID")
@@ -232,11 +287,6 @@ func initTestEnv(t *testing.T) *env {
     e.testOrgRepo = testing.Getenv(t, "TEST_ORG_REPO", "")
     e.testEnterprise = testing.Getenv(t, "TEST_ENTERPRISE", "")
     e.testEphemeral = testing.Getenv(t, "TEST_EPHEMERAL", "")
-    e.testJobs = createTestJobs(id, testResultCMNamePrefix, 20)
-
-    if ephemeral, err := strconv.ParseBool(testing.Getenv(t, "TEST_FEATURE_FLAG_EPHEMERAL", "")); err == nil {
-        e.featureFlagEphemeral = &ephemeral
-    }

     e.scaleDownDelaySecondsAfterScaleOut, _ = strconv.ParseInt(testing.Getenv(t, "TEST_RUNNER_SCALE_DOWN_DELAY_SECONDS_AFTER_SCALE_OUT", "10"), 10, 32)
     e.minReplicas, _ = strconv.ParseInt(testing.Getenv(t, "TEST_RUNNER_MIN_REPLICAS", "1"), 10, 32)
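After this change initTestEnv only reads configuration; the per-run ID, runner label, and job list are derived later from the shared testID. The numeric knobs keep the lookup-with-default plus strconv.ParseInt pattern seen above. A rough standalone equivalent, where getenvDefault is a hypothetical stand-in for the harness's testing.Getenv(t, key, default) helper:

    package main

    import (
        "fmt"
        "os"
        "strconv"
    )

    func getenvDefault(key, def string) string {
        if v, ok := os.LookupEnv(key); ok {
            return v
        }
        return def
    }

    func main() {
        // bitSize 32 bounds the parsed value to the int32 range even though
        // ParseInt's return type is int64, matching the env struct fields.
        minReplicas, _ := strconv.ParseInt(getenvDefault("TEST_RUNNER_MIN_REPLICAS", "1"), 10, 32)
        fmt.Println(minReplicas)
    }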
@@ -277,7 +327,7 @@ func (e *env) installCertManager(t *testing.T) {
     e.KubectlWaitUntilDeployAvailable(t, "cert-manager", waitCfg.WithTimeout(60*time.Second))
 }

-func (e *env) installActionsRunnerController(t *testing.T) {
+func (e *env) installActionsRunnerController(t *testing.T, repo, tag, testID string) {
     t.Helper()

     e.createControllerNamespaceAndServiceAccount(t)
@@ -287,29 +337,11 @@ func (e *env) installActionsRunnerController(t *testing.T) {
         "ACCEPTANCE_TEST_DEPLOYMENT_TOOL=" + "helm",
     }

-    if e.useRunnerSet {
-        scriptEnv = append(scriptEnv, "USE_RUNNERSET=1")
-    } else {
-        scriptEnv = append(scriptEnv, "USE_RUNNERSET=false")
-    }
-
     varEnv := []string{
-        "TEST_ENTERPRISE=" + e.testEnterprise,
-        "TEST_REPO=" + e.testRepo,
-        "TEST_ORG=" + e.testOrg,
-        "TEST_ORG_REPO=" + e.testOrgRepo,
         "WEBHOOK_GITHUB_TOKEN=" + e.githubTokenWebhook,
-        "RUNNER_LABEL=" + e.runnerLabel,
-        "TEST_ID=" + e.testID,
-        "TEST_EPHEMERAL=" + e.testEphemeral,
-        fmt.Sprintf("RUNNER_SCALE_DOWN_DELAY_SECONDS_AFTER_SCALE_OUT=%d", e.scaleDownDelaySecondsAfterScaleOut),
-        fmt.Sprintf("REPO_RUNNER_MIN_REPLICAS=%d", e.minReplicas),
-        fmt.Sprintf("ORG_RUNNER_MIN_REPLICAS=%d", e.minReplicas),
-        fmt.Sprintf("ENTERPRISE_RUNNER_MIN_REPLICAS=%d", e.minReplicas),
-    }
-
-    if e.featureFlagEphemeral != nil {
-        varEnv = append(varEnv, fmt.Sprintf("RUNNER_FEATURE_FLAG_EPHEMERAL=%v", *e.featureFlagEphemeral))
+        "TEST_ID=" + testID,
+        "NAME=" + repo,
+        "VERSION=" + tag,
     }

     if e.useApp {
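installActionsRunnerController now receives the chart coordinates as arguments, so the same helper can install either the released chart ("summerwind/actions-runner-controller" at "v0.24.1") or the locally built edge image; the runner-related variables move into the new do helper below. Under the hood, the KEY=value strings are exported to acceptance/deploy.sh through the process environment. The testing.ScriptConfig internals are not shown in this diff, so the os/exec wiring below is an assumption used purely for illustration:

    package main

    import (
        "fmt"
        "os"
        "os/exec"
    )

    func main() {
        scriptEnv := []string{
            "ACCEPTANCE_TEST_DEPLOYMENT_TOOL=helm",
            "TEST_ID=" + "abc123", // hypothetical test ID
            "NAME=" + "summerwind/actions-runner-controller",
            "VERSION=" + "v0.24.1",
        }

        cmd := exec.Command("bash", "acceptance/deploy.sh")
        // Inherit the parent environment, then layer the test variables on top.
        cmd.Env = append(os.Environ(), scriptEnv...)
        cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr

        if err := cmd.Run(); err != nil {
            fmt.Fprintln(os.Stderr, "deploy failed:", err)
        }
    }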
@@ -326,6 +358,54 @@ func (e *env) installActionsRunnerController(t *testing.T) {
         )
     }

+    scriptEnv = append(scriptEnv, varEnv...)
+    scriptEnv = append(scriptEnv, commonScriptEnv...)
+
+    e.RunScript(t, "../../acceptance/deploy.sh", testing.ScriptConfig{Dir: "../..", Env: scriptEnv})
+}
+
+func (e *env) deploy(t *testing.T, kind DeployKind, testID string) {
+    t.Helper()
+    e.do(t, "apply", kind, testID)
+}
+
+func (e *env) undeploy(t *testing.T, kind DeployKind, testID string) {
+    t.Helper()
+    e.do(t, "delete", kind, testID)
+}
+
+func (e *env) do(t *testing.T, op string, kind DeployKind, testID string) {
+    t.Helper()
+
+    e.createControllerNamespaceAndServiceAccount(t)
+
+    scriptEnv := []string{
+        "KUBECONFIG=" + e.Kubeconfig(),
+        "OP=" + op,
+    }
+
+    switch kind {
+    case RunnerSets:
+        scriptEnv = append(scriptEnv, "USE_RUNNERSET=1")
+    case RunnerDeployments:
+        scriptEnv = append(scriptEnv, "USE_RUNNERSET=false")
+    default:
+        t.Fatalf("Invalid deploy kind %v", kind)
+    }
+
+    varEnv := []string{
+        "TEST_ENTERPRISE=" + e.testEnterprise,
+        "TEST_REPO=" + e.testRepo,
+        "TEST_ORG=" + e.testOrg,
+        "TEST_ORG_REPO=" + e.testOrgRepo,
+        "RUNNER_LABEL=" + e.runnerLabel(testID),
+        "TEST_EPHEMERAL=" + e.testEphemeral,
+        fmt.Sprintf("RUNNER_SCALE_DOWN_DELAY_SECONDS_AFTER_SCALE_OUT=%d", e.scaleDownDelaySecondsAfterScaleOut),
+        fmt.Sprintf("REPO_RUNNER_MIN_REPLICAS=%d", e.minReplicas),
+        fmt.Sprintf("ORG_RUNNER_MIN_REPLICAS=%d", e.minReplicas),
+        fmt.Sprintf("ENTERPRISE_RUNNER_MIN_REPLICAS=%d", e.minReplicas),
+    }
+
     if e.dockerdWithinRunnerContainer {
         varEnv = append(varEnv,
             "RUNNER_DOCKERD_WITHIN_RUNNER_CONTAINER=true",
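deploy and undeploy are thin wrappers that reuse one script with OP=apply or OP=delete, and the switch on DeployKind replaces the removed useRunnerSet field. DeployKind, RunnerSets, and RunnerDeployments are defined elsewhere in the package; a minimal definition consistent with the switch above could look like this (an assumption, not the actual declaration):

    package main

    import "fmt"

    type DeployKind int

    const (
        RunnerSets DeployKind = iota
        RunnerDeployments
    )

    // useRunnerSetFlag mirrors the switch in (*env).do.
    func useRunnerSetFlag(kind DeployKind) (string, error) {
        switch kind {
        case RunnerSets:
            return "USE_RUNNERSET=1", nil
        case RunnerDeployments:
            return "USE_RUNNERSET=false", nil
        default:
            return "", fmt.Errorf("invalid deploy kind %v", kind)
        }
    }

    func main() {
        flag, _ := useRunnerSetFlag(RunnerSets)
        fmt.Println(flag) // USE_RUNNERSET=1
    }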
@@ -341,7 +421,11 @@ func (e *env) installActionsRunnerController(t *testing.T) {
     scriptEnv = append(scriptEnv, varEnv...)
     scriptEnv = append(scriptEnv, commonScriptEnv...)

-    e.RunScript(t, "../../acceptance/deploy.sh", testing.ScriptConfig{Dir: "../..", Env: scriptEnv})
+    e.RunScript(t, "../../acceptance/deploy_runners.sh", testing.ScriptConfig{Dir: "../..", Env: scriptEnv})
+}
+
+func (e *env) runnerLabel(testID string) string {
+    return "test-" + testID
 }

 func (e *env) createControllerNamespaceAndServiceAccount(t *testing.T) {
@@ -351,16 +435,20 @@ func (e *env) createControllerNamespaceAndServiceAccount(t *testing.T) {
     e.KubectlEnsureClusterRoleBindingServiceAccount(t, "default-admin", "cluster-admin", "default:default", testing.KubectlConfig{})
 }

-func (e *env) installActionsWorkflow(t *testing.T) {
+func (e *env) installActionsWorkflow(t *testing.T, kind DeployKind, testID string) {
     t.Helper()

-    installActionsWorkflow(t, e.testName, e.runnerLabel, testResultCMNamePrefix, e.repoToCommit, e.testJobs)
+    installActionsWorkflow(t, e.testName+" "+testID, e.runnerLabel(testID), testResultCMNamePrefix, e.repoToCommit, kind, e.testJobs(testID))
 }

-func (e *env) verifyActionsWorkflowRun(t *testing.T) {
+func (e *env) testJobs(testID string) []job {
+    return createTestJobs(testID, testResultCMNamePrefix, 6)
+}
+
+func (e *env) verifyActionsWorkflowRun(t *testing.T, testID string) {
     t.Helper()

-    verifyActionsWorkflowRun(t, e.Env, e.testJobs)
+    verifyActionsWorkflowRun(t, e.Env, e.testJobs(testID))
 }

 type job struct {
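Both suites now derive their jobs from the shared testID, and the per-call job count drops to 6 from the 20 that the removed initTestEnv line used to create. createTestJobs itself is outside this diff; the sketch below is illustrative only, consistent with its signature and the j.name/j.testArg fields referenced elsewhere, with every other detail guessed:

    package main

    import "fmt"

    type job struct {
        name    string
        testArg string
    }

    func createTestJobs(id, testResultCMNamePrefix string, numJobs int) []job {
        var jobs []job
        for i := 0; i < numJobs; i++ {
            jobs = append(jobs, job{
                name: fmt.Sprintf("test%d", i),
                // testArg is later passed to ./test.sh; the prefixed name is
                // a guess at how per-job results are kept distinct.
                testArg: fmt.Sprintf("%s%s-%d", testResultCMNamePrefix, id, i),
            })
        }
        return jobs
    }

    func main() {
        fmt.Println(len(createTestJobs("abc123", "test-result-", 6))) // 6
    }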
@@ -383,7 +471,7 @@ func createTestJobs(id, testResultCMNamePrefix string, numJobs int) []job {

 const Branch = "main"

-func installActionsWorkflow(t *testing.T, testName, runnerLabel, testResultCMNamePrefix, testRepo string, testJobs []job) {
+func installActionsWorkflow(t *testing.T, testName, runnerLabel, testResultCMNamePrefix, testRepo string, kind DeployKind, testJobs []job) {
     t.Helper()

     ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
@@ -400,23 +488,110 @@ func installActionsWorkflow(t *testing.T, testName, runnerLabel, testResultCMNam
         Jobs: map[string]testing.Job{},
     }

+    kubernetesContainerMode := os.Getenv("TEST_CONTAINER_MODE") == "kubernetes"
+
+    var container string
+    if kubernetesContainerMode {
+        container = "golang:1.18"
+    }
+
     for _, j := range testJobs {
-        wf.Jobs[j.name] = testing.Job{
-            RunsOn: runnerLabel,
-            Steps: []testing.Step{
+        steps := []testing.Step{
             {
-                Uses: testing.ActionsCheckoutV2,
+                Uses: testing.ActionsCheckout,
             },
-            {
+        }
+
+        if !kubernetesContainerMode {
+            if kind == RunnerDeployments {
+                steps = append(steps,
+                    testing.Step{
+                        Run: "sudo mkdir -p \"${RUNNER_TOOL_CACHE}\" \"${HOME}/.cache\" \"/var/lib/docker\"",
+                    },
+                )
+            }
+
+            steps = append(steps,
+                testing.Step{
+                    // This might be the easiest way to handle permissions without use of securityContext
+                    // https://stackoverflow.com/questions/50156124/kubernetes-nfs-persistent-volumes-permission-denied#comment107483717_53186320
+                    Run: "sudo chmod 777 -R \"${RUNNER_TOOL_CACHE}\" \"${HOME}/.cache\" \"/var/lib/docker\"",
+                },
+                testing.Step{
+                    // This might be the easiest way to handle permissions without use of securityContext
+                    // https://stackoverflow.com/questions/50156124/kubernetes-nfs-persistent-volumes-permission-denied#comment107483717_53186320
+                    Run: "ls -lah \"${RUNNER_TOOL_CACHE}\" \"${HOME}/.cache\" \"/var/lib/docker\"",
+                },
+                testing.Step{
+                    Uses: "actions/setup-go@v3",
+                    With: &testing.With{
+                        GoVersion: "1.18.2",
+                    },
+                },
+            )
+        }
+
+        steps = append(steps,
+            testing.Step{
+                Run: "go version",
+            },
+            testing.Step{
+                Run: "go build .",
+            },
+        )
+
+        if !kubernetesContainerMode {
+            steps = append(steps,
+                testing.Step{
+                    // https://github.com/docker/buildx/issues/413#issuecomment-710660155
+                    // To prevent setup-buildx-action from failing with:
+                    // error: could not create a builder instance with TLS data loaded from environment. Please use `docker context create <context-name>` to create a context for current environment and then create a builder instance with `docker buildx create <context-name>`
+                    Run: "docker context create mycontext",
+                },
+                testing.Step{
+                    Run: "docker context use mycontext",
+                },
+                testing.Step{
+                    Name: "Set up Docker Buildx",
+                    Uses: "docker/setup-buildx-action@v1",
+                    With: &testing.With{
+                        BuildkitdFlags: "--debug",
+                        Endpoint:       "mycontext",
+                        // As the consequence of setting `install: false`, it doesn't install buildx as an alias to `docker build`
+                        // so we need to use `docker buildx build` in the next step
+                        Install: false,
+                    },
+                },
+                testing.Step{
+                    Run: "docker buildx build --platform=linux/amd64 " +
+                        "--cache-from=type=local,src=/home/runner/.cache/buildx " +
+                        "--cache-to=type=local,dest=/home/runner/.cache/buildx-new,mode=max " +
+                        ".",
+                },
+                testing.Step{
+                    // https://github.com/docker/build-push-action/blob/master/docs/advanced/cache.md#local-cache
+                    // See https://github.com/moby/buildkit/issues/1896 for why this is needed
+                    Run: "rm -rf /home/runner/.cache/buildx && mv /home/runner/.cache/buildx-new /home/runner/.cache/buildx",
+                },
+                testing.Step{
+                    Run: "ls -lah /home/runner/.cache/*",
+                },
+                testing.Step{
                     Uses: "azure/setup-kubectl@v1",
                     With: &testing.With{
                         Version: "v1.20.2",
                     },
                 },
-            {
+                testing.Step{
                     Run: fmt.Sprintf("./test.sh %s %s", t.Name(), j.testArg),
                 },
-            },
+            )
+        }
+
+        wf.Jobs[j.name] = testing.Job{
+            RunsOn:    runnerLabel,
+            Container: container,
+            Steps:     steps,
         }
     }

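The rewritten loop swaps a single composite literal for incremental appends, so the Docker-dependent steps can be dropped wholesale when TEST_CONTAINER_MODE=kubernetes and the sudo mkdir step can be limited to RunnerDeployments. Stripped to its skeleton, with local stand-ins for the testing.Step and testing.Job types (illustrative, not the harness's actual types):

    package main

    import "fmt"

    type Step struct{ Run, Uses string }

    type Job struct {
        RunsOn    string
        Container string
        Steps     []Step
    }

    func main() {
        kubernetesContainerMode := false

        // Start from the steps every mode needs.
        steps := []Step{{Uses: "actions/checkout@v3"}}

        if !kubernetesContainerMode {
            // Docker-based steps only make sense where dockerd is reachable.
            steps = append(steps, Step{Run: "docker buildx build ."})
        }

        steps = append(steps, Step{Run: "go build ."})

        job := Job{RunsOn: "test-abc123", Steps: steps}
        fmt.Printf("%+v\n", job)
    }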
@@ -500,5 +675,5 @@ func verifyActionsWorkflowRun(t *testing.T, env *testing.Env, testJobs []job) {
     }

     return results, err
-    }, 3*60*time.Second, 10*time.Second).Should(gomega.Equal(expected))
+    }, 8*60*time.Second, 30*time.Second).Should(gomega.Equal(expected))
 }
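The final hunk only retunes polling: gomega's Eventually(actual, timeout, pollingInterval) now retries for up to 8 minutes at 30-second intervals instead of 3 minutes at 10-second intervals, giving slower runner scale-ups more room before verification gives up. A self-contained usage sketch of that form (the polled function is a trivial stand-in for collecting workflow results):

    package e2e_test

    import (
        "testing"
        "time"

        "github.com/onsi/gomega"
    )

    func TestEventuallyIntervals(t *testing.T) {
        g := gomega.NewWithT(t)

        g.Eventually(func() (string, error) {
            // A non-nil error here would make Eventually keep retrying
            // until the 8-minute timeout elapses.
            return "ok", nil
        }, 8*60*time.Second, 30*time.Second).Should(gomega.Equal("ok"))
    }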
Some files were not shown because too many files have changed in this diff.