mirror of
https://github.com/actions/actions-runner-controller.git
synced 2025-12-10 11:41:27 +00:00
Compare commits
81 Commits
actions-ru
...
actions-ru
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
4ede0c18d0 | ||
|
|
9091d9b756 | ||
|
|
a09c2564d9 | ||
|
|
a555c90fd5 | ||
|
|
38644cf4e8 | ||
|
|
23f357db10 | ||
|
|
584745b67d | ||
|
|
df9592dc99 | ||
|
|
8071ac7066 | ||
|
|
3c33eca501 | ||
|
|
aa827474b2 | ||
|
|
c75c9f9226 | ||
|
|
c09a04ec01 | ||
|
|
618276e3d3 | ||
|
|
18dd89c884 | ||
|
|
98b17dc0a5 | ||
|
|
c658dcfa6d | ||
|
|
c4996d4bbd | ||
|
|
7a3fa4f362 | ||
|
|
1bfd743e69 | ||
|
|
734f3bd63a | ||
|
|
409dc4c114 | ||
|
|
4b9a6c6700 | ||
|
|
86e1a4a8f3 | ||
|
|
544d620bc3 | ||
|
|
1cfe1974c4 | ||
|
|
7e4b6ebd6d | ||
|
|
11cb9b7882 | ||
|
|
10b88bf070 | ||
|
|
8b619e7c6f | ||
|
|
fea1457f12 | ||
|
|
473295e3fc | ||
|
|
9f6f962fc7 | ||
|
|
2a475f25c7 | ||
|
|
dd9f25ea78 | ||
|
|
b8e4eee904 | ||
|
|
edbdef8d20 | ||
|
|
a190fa97bb | ||
|
|
bfc5ea4727 | ||
|
|
5a9e8545aa | ||
|
|
4446ba57e1 | ||
|
|
d62c8a4697 | ||
|
|
946d5b1fa7 | ||
|
|
da6b07660e | ||
|
|
e3deb0d752 | ||
|
|
82641e5036 | ||
|
|
2fe6adf5b7 | ||
|
|
736126b793 | ||
|
|
6abf5bbac8 | ||
|
|
dc4f116bda | ||
|
|
cda10fd243 | ||
|
|
b5d1a63bdf | ||
|
|
6f3e23973d | ||
|
|
a517c1ff66 | ||
|
|
9b28e633c1 | ||
|
|
8161136cbd | ||
|
|
a9ac5a1cbf | ||
|
|
d4f35cff4f | ||
|
|
f661249f07 | ||
|
|
73e430ce54 | ||
|
|
858ef8979d | ||
|
|
1ce0a183a6 | ||
|
|
63935d2053 | ||
|
|
fc63d6d26e | ||
|
|
5ea08411e6 | ||
|
|
067ed2e5ec | ||
|
|
d86bd2bcd7 | ||
|
|
ddd417f756 | ||
|
|
0386c0734c | ||
|
|
af96de6184 | ||
|
|
abb8615796 | ||
|
|
bc7a3cab1b | ||
|
|
e2c8163b8c | ||
|
|
84d16c1c12 | ||
|
|
071898c96b | ||
|
|
f24e2fa44e | ||
|
|
3c7d3d6b57 | ||
|
|
23f091d7fa | ||
|
|
667764e027 | ||
|
|
de693c4191 | ||
|
|
510fc9c834 |
8
.github/ISSUE_TEMPLATE/bug_report.yml
vendored
8
.github/ISSUE_TEMPLATE/bug_report.yml
vendored
@@ -133,6 +133,8 @@ body:
|
|||||||
description: "NEVER EVER OMIT THIS! Include logs from `actions-runner-controller`'s controller-manager pod"
|
description: "NEVER EVER OMIT THIS! Include logs from `actions-runner-controller`'s controller-manager pod"
|
||||||
render: shell
|
render: shell
|
||||||
placeholder: |
|
placeholder: |
|
||||||
|
PROVIDE THE LOGS VIA A GIST LINK (https://gist.github.com/), NOT DIRECTLY IN THIS TEXT AREA
|
||||||
|
|
||||||
To grab controller logs:
|
To grab controller logs:
|
||||||
|
|
||||||
# Set NS according to your setup
|
# Set NS according to your setup
|
||||||
@@ -142,8 +144,6 @@ body:
|
|||||||
kubectl -n $NS get po
|
kubectl -n $NS get po
|
||||||
|
|
||||||
kubectl -n $NS logs $POD_NAME > arc.log
|
kubectl -n $NS logs $POD_NAME > arc.log
|
||||||
|
|
||||||
Upload it to e.g. https://gist.github.com/ and paste the link to it here.
|
|
||||||
validations:
|
validations:
|
||||||
required: true
|
required: true
|
||||||
- type: textarea
|
- type: textarea
|
||||||
@@ -153,6 +153,8 @@ body:
|
|||||||
description: "Include logs from runner pod(s)"
|
description: "Include logs from runner pod(s)"
|
||||||
render: shell
|
render: shell
|
||||||
placeholder: |
|
placeholder: |
|
||||||
|
PROVIDE THE LOGS VIA A GIST LINK (https://gist.github.com/), NOT DIRECTLY IN THIS TEXT AREA
|
||||||
|
|
||||||
To grab the runner pod logs:
|
To grab the runner pod logs:
|
||||||
|
|
||||||
# Set NS according to your setup. It should match your RunnerDeployment's metadata.namespace.
|
# Set NS according to your setup. It should match your RunnerDeployment's metadata.namespace.
|
||||||
@@ -163,8 +165,6 @@ body:
|
|||||||
|
|
||||||
kubectl -n $NS logs $POD_NAME -c runner > runnerpod_runner.log
|
kubectl -n $NS logs $POD_NAME -c runner > runnerpod_runner.log
|
||||||
kubectl -n $NS logs $POD_NAME -c docker > runnerpod_docker.log
|
kubectl -n $NS logs $POD_NAME -c docker > runnerpod_docker.log
|
||||||
|
|
||||||
Upload it to e.g. https://gist.github.com/ and paste the link to it here.
|
|
||||||
validations:
|
validations:
|
||||||
required: true
|
required: true
|
||||||
- type: textarea
|
- type: textarea
|
||||||
|
|||||||
@@ -37,12 +37,14 @@ runs:
|
|||||||
version: latest
|
version: latest
|
||||||
|
|
||||||
- name: Login to DockerHub
|
- name: Login to DockerHub
|
||||||
|
if: ${{ github.event_name == 'release' || github.event_name == 'push' && github.ref == 'refs/heads/master' }}
|
||||||
uses: docker/login-action@v2
|
uses: docker/login-action@v2
|
||||||
with:
|
with:
|
||||||
username: ${{ inputs.username }}
|
username: ${{ inputs.username }}
|
||||||
password: ${{ inputs.password }}
|
password: ${{ inputs.password }}
|
||||||
|
|
||||||
- name: Login to GitHub Container Registry
|
- name: Login to GitHub Container Registry
|
||||||
|
if: ${{ github.event_name == 'release' || github.event_name == 'push' && github.ref == 'refs/heads/master' }}
|
||||||
uses: docker/login-action@v2
|
uses: docker/login-action@v2
|
||||||
with:
|
with:
|
||||||
registry: ghcr.io
|
registry: ghcr.io
|
||||||
|
|||||||
9
.github/workflows/publish-arc.yaml
vendored
9
.github/workflows/publish-arc.yaml
vendored
@@ -5,6 +5,11 @@ on:
|
|||||||
types:
|
types:
|
||||||
- published
|
- published
|
||||||
|
|
||||||
|
# https://docs.github.com/en/rest/overview/permissions-required-for-github-apps
|
||||||
|
permissions:
|
||||||
|
contents: write
|
||||||
|
packages: write
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
release-controller:
|
release-controller:
|
||||||
name: Release
|
name: Release
|
||||||
@@ -58,6 +63,8 @@ jobs:
|
|||||||
${{ env.DOCKERHUB_USERNAME }}/actions-runner-controller:latest
|
${{ env.DOCKERHUB_USERNAME }}/actions-runner-controller:latest
|
||||||
${{ env.DOCKERHUB_USERNAME }}/actions-runner-controller:${{ env.VERSION }}
|
${{ env.DOCKERHUB_USERNAME }}/actions-runner-controller:${{ env.VERSION }}
|
||||||
${{ env.DOCKERHUB_USERNAME }}/actions-runner-controller:${{ env.VERSION }}-${{ steps.vars.outputs.sha_short }}
|
${{ env.DOCKERHUB_USERNAME }}/actions-runner-controller:${{ env.VERSION }}-${{ steps.vars.outputs.sha_short }}
|
||||||
|
ghcr.io/actions-runner-controller/actions-runner-controller:latest
|
||||||
|
ghcr.io/actions-runner-controller/actions-runner-controller:${{ env.VERSION }}
|
||||||
|
ghcr.io/actions-runner-controller/actions-runner-controller:${{ env.VERSION }}-${{ steps.vars.outputs.sha_short }}
|
||||||
cache-from: type=gha
|
cache-from: type=gha
|
||||||
cache-to: type=gha,mode=max
|
cache-to: type=gha,mode=max
|
||||||
|
|
||||||
|
|||||||
4
.github/workflows/publish-chart.yaml
vendored
4
.github/workflows/publish-chart.yaml
vendored
@@ -31,7 +31,7 @@ jobs:
|
|||||||
fetch-depth: 0
|
fetch-depth: 0
|
||||||
|
|
||||||
- name: Set up Helm
|
- name: Set up Helm
|
||||||
uses: azure/setup-helm@v2.1
|
uses: azure/setup-helm@v3.1
|
||||||
with:
|
with:
|
||||||
version: ${{ env.HELM_VERSION }}
|
version: ${{ env.HELM_VERSION }}
|
||||||
|
|
||||||
@@ -73,7 +73,7 @@ jobs:
|
|||||||
|
|
||||||
- name: Create kind cluster
|
- name: Create kind cluster
|
||||||
if: steps.list-changed.outputs.changed == 'true'
|
if: steps.list-changed.outputs.changed == 'true'
|
||||||
uses: helm/kind-action@v1.2.0
|
uses: helm/kind-action@v1.3.0
|
||||||
|
|
||||||
# We need cert-manager already installed in the cluster because we assume the CRDs exist
|
# We need cert-manager already installed in the cluster because we assume the CRDs exist
|
||||||
- name: Install cert-manager
|
- name: Install cert-manager
|
||||||
|
|||||||
19
.github/workflows/runners.yaml
vendored
19
.github/workflows/runners.yaml
vendored
@@ -6,7 +6,16 @@ on:
|
|||||||
- opened
|
- opened
|
||||||
- synchronize
|
- synchronize
|
||||||
- reopened
|
- reopened
|
||||||
- closed
|
branches:
|
||||||
|
- 'master'
|
||||||
|
paths:
|
||||||
|
- 'runner/**'
|
||||||
|
- '!runner/Makefile'
|
||||||
|
- '.github/workflows/runners.yaml'
|
||||||
|
- '!**.md'
|
||||||
|
# We must do a trigger on a push: instead of a types: closed so GitHub Secrets
|
||||||
|
# are available to the workflow run
|
||||||
|
push:
|
||||||
branches:
|
branches:
|
||||||
- 'master'
|
- 'master'
|
||||||
paths:
|
paths:
|
||||||
@@ -16,9 +25,10 @@ on:
|
|||||||
- '!**.md'
|
- '!**.md'
|
||||||
|
|
||||||
env:
|
env:
|
||||||
RUNNER_VERSION: 2.293.0
|
RUNNER_VERSION: 2.294.0
|
||||||
DOCKER_VERSION: 20.10.12
|
DOCKER_VERSION: 20.10.12
|
||||||
DOCKERHUB_USERNAME: ${{ secrets.DOCKER_USER }}
|
RUNNER_CONTAINER_HOOKS_VERSION: 0.1.2
|
||||||
|
DOCKERHUB_USERNAME: summerwind
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
build-runners:
|
build-runners:
|
||||||
@@ -57,10 +67,11 @@ jobs:
|
|||||||
context: ./runner
|
context: ./runner
|
||||||
file: ./runner/${{ matrix.name }}.dockerfile
|
file: ./runner/${{ matrix.name }}.dockerfile
|
||||||
platforms: linux/amd64,linux/arm64
|
platforms: linux/amd64,linux/arm64
|
||||||
push: ${{ github.ref == 'master' && github.event.pull_request.merged == true }}
|
push: ${{ github.event_name == 'push' && github.ref == 'refs/heads/master' }}
|
||||||
build-args: |
|
build-args: |
|
||||||
RUNNER_VERSION=${{ env.RUNNER_VERSION }}
|
RUNNER_VERSION=${{ env.RUNNER_VERSION }}
|
||||||
DOCKER_VERSION=${{ env.DOCKER_VERSION }}
|
DOCKER_VERSION=${{ env.DOCKER_VERSION }}
|
||||||
|
RUNNER_CONTAINER_HOOKS_VERSION=${{ env.RUNNER_CONTAINER_HOOKS_VERSION }}
|
||||||
tags: |
|
tags: |
|
||||||
${{ env.DOCKERHUB_USERNAME }}/${{ matrix.name }}:v${{ env.RUNNER_VERSION }}-${{ matrix.os-name }}-${{ matrix.os-version }}
|
${{ env.DOCKERHUB_USERNAME }}/${{ matrix.name }}:v${{ env.RUNNER_VERSION }}-${{ matrix.os-name }}-${{ matrix.os-version }}
|
||||||
${{ env.DOCKERHUB_USERNAME }}/${{ matrix.name }}:v${{ env.RUNNER_VERSION }}-${{ matrix.os-name }}-${{ matrix.os-version }}-${{ steps.vars.outputs.sha_short }}
|
${{ env.DOCKERHUB_USERNAME }}/${{ matrix.name }}:v${{ env.RUNNER_VERSION }}-${{ matrix.os-name }}-${{ matrix.os-version }}-${{ steps.vars.outputs.sha_short }}
|
||||||
|
|||||||
4
.github/workflows/validate-chart.yaml
vendored
4
.github/workflows/validate-chart.yaml
vendored
@@ -26,7 +26,7 @@ jobs:
|
|||||||
fetch-depth: 0
|
fetch-depth: 0
|
||||||
|
|
||||||
- name: Set up Helm
|
- name: Set up Helm
|
||||||
uses: azure/setup-helm@v2.1
|
uses: azure/setup-helm@v3.1
|
||||||
with:
|
with:
|
||||||
version: ${{ env.HELM_VERSION }}
|
version: ${{ env.HELM_VERSION }}
|
||||||
|
|
||||||
@@ -67,7 +67,7 @@ jobs:
|
|||||||
ct lint --config charts/.ci/ct-config.yaml
|
ct lint --config charts/.ci/ct-config.yaml
|
||||||
|
|
||||||
- name: Create kind cluster
|
- name: Create kind cluster
|
||||||
uses: helm/kind-action@v1.2.0
|
uses: helm/kind-action@v1.3.0
|
||||||
if: steps.list-changed.outputs.changed == 'true'
|
if: steps.list-changed.outputs.changed == 'true'
|
||||||
|
|
||||||
# We need cert-manager already installed in the cluster because we assume the CRDs exist
|
# We need cert-manager already installed in the cluster because we assume the CRDs exist
|
||||||
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
# Build the manager binary
|
# Build the manager binary
|
||||||
FROM --platform=$BUILDPLATFORM golang:1.18.2 as builder
|
FROM --platform=$BUILDPLATFORM golang:1.18.4 as builder
|
||||||
|
|
||||||
WORKDIR /workspace
|
WORKDIR /workspace
|
||||||
|
|
||||||
|
|||||||
2
Makefile
2
Makefile
@@ -5,7 +5,7 @@ else
|
|||||||
endif
|
endif
|
||||||
DOCKER_USER ?= $(shell echo ${NAME} | cut -d / -f1)
|
DOCKER_USER ?= $(shell echo ${NAME} | cut -d / -f1)
|
||||||
VERSION ?= latest
|
VERSION ?= latest
|
||||||
RUNNER_VERSION ?= 2.293.0
|
RUNNER_VERSION ?= 2.294.0
|
||||||
TARGETPLATFORM ?= $(shell arch)
|
TARGETPLATFORM ?= $(shell arch)
|
||||||
RUNNER_NAME ?= ${DOCKER_USER}/actions-runner
|
RUNNER_NAME ?= ${DOCKER_USER}/actions-runner
|
||||||
RUNNER_TAG ?= ${VERSION}
|
RUNNER_TAG ?= ${VERSION}
|
||||||
|
|||||||
258
README.md
258
README.md
@@ -29,7 +29,9 @@ ToC:
|
|||||||
- [Webhook Driven Scaling](#webhook-driven-scaling)
|
- [Webhook Driven Scaling](#webhook-driven-scaling)
|
||||||
- [Autoscaling to/from 0](#autoscaling-tofrom-0)
|
- [Autoscaling to/from 0](#autoscaling-tofrom-0)
|
||||||
- [Scheduled Overrides](#scheduled-overrides)
|
- [Scheduled Overrides](#scheduled-overrides)
|
||||||
- [Runner with DinD](#runner-with-dind)
|
- [Alternative Runners](#alternative-runners)
|
||||||
|
- [Runner with DinD](#runner-with-dind)
|
||||||
|
- [Runner with k8s jobs](#runner-with-k8s-jobs)
|
||||||
- [Additional Tweaks](#additional-tweaks)
|
- [Additional Tweaks](#additional-tweaks)
|
||||||
- [Custom Volume mounts](#custom-volume-mounts)
|
- [Custom Volume mounts](#custom-volume-mounts)
|
||||||
- [Runner Labels](#runner-labels)
|
- [Runner Labels](#runner-labels)
|
||||||
@@ -38,6 +40,7 @@ ToC:
|
|||||||
- [Using IRSA (IAM Roles for Service Accounts) in EKS](#using-irsa-iam-roles-for-service-accounts-in-eks)
|
- [Using IRSA (IAM Roles for Service Accounts) in EKS](#using-irsa-iam-roles-for-service-accounts-in-eks)
|
||||||
- [Software Installed in the Runner Image](#software-installed-in-the-runner-image)
|
- [Software Installed in the Runner Image](#software-installed-in-the-runner-image)
|
||||||
- [Using without cert-manager](#using-without-cert-manager)
|
- [Using without cert-manager](#using-without-cert-manager)
|
||||||
|
- [Multitenancy](#multitenancy)
|
||||||
- [Troubleshooting](#troubleshooting)
|
- [Troubleshooting](#troubleshooting)
|
||||||
- [Contributing](#contributing)
|
- [Contributing](#contributing)
|
||||||
|
|
||||||
@@ -255,7 +258,7 @@ You can deploy multiple controllers either in a single shared namespace, or in a
|
|||||||
|
|
||||||
If you plan on installing all instances of the controller stack into a single namespace there are a few things you need to do for this to work.
|
If you plan on installing all instances of the controller stack into a single namespace there are a few things you need to do for this to work.
|
||||||
|
|
||||||
1. All resources per stack must have a unique, in the case of Helm this can be done by giving each install a unique release name, or via the `fullnameOverride` properties.
|
1. All resources per stack must have a unique name, in the case of Helm this can be done by giving each install a unique release name, or via the `fullnameOverride` properties.
|
||||||
2. `authSecret.name` needs to be unique per stack when each stack is tied to runners in different GitHub organizations and repositories AND you want your GitHub credentials to be narrowly scoped.
|
2. `authSecret.name` needs to be unique per stack when each stack is tied to runners in different GitHub organizations and repositories AND you want your GitHub credentials to be narrowly scoped.
|
||||||
3. `leaderElectionId` needs to be unique per stack. If this is not unique to the stack the controller tries to race onto the leader election lock resulting in only one stack working concurrently. Your controller will be stuck with a log message something like this `attempting to acquire leader lease arc-controllers/actions-runner-controller...`
|
3. `leaderElectionId` needs to be unique per stack. If this is not unique to the stack the controller tries to race onto the leader election lock resulting in only one stack working concurrently. Your controller will be stuck with a log message something like this `attempting to acquire leader lease arc-controllers/actions-runner-controller...`
|
||||||
4. The MutatingWebhookConfiguration in each stack must include a namespace selector for that stack's corresponding runner namespace, this is already configured in the helm chart.
|
4. The MutatingWebhookConfiguration in each stack must include a namespace selector for that stack's corresponding runner namespace, this is already configured in the helm chart.
|
||||||
@@ -269,52 +272,50 @@ Alternatively, you can install each controller stack into a unique namespace (re
|
|||||||
- The organization level
|
- The organization level
|
||||||
- The enterprise level
|
- The enterprise level
|
||||||
|
|
||||||
There are two ways to use this controller:
|
Runners can be deployed as 1 of 2 abstractions:
|
||||||
|
|
||||||
- Manage runners one by one with `Runner`.
|
- A `RunnerDeployment` (similar to k8s's `Deployments`, based on `Pods`)
|
||||||
- Manage a set of runners with `RunnerDeployment`.
|
- A `RunnerSet` (based on k8s's `StatefulSets`)
|
||||||
|
|
||||||
|
We go into details about the differences between the 2 later, initially lets look at how to deploy a basic `RunnerDeployment` at the 3 possible management hierarchies.
|
||||||
|
|
||||||
### Repository Runners
|
### Repository Runners
|
||||||
|
|
||||||
To launch a single self-hosted runner, you need to create a manifest file that includes a `Runner` resource as follows. This example launches a self-hosted runner with name *example-runner* for the *actions-runner-controller/actions-runner-controller* repository.
|
To launch a single self-hosted runner, you need to create a manifest file that includes a `RunnerDeployment` resource as follows. This example launches a self-hosted runner with name *example-runnerdeploy* for the *actions-runner-controller/actions-runner-controller* repository.
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
# runner.yaml
|
# runnerdeployment.yaml
|
||||||
apiVersion: actions.summerwind.dev/v1alpha1
|
apiVersion: actions.summerwind.dev/v1alpha1
|
||||||
kind: Runner
|
kind: RunnerDeployment
|
||||||
metadata:
|
metadata:
|
||||||
name: example-runner
|
name: example-runnerdeploy
|
||||||
spec:
|
spec:
|
||||||
repository: example/myrepo
|
replicas: 1
|
||||||
env: []
|
template:
|
||||||
|
spec:
|
||||||
|
repository: mumoshu/actions-runner-controller-ci
|
||||||
```
|
```
|
||||||
|
|
||||||
Apply the created manifest file to your Kubernetes.
|
Apply the created manifest file to your Kubernetes.
|
||||||
|
|
||||||
```shell
|
```shell
|
||||||
$ kubectl apply -f runner.yaml
|
$ kubectl apply -f runnerdeployment.yaml
|
||||||
runner.actions.summerwind.dev/example-runner created
|
runnerdeployment.actions.summerwind.dev/example-runnerdeploy created
|
||||||
```
|
```
|
||||||
|
|
||||||
You can see that the Runner resource has been created.
|
You can see that 1 runner and its underlying pod has been created as specified by `replicas: 1` attribute:
|
||||||
|
|
||||||
```shell
|
```shell
|
||||||
$ kubectl get runners
|
$ kubectl get runners
|
||||||
NAME REPOSITORY STATUS
|
NAME REPOSITORY STATUS
|
||||||
example-runner actions-runner-controller/actions-runner-controller Running
|
example-runnerdeploy2475h595fr mumoshu/actions-runner-controller-ci Running
|
||||||
```
|
|
||||||
|
|
||||||
You can also see that the runner pod has been running.
|
|
||||||
|
|
||||||
```shell
|
|
||||||
$ kubectl get pods
|
$ kubectl get pods
|
||||||
NAME READY STATUS RESTARTS AGE
|
NAME READY STATUS RESTARTS AGE
|
||||||
example-runner 2/2 Running 0 1m
|
example-runnerdeploy2475ht2qbr 2/2 Running 0 1m
|
||||||
```
|
```
|
||||||
|
|
||||||
The runner you created has been registered to your repository.
|
The runner you created has been registered directly to the defined repository, you should be able to see it in the settings of the repository.
|
||||||
|
|
||||||
<img width="756" alt="Actions tab in your repository settings" src="https://user-images.githubusercontent.com/230145/73618667-8cbf9700-466c-11ea-80b6-c67e6d3f70e7.png">
|
|
||||||
|
|
||||||
Now you can use your self-hosted runner. See the [official documentation](https://help.github.com/en/actions/automating-your-workflow-with-github-actions/using-self-hosted-runners-in-a-workflow) on how to run a job with it.
|
Now you can use your self-hosted runner. See the [official documentation](https://help.github.com/en/actions/automating-your-workflow-with-github-actions/using-self-hosted-runners-in-a-workflow) on how to run a job with it.
|
||||||
|
|
||||||
@@ -323,13 +324,15 @@ Now you can use your self-hosted runner. See the [official documentation](https:
|
|||||||
To add the runner to an organization, you only need to replace the `repository` field with `organization`, so the runner will register itself to the organization.
|
To add the runner to an organization, you only need to replace the `repository` field with `organization`, so the runner will register itself to the organization.
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
# runner.yaml
|
|
||||||
apiVersion: actions.summerwind.dev/v1alpha1
|
apiVersion: actions.summerwind.dev/v1alpha1
|
||||||
kind: Runner
|
kind: RunnerDeployment
|
||||||
metadata:
|
metadata:
|
||||||
name: example-org-runner
|
name: example-runnerdeploy
|
||||||
spec:
|
spec:
|
||||||
organization: your-organization-name
|
replicas: 1
|
||||||
|
template:
|
||||||
|
spec:
|
||||||
|
organization: your-organization-name
|
||||||
```
|
```
|
||||||
|
|
||||||
Now you can see the runner on the organization level (if you have organization owner permissions).
|
Now you can see the runner on the organization level (if you have organization owner permissions).
|
||||||
@@ -339,24 +342,22 @@ Now you can see the runner on the organization level (if you have organization o
|
|||||||
To add the runner to an enterprise, you only need to replace the `repository` field with `enterprise`, so the runner will register itself to the enterprise.
|
To add the runner to an enterprise, you only need to replace the `repository` field with `enterprise`, so the runner will register itself to the enterprise.
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
# runner.yaml
|
|
||||||
apiVersion: actions.summerwind.dev/v1alpha1
|
apiVersion: actions.summerwind.dev/v1alpha1
|
||||||
kind: Runner
|
kind: RunnerDeployment
|
||||||
metadata:
|
metadata:
|
||||||
name: example-enterprise-runner
|
name: example-runnerdeploy
|
||||||
spec:
|
spec:
|
||||||
enterprise: your-enterprise-name
|
replicas: 1
|
||||||
|
template:
|
||||||
|
spec:
|
||||||
|
enterprise: your-enterprise-name
|
||||||
```
|
```
|
||||||
|
|
||||||
Now you can see the runner on the enterprise level (if you have enterprise access permissions).
|
Now you can see the runner on the enterprise level (if you have enterprise access permissions).
|
||||||
|
|
||||||
### RunnerDeployments
|
### RunnerDeployments
|
||||||
|
|
||||||
You can manage sets of runners instead of individually through the `RunnerDeployment` kind and its `replicas:` attribute. This kind is required for many of the advanced features.
|
In our previous examples we were deploying a single runner via the `RunnerDeployment` kind, the amount of runners deployed can be statically set via the `replicas:` field, we can increase this value to deploy additioanl sets of runners instead:
|
||||||
|
|
||||||
There are `RunnerReplicaSet` and `RunnerDeployment` kinds that corresponds to the `ReplicaSet` and `Deployment` kinds but for the `Runner` kind.
|
|
||||||
|
|
||||||
You typically only need `RunnerDeployment` rather than `RunnerReplicaSet` as the former is for managing the latter.
|
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
# runnerdeployment.yaml
|
# runnerdeployment.yaml
|
||||||
@@ -365,11 +366,11 @@ kind: RunnerDeployment
|
|||||||
metadata:
|
metadata:
|
||||||
name: example-runnerdeploy
|
name: example-runnerdeploy
|
||||||
spec:
|
spec:
|
||||||
|
# This will deploy 2 runners now
|
||||||
replicas: 2
|
replicas: 2
|
||||||
template:
|
template:
|
||||||
spec:
|
spec:
|
||||||
repository: mumoshu/actions-runner-controller-ci
|
repository: mumoshu/actions-runner-controller-ci
|
||||||
env: []
|
|
||||||
```
|
```
|
||||||
|
|
||||||
Apply the manifest file to your cluster:
|
Apply the manifest file to your cluster:
|
||||||
@@ -388,15 +389,13 @@ example-runnerdeploy2475h595fr mumoshu/actions-runner-controller-ci Running
|
|||||||
example-runnerdeploy2475ht2qbr mumoshu/actions-runner-controller-ci Running
|
example-runnerdeploy2475ht2qbr mumoshu/actions-runner-controller-ci Running
|
||||||
```
|
```
|
||||||
|
|
||||||
### RunnerSets
|
### RunnerSets
|
||||||
|
|
||||||
> This feature requires controller version => [v0.20.0](https://github.com/actions-runner-controller/actions-runner-controller/releases/tag/v0.20.0)
|
> This feature requires controller version => [v0.20.0](https://github.com/actions-runner-controller/actions-runner-controller/releases/tag/v0.20.0)
|
||||||
|
|
||||||
_Ensure you see the limitations before using this kind!!!!!_
|
_Ensure you see the limitations before using this kind!!!!!_
|
||||||
|
|
||||||
For scenarios where you require the advantages of a `StatefulSet`, for example persistent storage, ARC implements a runner based on Kubernetes' `StatefulSets`, the `RunnerSet`.
|
We can also deploy sets of RunnerSets the same way, a basic `RunnerSet` would look like this:
|
||||||
|
|
||||||
A basic `RunnerSet` would look like this:
|
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
apiVersion: actions.summerwind.dev/v1alpha1
|
apiVersion: actions.summerwind.dev/v1alpha1
|
||||||
@@ -404,8 +403,7 @@ kind: RunnerSet
|
|||||||
metadata:
|
metadata:
|
||||||
name: example
|
name: example
|
||||||
spec:
|
spec:
|
||||||
ephemeral: false
|
replicas: 1
|
||||||
replicas: 2
|
|
||||||
repository: mumoshu/actions-runner-controller-ci
|
repository: mumoshu/actions-runner-controller-ci
|
||||||
# Other mandatory fields from StatefulSet
|
# Other mandatory fields from StatefulSet
|
||||||
selector:
|
selector:
|
||||||
@@ -418,7 +416,7 @@ spec:
|
|||||||
app: example
|
app: example
|
||||||
```
|
```
|
||||||
|
|
||||||
As it is based on `StatefulSet`, `selector` and `template.medatada.labels` it needs to be defined and have the exact same set of labels. `serviceName` must be set to some non-empty string as it is also required by `StatefulSet`.
|
As it is based on `StatefulSet`, `selector` and `template.metadata.labels` it needs to be defined and have the exact same set of labels. `serviceName` must be set to some non-empty string as it is also required by `StatefulSet`.
|
||||||
|
|
||||||
Runner-related fields like `ephemeral`, `repository`, `organization`, `enterprise`, and so on should be written directly under `spec`.
|
Runner-related fields like `ephemeral`, `repository`, `organization`, `enterprise`, and so on should be written directly under `spec`.
|
||||||
|
|
||||||
@@ -436,8 +434,7 @@ kind: RunnerSet
|
|||||||
metadata:
|
metadata:
|
||||||
name: example
|
name: example
|
||||||
spec:
|
spec:
|
||||||
ephemeral: false
|
replicas: 1
|
||||||
replicas: 2
|
|
||||||
repository: mumoshu/actions-runner-controller-ci
|
repository: mumoshu/actions-runner-controller-ci
|
||||||
dockerdWithinRunnerContainer: true
|
dockerdWithinRunnerContainer: true
|
||||||
template:
|
template:
|
||||||
@@ -718,7 +715,7 @@ With the above example, the webhook server scales `example-runners` by `1` repli
|
|||||||
|
|
||||||
Of note is the `HRA.spec.scaleUpTriggers[].duration` attribute. This attribute is used to calculate if the replica number added via the trigger is expired or not. On each reconciliation loop, the controller sums up all the non-expiring replica numbers from previous scale-up triggers. It then compares the summed desired replica number against the current replica number. If the summed desired replica number > the current number then it means the replica count needs to scale up.
|
Of note is the `HRA.spec.scaleUpTriggers[].duration` attribute. This attribute is used to calculate if the replica number added via the trigger is expired or not. On each reconciliation loop, the controller sums up all the non-expiring replica numbers from previous scale-up triggers. It then compares the summed desired replica number against the current replica number. If the summed desired replica number > the current number then it means the replica count needs to scale up.
|
||||||
|
|
||||||
As mentioned previously, the `scaleDownDelaySecondsAfterScaleOut` property has the final say still. If the latest scale-up time + the anti-flapping duration is later than the current time, it doesn’t immediately scale up and instead retries the calculation again later to see if it needs to scale yet.
|
As mentioned previously, the `scaleDownDelaySecondsAfterScaleOut` property has the final say still. If the latest scale-up time + the anti-flapping duration is later than the current time, it doesn’t immediately scale down and instead retries the calculation again later to see if it needs to scale yet.
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
@@ -726,6 +723,8 @@ The primary benefit of autoscaling on Webhooks compared to the pull driven scali
|
|||||||
|
|
||||||
> You can learn the implementation details in [#282](https://github.com/actions-runner-controller/actions-runner-controller/pull/282)
|
> You can learn the implementation details in [#282](https://github.com/actions-runner-controller/actions-runner-controller/pull/282)
|
||||||
|
|
||||||
|
##### Install with Helm
|
||||||
|
|
||||||
To enable this feature, you first need to install the GitHub webhook server. To install via our Helm chart,
|
To enable this feature, you first need to install the GitHub webhook server. To install via our Helm chart,
|
||||||
_[see the values documentation for all configuration options](https://github.com/actions-runner-controller/actions-runner-controller/blob/master/charts/actions-runner-controller/README.md)_
|
_[see the values documentation for all configuration options](https://github.com/actions-runner-controller/actions-runner-controller/blob/master/charts/actions-runner-controller/README.md)_
|
||||||
|
|
||||||
@@ -839,12 +838,49 @@ alongside your webhook on the Settings -> Webhooks page.
|
|||||||
Once you were able to confirm that the Webhook server is ready and running from GitHub create or update your
|
Once you were able to confirm that the Webhook server is ready and running from GitHub create or update your
|
||||||
`HorizontalRunnerAutoscaler` resources by learning the following configuration examples.
|
`HorizontalRunnerAutoscaler` resources by learning the following configuration examples.
|
||||||
|
|
||||||
|
##### Install with Kustomize
|
||||||
|
|
||||||
|
To install this feature using Kustomize, add `github-webhook-server` resources to your `kustomization.yaml` file as in the example below:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
apiVersion: kustomize.config.k8s.io/v1beta1
|
||||||
|
kind: Kustomization
|
||||||
|
|
||||||
|
resources:
|
||||||
|
# You should already have this
|
||||||
|
- github.com/actions-runner-controller/actions-runner-controller/config//default?ref=v0.22.2
|
||||||
|
# Add the below!
|
||||||
|
- github.com/actions-runner-controller/actions-runner-controller/config//github-webhook-server?ref=v0.22.2
|
||||||
|
|
||||||
|
Finally, you will have to configure an ingress so that you may configure the webhook in github. An example of such ingress can be find below:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
apiVersion: networking.k8s.io/v1
|
||||||
|
kind: Ingress
|
||||||
|
metadata:
|
||||||
|
name: actions-runners-webhook-server
|
||||||
|
spec:
|
||||||
|
rules:
|
||||||
|
- http:
|
||||||
|
paths:
|
||||||
|
- path: /
|
||||||
|
backend:
|
||||||
|
service:
|
||||||
|
name: github-webhook-server
|
||||||
|
port:
|
||||||
|
number: 80
|
||||||
|
pathType: Exact
|
||||||
|
|
||||||
|
```
|
||||||
|
|
||||||
|
##### Examples
|
||||||
|
|
||||||
- [Example 1: Scale on each `workflow_job` event](#example-1-scale-on-each-workflow_job-event)
|
- [Example 1: Scale on each `workflow_job` event](#example-1-scale-on-each-workflow_job-event)
|
||||||
- [Example 2: Scale up on each `check_run` event](#example-2-scale-up-on-each-check_run-event)
|
- [Example 2: Scale up on each `check_run` event](#example-2-scale-up-on-each-check_run-event)
|
||||||
- [Example 3: Scale on each `pull_request` event against a given set of branches](#example-3-scale-on-each-pull_request-event-against-a-given-set-of-branches)
|
- [Example 3: Scale on each `pull_request` event against a given set of branches](#example-3-scale-on-each-pull_request-event-against-a-given-set-of-branches)
|
||||||
- [Example 4: Scale on each `push` event](#example-4-scale-on-each-push-event)
|
- [Example 4: Scale on each `push` event](#example-4-scale-on-each-push-event)
|
||||||
|
|
||||||
##### Example 1: Scale on each `workflow_job` event
|
###### Example 1: Scale on each `workflow_job` event
|
||||||
|
|
||||||
> This feature requires controller version => [v0.20.0](https://github.com/actions-runner-controller/actions-runner-controller/releases/tag/v0.20.0)
|
> This feature requires controller version => [v0.20.0](https://github.com/actions-runner-controller/actions-runner-controller/releases/tag/v0.20.0)
|
||||||
|
|
||||||
@@ -888,7 +924,7 @@ You can configure your GitHub webhook settings to only include `Workflows Job` e
|
|||||||
|
|
||||||
Each kind has a `status` of `queued`, `in_progress` and `completed`. With the above configuration, `actions-runner-controller` adds one runner for a `workflow_job` event whose `status` is `queued`. Similarly, it removes one runner for a `workflow_job` event whose `status` is `completed`. The caveat to this to remember is that this scale-down is within the bounds of your `scaleDownDelaySecondsAfterScaleOut` configuration, if this time hasn't passed the scale down will be deferred.
|
Each kind has a `status` of `queued`, `in_progress` and `completed`. With the above configuration, `actions-runner-controller` adds one runner for a `workflow_job` event whose `status` is `queued`. Similarly, it removes one runner for a `workflow_job` event whose `status` is `completed`. The caveat to this to remember is that this scale-down is within the bounds of your `scaleDownDelaySecondsAfterScaleOut` configuration, if this time hasn't passed the scale down will be deferred.
|
||||||
|
|
||||||
##### Example 2: Scale up on each `check_run` event
|
###### Example 2: Scale up on each `check_run` event
|
||||||
|
|
||||||
> Note: This should work almost like https://github.com/philips-labs/terraform-aws-github-runner
|
> Note: This should work almost like https://github.com/philips-labs/terraform-aws-github-runner
|
||||||
|
|
||||||
@@ -950,7 +986,7 @@ spec:
|
|||||||
duration: "5m"
|
duration: "5m"
|
||||||
```
|
```
|
||||||
|
|
||||||
##### Example 3: Scale on each `pull_request` event against a given set of branches
|
###### Example 3: Scale on each `pull_request` event against a given set of branches
|
||||||
|
|
||||||
To scale up replicas of the runners for `example/myrepo` by 1 for 5 minutes on each `pull_request` against the `main` or `develop` branch you write manifests like the below:
|
To scale up replicas of the runners for `example/myrepo` by 1 for 5 minutes on each `pull_request` against the `main` or `develop` branch you write manifests like the below:
|
||||||
|
|
||||||
@@ -1103,9 +1139,13 @@ The earlier entry is prioritized higher than later entries. So you usually defin
|
|||||||
|
|
||||||
A common use case for this may be to have 1 override to scale to 0 during the week outside of core business hours and another override to scale to 0 during all hours of the weekend.
|
A common use case for this may be to have 1 override to scale to 0 during the week outside of core business hours and another override to scale to 0 during all hours of the weekend.
|
||||||
|
|
||||||
### Runner with DinD
|
### Alternative Runners
|
||||||
|
|
||||||
When using the default runner, the runner pod starts up 2 containers: runner and DinD (Docker-in-Docker). This might create issues if there's `LimitRange` set to namespace.
|
ARC also offers a few alternative runner options
|
||||||
|
|
||||||
|
#### Runner with DinD
|
||||||
|
|
||||||
|
When using the default runner, the runner pod starts up 2 containers: runner and DinD (Docker-in-Docker). ARC maintains an alternative all in one runner image with docker running in the same container as the runner. This may be prefered from a resource or complexity perspective or to be compliant with a `LimitRange` namespace configuration.
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
# dindrunnerdeployment.yaml
|
# dindrunnerdeployment.yaml
|
||||||
@@ -1123,7 +1163,35 @@ spec:
|
|||||||
env: []
|
env: []
|
||||||
```
|
```
|
||||||
|
|
||||||
This also helps with resources, as you don't need to give resources separately to docker and runner.
|
#### Runner with K8s Jobs
|
||||||
|
|
||||||
|
When using the default runner, jobs that use a container will run in docker. This necessitates privileged mode, either on the runner pod or the sidecar container
|
||||||
|
|
||||||
|
By setting the container mode, you can instead invoke these jobs using a [kubernetes implementation](https://github.com/actions/runner-container-hooks/tree/main/packages/k8s) while not executing in privileged mode.
|
||||||
|
|
||||||
|
The runner will dynamically spin up pods and k8s jobs in the runner's namespace to run the workflow, so a `workVolumeClaimTemplate` is required for the runner's working directory, and a service account with the [appropriate permissions.](https://github.com/actions/runner-container-hooks/tree/main/packages/k8s#pre-requisites)
|
||||||
|
|
||||||
|
There are some [limitations](https://github.com/actions/runner-container-hooks/tree/main/packages/k8s#limitations) to this approach, mainly [job containers](https://docs.github.com/en/actions/using-jobs/running-jobs-in-a-container) are required on all workflows.
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
# runner.yaml
|
||||||
|
apiVersion: actions.summerwind.dev/v1alpha1
|
||||||
|
kind: Runner
|
||||||
|
metadata:
|
||||||
|
name: example-runner
|
||||||
|
spec:
|
||||||
|
repository: example/myrepo
|
||||||
|
containerMode: kubernetes
|
||||||
|
serviceAccountName: my-service-account
|
||||||
|
workVolumeClaimTemplate:
|
||||||
|
storageClassName: "my-dynamic-storage-class"
|
||||||
|
accessModes:
|
||||||
|
- ReadWriteOnce
|
||||||
|
resources:
|
||||||
|
requests:
|
||||||
|
storage: 10Gi
|
||||||
|
env: []
|
||||||
|
```
|
||||||
|
|
||||||
### Additional Tweaks
|
### Additional Tweaks
|
||||||
|
|
||||||
@@ -1142,6 +1210,7 @@ spec:
|
|||||||
annotations:
|
annotations:
|
||||||
cluster-autoscaler.kubernetes.io/safe-to-evict: "true"
|
cluster-autoscaler.kubernetes.io/safe-to-evict: "true"
|
||||||
spec:
|
spec:
|
||||||
|
priorityClassName: "high"
|
||||||
nodeSelector:
|
nodeSelector:
|
||||||
node-role.kubernetes.io/test: ""
|
node-role.kubernetes.io/test: ""
|
||||||
|
|
||||||
@@ -1315,7 +1384,7 @@ spec:
|
|||||||
- name: tmp
|
- name: tmp
|
||||||
emptyDir:
|
emptyDir:
|
||||||
medium: Memory
|
medium: Memory
|
||||||
emphemeral: true # recommended to not leak data between builds.
|
ephemeral: true # recommended to not leak data between builds.
|
||||||
```
|
```
|
||||||
|
|
||||||
#### NVME SSD
|
#### NVME SSD
|
||||||
@@ -1323,7 +1392,7 @@ spec:
|
|||||||
In this example we provide NVME backed storage for the workdir, docker sidecar and /tmp within the runner.
|
In this example we provide NVME backed storage for the workdir, docker sidecar and /tmp within the runner.
|
||||||
Here we use a working example on GKE, which will provide the NVME disk at /mnt/disks/ssd0. We will be placing the respective volumes in subdirs here and in order to be able to run multiple runners we will use the pod name as a prefix for subdirectories. Also the disk will fill up over time and disk space will not be freed until the node is removed.
|
Here we use a working example on GKE, which will provide the NVME disk at /mnt/disks/ssd0. We will be placing the respective volumes in subdirs here and in order to be able to run multiple runners we will use the pod name as a prefix for subdirectories. Also the disk will fill up over time and disk space will not be freed until the node is removed.
|
||||||
|
|
||||||
**Beware** that running these persistent backend volumes **leave data behind** between 2 different jobs on the workdir and `/tmp` with `emphemeral: false`.
|
**Beware** that running these persistent backend volumes **leave data behind** between 2 different jobs on the workdir and `/tmp` with `ephemeral: false`.
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
kind: RunnerDeployment
|
kind: RunnerDeployment
|
||||||
@@ -1364,7 +1433,7 @@ spec:
|
|||||||
- hostPath:
|
- hostPath:
|
||||||
path: /mnt/disks/ssd0
|
path: /mnt/disks/ssd0
|
||||||
name: tmp
|
name: tmp
|
||||||
emphemeral: true # VERY important. otherwise data inside the workdir and /tmp is not cleared between builds
|
ephemeral: true # VERY important. otherwise data inside the workdir and /tmp is not cleared between builds
|
||||||
```
|
```
|
||||||
|
|
||||||
#### Docker image layers caching
|
#### Docker image layers caching
|
||||||
@@ -1393,7 +1462,7 @@ spec:
|
|||||||
volumeMounts:
|
volumeMounts:
|
||||||
- name: var-lib-docker
|
- name: var-lib-docker
|
||||||
mountPath: /var/lib/docker
|
mountPath: /var/lib/docker
|
||||||
volumeClaimtemplates:
|
volumeClaimTemplates:
|
||||||
- metadata:
|
- metadata:
|
||||||
name: var-lib-docker
|
name: var-lib-docker
|
||||||
spec:
|
spec:
|
||||||
@@ -1499,7 +1568,6 @@ jobs:
|
|||||||
When you have multiple kinds of self-hosted runners, you can distinguish between them using labels. In order to do so, you can specify one or more labels in your `Runner` or `RunnerDeployment` spec.
|
When you have multiple kinds of self-hosted runners, you can distinguish between them using labels. In order to do so, you can specify one or more labels in your `Runner` or `RunnerDeployment` spec.
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
# runnerdeployment.yaml
|
|
||||||
apiVersion: actions.summerwind.dev/v1alpha1
|
apiVersion: actions.summerwind.dev/v1alpha1
|
||||||
kind: RunnerDeployment
|
kind: RunnerDeployment
|
||||||
metadata:
|
metadata:
|
||||||
@@ -1521,7 +1589,10 @@ jobs:
|
|||||||
runs-on: custom-runner
|
runs-on: custom-runner
|
||||||
```
|
```
|
||||||
|
|
||||||
Note that if you specify `self-hosted` in your workflow, then this will run your job on _any_ self-hosted runner, regardless of the labels that they have.
|
When using labels there are a few things to be aware of:
|
||||||
|
|
||||||
|
1. `self-hosted` is implicit with every runner as this is an automatic label GitHub applies to any self-hosted runner. As a result ARC can treat all runners as having this label without having it explicitly defined in a runner's manifest. You do not need to explicitly define this label in your runner manifests (you can if you want though).
|
||||||
|
2. In addition to the `self-hosted` label, GitHub also applies a few other [default](https://docs.github.com/en/actions/hosting-your-own-runners/using-self-hosted-runners-in-a-workflow#using-default-labels-to-route-jobs) labels to any self-hosted runner. The other default labels relate to the architecture of the runner and so can't be implicitly applied by ARC as ARC doesn't know if the runner is `linux` or `windows`, `x64` or `ARM64` etc. If you wish to use these labels in your workflows and have ARC scale runners accurately you must also add them to your runner manifests.
|
||||||
|
|
||||||
### Runner Groups
|
### Runner Groups
|
||||||
|
|
||||||
@@ -1530,7 +1601,6 @@ Runner groups can be used to limit which repositories are able to use the GitHub
|
|||||||
To add the runner to the group `NewGroup`, specify the group in your `Runner` or `RunnerDeployment` spec.
|
To add the runner to the group `NewGroup`, specify the group in your `Runner` or `RunnerDeployment` spec.
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
# runnerdeployment.yaml
|
|
||||||
apiVersion: actions.summerwind.dev/v1alpha1
|
apiVersion: actions.summerwind.dev/v1alpha1
|
||||||
kind: RunnerDeployment
|
kind: RunnerDeployment
|
||||||
metadata:
|
metadata:
|
||||||
@@ -1579,12 +1649,6 @@ spec:
|
|||||||
# Disables automatic runner updates
|
# Disables automatic runner updates
|
||||||
- name: DISABLE_RUNNER_UPDATE
|
- name: DISABLE_RUNNER_UPDATE
|
||||||
value: "true"
|
value: "true"
|
||||||
# Configure runner with legacy --once instead of --ephemeral flag
|
|
||||||
# WARNING | THIS ENV VAR IS DEPRECATED AND WILL BE REMOVED
|
|
||||||
# THIS ENV VAR WILL BE REMOVED SOON.
|
|
||||||
# SEE ISSUE #1196 FOR DETAILS
|
|
||||||
- name: RUNNER_FEATURE_FLAG_ONCE
|
|
||||||
value: "true"
|
|
||||||
```
|
```
|
||||||
|
|
||||||
### Using IRSA (IAM Roles for Service Accounts) in EKS
|
### Using IRSA (IAM Roles for Service Accounts) in EKS
|
||||||
@@ -1679,6 +1743,64 @@ $ helm --upgrade install actions-runner-controller/actions-runner-controller \
|
|||||||
admissionWebHooks.caBundle=${CA_BUNDLE}
|
admissionWebHooks.caBundle=${CA_BUNDLE}
|
||||||
```
|
```
|
||||||
|
|
||||||
|
### Multitenancy
|
||||||
|
|
||||||
|
> This feature requires controller version => [v0.26.0](https://github.com/actions-runner-controller/actions-runner-controller/releases/tag/v0.26.0)
|
||||||
|
|
||||||
|
In a large enterprise, there might be many GitHub organizations that requires self-hosted runners. Previously, the only way to provide ARC-managed self-hosted runners in such environment was [Deploying Multiple Controllers](#deploying-multiple-controllers), which incurs overhead due to it requires one ARC installation per GitHub organization.
|
||||||
|
|
||||||
|
With multitenancy, you can let ARC manage self-hosted runners across organizations. It's enabled by default and the only thing you need to start using it is to set the `spec.githubAPICredentialsFrom.secretRef.name` fields for the following resources:
|
||||||
|
|
||||||
|
- `HorizontalRunnerAutoscaler`
|
||||||
|
- `RunnerSet`
|
||||||
|
|
||||||
|
Or `spec.template.spec.githubAPICredentialsFrom.secretRef.name` field for the following resource:
|
||||||
|
|
||||||
|
- `RunnerDeployment`
|
||||||
|
|
||||||
|
> Although not explained above, `spec.githubAPICredentialsFrom` fields do exist in `Runner` and `RunnerReplicaSet`. A comparable pod annotation exists for the runner pod, too.
|
||||||
|
> However, note that `Runner`, `RunnerReplicaSet` and runner pods are implementation details and are managed by `RunnerDeployment` and ARC.
|
||||||
|
> Usually you don't need to manually set the fields for those resources.
|
||||||
|
|
||||||
|
`githubAPICredentialsFrom.secretRef.name` should refer to the name of the Kubernetes secret that contains either PAT or GitHub App credentials that is used for GitHub API calls for the said resource.
|
||||||
|
|
||||||
|
Usually, you should have a set of GitHub App credentials per a GitHub organization and you would have a RunnerDeployment and a HorizontalRunnerAutoscaler per an organization runner group. So, you might end up having the following resources for each organization:
|
||||||
|
|
||||||
|
- 1 Kubernetes secret that contains GitHub App credentials
|
||||||
|
- 1 RunnerDeployment/RunnerSet and 1 HorizontalRunnerAutoscaler per Runner Group
|
||||||
|
|
||||||
|
And the RunnerDeployment/RunnerSet and HorizontalRunnerAutoscaler should have the same value for `spec.githubAPICredentialsFrom.secretRef.name`, which refers to the name of the Kubernetes secret.
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
kind: Secret
|
||||||
|
data:
|
||||||
|
github_app_id: ...
|
||||||
|
github_app_installation_id: ...
|
||||||
|
github_app_private_key: ...
|
||||||
|
---
|
||||||
|
kind: RunnerDeployment
|
||||||
|
metadata:
|
||||||
|
namespace: org1-runners
|
||||||
|
spec:
|
||||||
|
githubAPICredentialsFrom:
|
||||||
|
secretRef:
|
||||||
|
name: org1-github-app
|
||||||
|
---
|
||||||
|
kind: HorizontalRunnerAutoscaler
|
||||||
|
metadata:
|
||||||
|
namespace: org1-runners
|
||||||
|
spec:
|
||||||
|
githubAPICredentialsFrom:
|
||||||
|
secretRef:
|
||||||
|
name: org1-github-app
|
||||||
|
```
|
||||||
|
|
||||||
|
> Do note that, as shown in the above example, you usually set the same secret name to `githubAPICredentialsFrom.secretRef.name` fields of both `RunnerDeployment` and `HorizontalRunnerAutoscaler`, so that GitHub API calls for the same set of runners shares the specified credentials, regardless of
|
||||||
|
when and which varying ARC component(`horizontalrunnerautoscaler-controller`, `runnerdeployment-controller`, `runnerreplicaset-controller`, `runner-controller` or `runnerpod-controller`) makes specific API calls.
|
||||||
|
> Just don't be surprised you have to repeat `githubAPICredentialsFrom.secretRef.name` settings among two resources!
|
||||||
|
|
||||||
|
Please refer to [Deploying Using GitHub App Authentication](#deploying-using-github-app-authentication) for how you could create the Kubernetes secret containing GitHub App credentials.
|
||||||
|
|
||||||
# Troubleshooting
|
# Troubleshooting
|
||||||
|
|
||||||
See [troubleshooting guide](TROUBLESHOOTING.md) for solutions to various problems people have run into consistently.
|
See [troubleshooting guide](TROUBLESHOOTING.md) for solutions to various problems people have run into consistently.
|
||||||
|
|||||||
@@ -2,8 +2,9 @@
|
|||||||
|
|
||||||
* [Tools](#tools)
|
* [Tools](#tools)
|
||||||
* [Installation](#installation)
|
* [Installation](#installation)
|
||||||
|
* [InternalError when calling webhook: context deadline exceeded](#internalerror-when-calling-webhook-context-deadline-exceeded)
|
||||||
* [Invalid header field value](#invalid-header-field-value)
|
* [Invalid header field value](#invalid-header-field-value)
|
||||||
* [Deployment fails on GKE due to webhooks](#deployment-fails-on-gke-due-to-webhooks)
|
* [Helm chart install failure: certificate signed by unknown authority](#helm-chart-install-failure-certificate-signed-by-unknown-authority)
|
||||||
* [Operations](#operations)
|
* [Operations](#operations)
|
||||||
* [Stuck runner kind or backing pod](#stuck-runner-kind-or-backing-pod)
|
* [Stuck runner kind or backing pod](#stuck-runner-kind-or-backing-pod)
|
||||||
* [Delay in jobs being allocated to runners](#delay-in-jobs-being-allocated-to-runners)
|
* [Delay in jobs being allocated to runners](#delay-in-jobs-being-allocated-to-runners)
|
||||||
@@ -22,39 +23,37 @@ A list of tools which are helpful for troubleshooting
|
|||||||
|
|
||||||
Troubleshooting runbooks that relate to ARC installation problems
|
Troubleshooting runbooks that relate to ARC installation problems
|
||||||
|
|
||||||
### Invalid header field value
|
### InternalError when calling webhook: context deadline exceeded
|
||||||
|
|
||||||
**Problem**
|
**Problem**
|
||||||
|
|
||||||
```json
|
This issue can come up for various reasons like leftovers from previous installations or not being able to access the K8s service's clusterIP associated with the admission webhook server (of ARC).
|
||||||
2020-11-12T22:17:30.693Z ERROR controller-runtime.controller Reconciler error
|
|
||||||
{
|
```
|
||||||
"controller": "runner",
|
Internal error occurred: failed calling webhook "mutate.runnerdeployment.actions.summerwind.dev":
|
||||||
"request": "actions-runner-system/runner-deployment-dk7q8-dk5c9",
|
Post "https://actions-runner-controller-webhook.actions-runner-system.svc:443/mutate-actions-summerwind-dev-v1alpha1-runnerdeployment?timeout=10s": context deadline exceeded
|
||||||
"error": "failed to create registration token: Post \"https://api.github.com/orgs/$YOUR_ORG_HERE/actions/runners/registration-token\": net/http: invalid header field value \"Bearer $YOUR_TOKEN_HERE\\n\" for key Authorization"
|
|
||||||
}
|
|
||||||
```
|
```
|
||||||
|
|
||||||
**Solution**
|
**Solution**
|
||||||
|
|
||||||
Your base64'ed PAT token has a new line at the end, it needs to be created without a `\n` added, either:
|
First we will try the common solution of checking webhook leftovers from previous installations:
|
||||||
* `echo -n $TOKEN | base64`
|
|
||||||
* Create the secret as described in the docs using the shell and documented flags
|
1. ```bash
|
||||||
|
kubectl get validatingwebhookconfiguration -A
|
||||||
|
kubectl get mutatingwebhookconfiguration -A
|
||||||
|
```
|
||||||
|
2. If you see any webhooks related to actions-runner-controller, delete them:
|
||||||
|
```bash
|
||||||
|
kubectl delete mutatingwebhookconfiguration actions-runner-controller-mutating-webhook-configuration
|
||||||
|
kubectl delete validatingwebhookconfiguration actions-runner-controller-validating-webhook-configuration
|
||||||
|
```
|
||||||
|
|
||||||
|
If that didn't work then probably your K8s control-plane is somehow unable to access the K8s service's clusterIP associated with the admission webhook server:
|
||||||
|
1. You're running apiserver as a binary and you didn't make service cluster IPs available to the host network.
|
||||||
|
2. You're running the apiserver in the pod but your pod network (i.e. CNI plugin installation and config) is not good so your pods(like kube-apiserver) in the K8s control-plane nodes can't access ARC's admission webhook server pod(s) in probably data-plane nodes.
|
||||||
|
|
||||||
|
|
||||||
### Deployment fails on GKE due to webhooks
|
Another reason could be due to GKEs firewall settings you may run into the following errors when trying to deploy runners on a private GKE cluster:
|
||||||
|
|
||||||
**Problem**
|
|
||||||
|
|
||||||
Due to GKEs firewall settings you may run into the following errors when trying to deploy runners on a private GKE cluster:
|
|
||||||
|
|
||||||
```
|
|
||||||
Internal error occurred: failed calling webhook "mutate.runner.actions.summerwind.dev":
|
|
||||||
Post https://webhook-service.actions-runner-system.svc:443/mutate-actions-summerwind-dev-v1alpha1-runner?timeout=10s:
|
|
||||||
context deadline exceeded
|
|
||||||
```
|
|
||||||
|
|
||||||
**Solution**<br />
|
|
||||||
|
|
||||||
To fix this, you may either:
|
To fix this, you may either:
|
||||||
|
|
||||||
@@ -88,6 +87,57 @@ To fix this, you may either:
|
|||||||
gcloud compute firewall-rules create k8s-cert-manager --source-ranges $SOURCE --target-tags $WORKER_NODES_TAG --allow TCP:9443 --network $NETWORK
|
gcloud compute firewall-rules create k8s-cert-manager --source-ranges $SOURCE --target-tags $WORKER_NODES_TAG --allow TCP:9443 --network $NETWORK
|
||||||
```
|
```
|
||||||
|
|
||||||
|
### Invalid header field value
|
||||||
|
|
||||||
|
**Problem**
|
||||||
|
|
||||||
|
```json
|
||||||
|
2020-11-12T22:17:30.693Z ERROR controller-runtime.controller Reconciler error
|
||||||
|
{
|
||||||
|
"controller": "runner",
|
||||||
|
"request": "actions-runner-system/runner-deployment-dk7q8-dk5c9",
|
||||||
|
"error": "failed to create registration token: Post \"https://api.github.com/orgs/$YOUR_ORG_HERE/actions/runners/registration-token\": net/http: invalid header field value \"Bearer $YOUR_TOKEN_HERE\\n\" for key Authorization"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Solution**
|
||||||
|
|
||||||
|
Your base64'ed PAT token has a new line at the end, it needs to be created without a `\n` added, either:
|
||||||
|
* `echo -n $TOKEN | base64`
|
||||||
|
* Create the secret as described in the docs using the shell and documented flags
|
||||||
|
|
||||||
|
### Helm chart install failure: certificate signed by unknown authority
|
||||||
|
|
||||||
|
**Problem**
|
||||||
|
|
||||||
|
```
|
||||||
|
Error: UPGRADE FAILED: failed to create resource: Internal error occurred: failed calling webhook "webhook.cert-manager.io": failed to call webhook: Post "https://cert-manager-webhook.cert-manager.svc:443/mutate?timeout=10s": x509: certificate signed by unknown authority
|
||||||
|
```
|
||||||
|
|
||||||
|
Apparently, it's failing while `helm` is creating one of the resources defined in the ARC chart and the cause was that cert-manager's webhook is not working correctly, due to the missing or the invalid CA certificate.
|
||||||
|
|
||||||
|
You'd try to tail logs from the `cert-manager-cainjector` and see it's failing with an error like:
|
||||||
|
|
||||||
|
```
|
||||||
|
$ kubectl -n cert-manager logs cert-manager-cainjector-7cdbb9c945-g6bt4
|
||||||
|
I0703 03:31:55.159339 1 start.go:91] "starting" version="v1.1.1" revision="3ac7418070e22c87fae4b22603a6b952f797ae96"
|
||||||
|
I0703 03:31:55.615061 1 leaderelection.go:243] attempting to acquire leader lease kube-system/cert-manager-cainjector-leader-election...
|
||||||
|
I0703 03:32:10.738039 1 leaderelection.go:253] successfully acquired lease kube-system/cert-manager-cainjector-leader-election
|
||||||
|
I0703 03:32:10.739941 1 recorder.go:52] cert-manager/controller-runtime/manager/events "msg"="Normal" "message"="cert-manager-cainjector-7cdbb9c945-g6bt4_88e4bc70-eded-4343-a6fb-0ddd6434eb55 became leader" "object"={"kind":"ConfigMap","namespace":"kube-system","name":"cert-manager-cainjector-leader-election","uid":"942a021e-364c-461a-978c-f54a95723cdc","apiVersion":"v1","resourceVersion":"1576"} "reason"="LeaderElection"
|
||||||
|
E0703 03:32:11.192128 1 start.go:119] cert-manager/ca-injector "msg"="manager goroutine exited" "error"=null
|
||||||
|
I0703 03:32:12.339197 1 request.go:645] Throttling request took 1.047437675s, request: GET:https://10.96.0.1:443/apis/storage.k8s.io/v1beta1?timeout=32s
|
||||||
|
E0703 03:32:13.143790 1 start.go:151] cert-manager/ca-injector "msg"="Error registering certificate based controllers. Retrying after 5 seconds." "error"="no matches for kind \"MutatingWebhookConfiguration\" in version \"admissionregistration.k8s.io/v1beta1\""
|
||||||
|
Error: error registering secret controller: no matches for kind "MutatingWebhookConfiguration" in version "admissionregistration.k8s.io/v1beta1"
|
||||||
|
```
|
||||||
|
|
||||||
|
**Solution**
|
||||||
|
|
||||||
|
Your cluster is based on a new enough Kubernetes of version 1.22 or greater which does not support the legacy `admissionregistration.k8s.io/v1beta1` API anymore, and your `cert-manager` is not up-to-date hence it's still trying to use the legacy Kubernetes API.
|
||||||
|
|
||||||
|
In many cases, it's not an option to downgrade Kubernetes. So, just upgrade `cert-manager` to a more recent version that has support for the specific Kubernetes version you're using.
|
||||||
|
|
||||||
|
See https://cert-manager.io/docs/installation/supported-releases/ for the list of available cert-manager versions.
|
||||||
|
|
||||||
## Operations
|
## Operations
|
||||||
|
|
||||||
Troubleshooting runbooks that relate to ARC operational problems
|
Troubleshooting runbooks that relate to ARC operational problems
|
||||||
|
|||||||
97
acceptance/argotunnel.sh
Executable file
97
acceptance/argotunnel.sh
Executable file
@@ -0,0 +1,97 @@
|
|||||||
|
#!/usr/bin/env bash
|
||||||
|
|
||||||
|
# See https://developers.cloudflare.com/cloudflare-one/tutorials/many-cfd-one-tunnel/
|
||||||
|
|
||||||
|
kubectl create ns tunnel || :
|
||||||
|
|
||||||
|
kubectl -n tunnel delete secret tunnel-credentials || :
|
||||||
|
|
||||||
|
kubectl -n tunnel create secret generic tunnel-credentials \
|
||||||
|
--from-file=credentials.json=$HOME/.cloudflared/${TUNNEL_ID}.json || :
|
||||||
|
|
||||||
|
cat <<MANIFEST | kubectl -n tunnel ${OP} -f -
|
||||||
|
---
|
||||||
|
apiVersion: apps/v1
|
||||||
|
kind: Deployment
|
||||||
|
metadata:
|
||||||
|
name: cloudflared
|
||||||
|
spec:
|
||||||
|
selector:
|
||||||
|
matchLabels:
|
||||||
|
app: cloudflared
|
||||||
|
replicas: 2 # You could also consider elastic scaling for this deployment
|
||||||
|
template:
|
||||||
|
metadata:
|
||||||
|
labels:
|
||||||
|
app: cloudflared
|
||||||
|
spec:
|
||||||
|
containers:
|
||||||
|
- name: cloudflared
|
||||||
|
image: cloudflare/cloudflared:latest
|
||||||
|
args:
|
||||||
|
- tunnel
|
||||||
|
# Points cloudflared to the config file, which configures what
|
||||||
|
# cloudflared will actually do. This file is created by a ConfigMap
|
||||||
|
# below.
|
||||||
|
- --config
|
||||||
|
- /etc/cloudflared/config/config.yaml
|
||||||
|
- run
|
||||||
|
livenessProbe:
|
||||||
|
httpGet:
|
||||||
|
# Cloudflared has a /ready endpoint which returns 200 if and only if
|
||||||
|
# it has an active connection to the edge.
|
||||||
|
path: /ready
|
||||||
|
port: 2000
|
||||||
|
failureThreshold: 1
|
||||||
|
initialDelaySeconds: 10
|
||||||
|
periodSeconds: 10
|
||||||
|
volumeMounts:
|
||||||
|
- name: config
|
||||||
|
mountPath: /etc/cloudflared/config
|
||||||
|
readOnly: true
|
||||||
|
# Each tunnel has an associated "credentials file" which authorizes machines
|
||||||
|
# to run the tunnel. cloudflared will read this file from its local filesystem,
|
||||||
|
# and it'll be stored in a k8s secret.
|
||||||
|
- name: creds
|
||||||
|
mountPath: /etc/cloudflared/creds
|
||||||
|
readOnly: true
|
||||||
|
volumes:
|
||||||
|
- name: creds
|
||||||
|
secret:
|
||||||
|
secretName: tunnel-credentials
|
||||||
|
# Create a config.yaml file from the ConfigMap below.
|
||||||
|
- name: config
|
||||||
|
configMap:
|
||||||
|
name: cloudflared
|
||||||
|
items:
|
||||||
|
- key: config.yaml
|
||||||
|
path: config.yaml
|
||||||
|
---
|
||||||
|
# This ConfigMap is just a way to define the cloudflared config.yaml file in k8s.
|
||||||
|
# It's useful to define it in k8s, rather than as a stand-alone .yaml file, because
|
||||||
|
# this lets you use various k8s templating solutions (e.g. Helm charts) to
|
||||||
|
# parameterize your config, instead of just using string literals.
|
||||||
|
apiVersion: v1
|
||||||
|
kind: ConfigMap
|
||||||
|
metadata:
|
||||||
|
name: cloudflared
|
||||||
|
data:
|
||||||
|
config.yaml: |
|
||||||
|
# Name of the tunnel you want to run
|
||||||
|
tunnel: ${TUNNEL_NAME}
|
||||||
|
credentials-file: /etc/cloudflared/creds/credentials.json
|
||||||
|
# Serves the metrics server under /metrics and the readiness server under /ready
|
||||||
|
metrics: 0.0.0.0:2000
|
||||||
|
# Autoupdates applied in a k8s pod will be lost when the pod is removed or restarted, so
|
||||||
|
# autoupdate doesn't make sense in Kubernetes. However, outside of Kubernetes, we strongly
|
||||||
|
# recommend using autoupdate.
|
||||||
|
no-autoupdate: true
|
||||||
|
ingress:
|
||||||
|
# The first rule proxies traffic to the httpbin sample Service defined in app.yaml
|
||||||
|
- hostname: ${TUNNEL_HOSTNAME}
|
||||||
|
service: http://actions-runner-controller-github-webhook-server.actions-runner-system:80
|
||||||
|
# This rule matches any traffic which didn't match a previous rule, and responds with HTTP 404.
|
||||||
|
- service: http_status:404
|
||||||
|
MANIFEST
|
||||||
|
|
||||||
|
kubectl -n tunnel delete po -l app=cloudflared || :
|
||||||
@@ -51,6 +51,10 @@ if [ "${tool}" == "helm" ]; then
|
|||||||
--set image.tag=${VERSION} \
|
--set image.tag=${VERSION} \
|
||||||
--set podAnnotations.test-id=${TEST_ID} \
|
--set podAnnotations.test-id=${TEST_ID} \
|
||||||
--set githubWebhookServer.podAnnotations.test-id=${TEST_ID} \
|
--set githubWebhookServer.podAnnotations.test-id=${TEST_ID} \
|
||||||
|
--set imagePullSecrets[0].name=${IMAGE_PULL_SECRET} \
|
||||||
|
--set image.actionsRunnerImagePullSecrets[0].name=${IMAGE_PULL_SECRET} \
|
||||||
|
--set githubWebhookServer.imagePullSecrets[0].name=${IMAGE_PULL_SECRET} \
|
||||||
|
--set image.imagePullPolicy=${IMAGE_PULL_POLICY} \
|
||||||
-f ${VALUES_FILE}
|
-f ${VALUES_FILE}
|
||||||
set +v
|
set +v
|
||||||
# To prevent `CustomResourceDefinition.apiextensions.k8s.io "runners.actions.summerwind.dev" is invalid: metadata.annotations: Too long: must have at most 262144 bytes`
|
# To prevent `CustomResourceDefinition.apiextensions.k8s.io "runners.actions.summerwind.dev" is invalid: metadata.annotations: Too long: must have at most 262144 bytes`
|
||||||
@@ -76,56 +80,3 @@ kubectl -n actions-runner-system wait deploy/actions-runner-controller --for con
|
|||||||
|
|
||||||
# Adhocly wait for some time until actions-runner-controller's admission webhook gets ready
|
# Adhocly wait for some time until actions-runner-controller's admission webhook gets ready
|
||||||
sleep 20
|
sleep 20
|
||||||
|
|
||||||
RUNNER_LABEL=${RUNNER_LABEL:-self-hosted}
|
|
||||||
|
|
||||||
if [ -n "${TEST_REPO}" ]; then
|
|
||||||
if [ "${USE_RUNNERSET}" != "false" ]; then
|
|
||||||
cat acceptance/testdata/runnerset.envsubst.yaml | TEST_ENTERPRISE= TEST_ORG= RUNNER_MIN_REPLICAS=${REPO_RUNNER_MIN_REPLICAS} NAME=repo-runnerset envsubst | kubectl apply -f -
|
|
||||||
else
|
|
||||||
echo 'Deploying runnerdeployment and hra. Set USE_RUNNERSET if you want to deploy runnerset instead.'
|
|
||||||
cat acceptance/testdata/runnerdeploy.envsubst.yaml | TEST_ENTERPRISE= TEST_ORG= RUNNER_MIN_REPLICAS=${REPO_RUNNER_MIN_REPLICAS} NAME=repo-runnerdeploy envsubst | kubectl apply -f -
|
|
||||||
fi
|
|
||||||
else
|
|
||||||
echo 'Skipped deploying runnerdeployment and hra. Set TEST_REPO to "yourorg/yourrepo" to deploy.'
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [ -n "${TEST_ORG}" ]; then
|
|
||||||
if [ "${USE_RUNNERSET}" != "false" ]; then
|
|
||||||
cat acceptance/testdata/runnerset.envsubst.yaml | TEST_ENTERPRISE= TEST_REPO= RUNNER_MIN_REPLICAS=${ORG_RUNNER_MIN_REPLICAS} NAME=org-runnerset envsubst | kubectl apply -f -
|
|
||||||
else
|
|
||||||
cat acceptance/testdata/runnerdeploy.envsubst.yaml | TEST_ENTERPRISE= TEST_REPO= RUNNER_MIN_REPLICAS=${ORG_RUNNER_MIN_REPLICAS} NAME=org-runnerdeploy envsubst | kubectl apply -f -
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [ -n "${TEST_ORG_GROUP}" ]; then
|
|
||||||
if [ "${USE_RUNNERSET}" != "false" ]; then
|
|
||||||
cat acceptance/testdata/runnerset.envsubst.yaml | TEST_ENTERPRISE= TEST_REPO= RUNNER_MIN_REPLICAS=${ORG_RUNNER_MIN_REPLICAS} TEST_GROUP=${TEST_ORG_GROUP} NAME=orggroup-runnerset envsubst | kubectl apply -f -
|
|
||||||
else
|
|
||||||
cat acceptance/testdata/runnerdeploy.envsubst.yaml | TEST_ENTERPRISE= TEST_REPO= RUNNER_MIN_REPLICAS=${ORG_RUNNER_MIN_REPLICAS} TEST_GROUP=${TEST_ORG_GROUP} NAME=orggroup-runnerdeploy envsubst | kubectl apply -f -
|
|
||||||
fi
|
|
||||||
else
|
|
||||||
echo 'Skipped deploying enterprise runnerdeployment. Set TEST_ORG_GROUP to deploy.'
|
|
||||||
fi
|
|
||||||
else
|
|
||||||
echo 'Skipped deploying organizational runnerdeployment. Set TEST_ORG to deploy.'
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [ -n "${TEST_ENTERPRISE}" ]; then
|
|
||||||
if [ "${USE_RUNNERSET}" != "false" ]; then
|
|
||||||
cat acceptance/testdata/runnerset.envsubst.yaml | TEST_ORG= TEST_REPO= RUNNER_MIN_REPLICAS=${ENTERPRISE_RUNNER_MIN_REPLICAS} NAME=enterprise-runnerset envsubst | kubectl apply -f -
|
|
||||||
else
|
|
||||||
cat acceptance/testdata/runnerdeploy.envsubst.yaml | TEST_ORG= TEST_REPO= RUNNER_MIN_REPLICAS=${ENTERPRISE_RUNNER_MIN_REPLICAS} NAME=enterprise-runnerdeploy envsubst | kubectl apply -f -
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [ -n "${TEST_ENTERPRISE_GROUP}" ]; then
|
|
||||||
if [ "${USE_RUNNERSET}" != "false" ]; then
|
|
||||||
cat acceptance/testdata/runnerset.envsubst.yaml | TEST_ORG= TEST_REPO= RUNNER_MIN_REPLICAS=${ENTERPRISE_RUNNER_MIN_REPLICAS} TEST_GROUP=${TEST_ENTERPRISE_GROUP} NAME=enterprisegroup-runnerset envsubst | kubectl apply -f -
|
|
||||||
else
|
|
||||||
cat acceptance/testdata/runnerdeploy.envsubst.yaml | TEST_ORG= TEST_REPO= RUNNER_MIN_REPLICAS=${ENTERPRISE_RUNNER_MIN_REPLICAS} TEST_GROUP=${TEST_ENTERPRISE_GROUP} NAME=enterprisegroup-runnerdeploy envsubst | kubectl apply -f -
|
|
||||||
fi
|
|
||||||
else
|
|
||||||
echo 'Skipped deploying enterprise runnerdeployment. Set TEST_ENTERPRISE_GROUP to deploy.'
|
|
||||||
fi
|
|
||||||
else
|
|
||||||
echo 'Skipped deploying enterprise runnerdeployment. Set TEST_ENTERPRISE to deploy.'
|
|
||||||
fi
|
|
||||||
|
|||||||
58
acceptance/deploy_runners.sh
Executable file
58
acceptance/deploy_runners.sh
Executable file
@@ -0,0 +1,58 @@
|
|||||||
|
#!/usr/bin/env bash
|
||||||
|
|
||||||
|
set -e
|
||||||
|
|
||||||
|
OP=${OP:-apply}
|
||||||
|
|
||||||
|
RUNNER_LABEL=${RUNNER_LABEL:-self-hosted}
|
||||||
|
|
||||||
|
if [ -n "${TEST_REPO}" ]; then
|
||||||
|
if [ "${USE_RUNNERSET}" != "false" ]; then
|
||||||
|
cat acceptance/testdata/runnerset.envsubst.yaml | TEST_ENTERPRISE= TEST_ORG= RUNNER_MIN_REPLICAS=${REPO_RUNNER_MIN_REPLICAS} NAME=repo-runnerset envsubst | kubectl ${OP} -f -
|
||||||
|
else
|
||||||
|
echo "Running ${OP} runnerdeployment and hra. Set USE_RUNNERSET if you want to deploy runnerset instead."
|
||||||
|
cat acceptance/testdata/runnerdeploy.envsubst.yaml | TEST_ENTERPRISE= TEST_ORG= RUNNER_MIN_REPLICAS=${REPO_RUNNER_MIN_REPLICAS} NAME=repo-runnerdeploy envsubst | kubectl ${OP} -f -
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
echo "Skipped ${OP} for runnerdeployment and hra. Set TEST_REPO to "yourorg/yourrepo" to deploy."
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ -n "${TEST_ORG}" ]; then
|
||||||
|
if [ "${USE_RUNNERSET}" != "false" ]; then
|
||||||
|
cat acceptance/testdata/runnerset.envsubst.yaml | TEST_ENTERPRISE= TEST_REPO= RUNNER_MIN_REPLICAS=${ORG_RUNNER_MIN_REPLICAS} NAME=org-runnerset envsubst | kubectl ${OP} -f -
|
||||||
|
else
|
||||||
|
cat acceptance/testdata/runnerdeploy.envsubst.yaml | TEST_ENTERPRISE= TEST_REPO= RUNNER_MIN_REPLICAS=${ORG_RUNNER_MIN_REPLICAS} NAME=org-runnerdeploy envsubst | kubectl ${OP} -f -
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ -n "${TEST_ORG_GROUP}" ]; then
|
||||||
|
if [ "${USE_RUNNERSET}" != "false" ]; then
|
||||||
|
cat acceptance/testdata/runnerset.envsubst.yaml | TEST_ENTERPRISE= TEST_REPO= RUNNER_MIN_REPLICAS=${ORG_RUNNER_MIN_REPLICAS} TEST_GROUP=${TEST_ORG_GROUP} NAME=orggroup-runnerset envsubst | kubectl ${OP} -f -
|
||||||
|
else
|
||||||
|
cat acceptance/testdata/runnerdeploy.envsubst.yaml | TEST_ENTERPRISE= TEST_REPO= RUNNER_MIN_REPLICAS=${ORG_RUNNER_MIN_REPLICAS} TEST_GROUP=${TEST_ORG_GROUP} NAME=orggroup-runnerdeploy envsubst | kubectl ${OP} -f -
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
echo "Skipped ${OP} on enterprise runnerdeployment. Set TEST_ORG_GROUP to ${OP}."
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
echo "Skipped ${OP} on organizational runnerdeployment. Set TEST_ORG to ${OP}."
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ -n "${TEST_ENTERPRISE}" ]; then
|
||||||
|
if [ "${USE_RUNNERSET}" != "false" ]; then
|
||||||
|
cat acceptance/testdata/runnerset.envsubst.yaml | TEST_ORG= TEST_REPO= RUNNER_MIN_REPLICAS=${ENTERPRISE_RUNNER_MIN_REPLICAS} NAME=enterprise-runnerset envsubst | kubectl ${OP} -f -
|
||||||
|
else
|
||||||
|
cat acceptance/testdata/runnerdeploy.envsubst.yaml | TEST_ORG= TEST_REPO= RUNNER_MIN_REPLICAS=${ENTERPRISE_RUNNER_MIN_REPLICAS} NAME=enterprise-runnerdeploy envsubst | kubectl ${OP} -f -
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ -n "${TEST_ENTERPRISE_GROUP}" ]; then
|
||||||
|
if [ "${USE_RUNNERSET}" != "false" ]; then
|
||||||
|
cat acceptance/testdata/runnerset.envsubst.yaml | TEST_ORG= TEST_REPO= RUNNER_MIN_REPLICAS=${ENTERPRISE_RUNNER_MIN_REPLICAS} TEST_GROUP=${TEST_ENTERPRISE_GROUP} NAME=enterprisegroup-runnerset envsubst | kubectl ${OP} -f -
|
||||||
|
else
|
||||||
|
cat acceptance/testdata/runnerdeploy.envsubst.yaml | TEST_ORG= TEST_REPO= RUNNER_MIN_REPLICAS=${ENTERPRISE_RUNNER_MIN_REPLICAS} TEST_GROUP=${TEST_ENTERPRISE_GROUP} NAME=enterprisegroup-runnerdeploy envsubst | kubectl ${OP} -f -
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
echo "Skipped ${OP} on enterprise runnerdeployment. Set TEST_ENTERPRISE_GROUP to ${OP}."
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
echo "Skipped ${OP} on enterprise runnerdeployment. Set TEST_ENTERPRISE to ${OP}."
|
||||||
|
fi
|
||||||
82
acceptance/testdata/kubernetes_container_mode.envsubst.yaml
vendored
Normal file
82
acceptance/testdata/kubernetes_container_mode.envsubst.yaml
vendored
Normal file
@@ -0,0 +1,82 @@
|
|||||||
|
# USAGE:
|
||||||
|
# cat acceptance/testdata/kubernetes_container_mode.envsubst.yaml | NAMESPACE=default envsubst | kubectl apply -f -
|
||||||
|
apiVersion: rbac.authorization.k8s.io/v1
|
||||||
|
kind: ClusterRole
|
||||||
|
metadata:
|
||||||
|
name: k8s-mode-runner
|
||||||
|
rules:
|
||||||
|
- apiGroups: [""]
|
||||||
|
resources: ["pods"]
|
||||||
|
verbs: ["get", "list", "create", "delete"]
|
||||||
|
- apiGroups: [""]
|
||||||
|
resources: ["pods/exec"]
|
||||||
|
verbs: ["get", "create"]
|
||||||
|
- apiGroups: [""]
|
||||||
|
resources: ["pods/log"]
|
||||||
|
verbs: ["get", "list", "watch",]
|
||||||
|
- apiGroups: ["batch"]
|
||||||
|
resources: ["jobs"]
|
||||||
|
verbs: ["get", "list", "create", "delete"]
|
||||||
|
- apiGroups: [""]
|
||||||
|
resources: ["secrets"]
|
||||||
|
verbs: ["get", "list", "create", "delete"]
|
||||||
|
---
|
||||||
|
apiVersion: rbac.authorization.k8s.io/v1
|
||||||
|
kind: ClusterRole
|
||||||
|
metadata:
|
||||||
|
name: runner-status-updater
|
||||||
|
rules:
|
||||||
|
- apiGroups: ["actions.summerwind.dev"]
|
||||||
|
resources: ["runners/status"]
|
||||||
|
verbs: ["get", "update", "patch"]
|
||||||
|
---
|
||||||
|
apiVersion: v1
|
||||||
|
kind: ServiceAccount
|
||||||
|
metadata:
|
||||||
|
name: runner
|
||||||
|
namespace: ${NAMESPACE}
|
||||||
|
---
|
||||||
|
# To verify it's working, try:
|
||||||
|
# kubectl auth can-i --as system:serviceaccount:default:runner get pod
|
||||||
|
# If incomplete, workflows and jobs would fail with an error message like:
|
||||||
|
# Error: Error: The Service account needs the following permissions [{"group":"","verbs":["get","list","create","delete"],"resource":"pods","subresource":""},{"group":"","verbs":["get","create"],"resource":"pods","subresource":"exec"},{"group":"","verbs":["get","list","watch"],"resource":"pods","subresource":"log"},{"group":"batch","verbs":["get","list","create","delete"],"resource":"jobs","subresource":""},{"group":"","verbs":["create","delete","get","list"],"resource":"secrets","subresource":""}] on the pod resource in the 'default' namespace. Please contact your self hosted runner administrator.
|
||||||
|
# Error: Process completed with exit code 1.
|
||||||
|
apiVersion: rbac.authorization.k8s.io/v1
|
||||||
|
# This role binding allows "jane" to read pods in the "default" namespace.
|
||||||
|
# You need to already have a Role named "pod-reader" in that namespace.
|
||||||
|
kind: RoleBinding
|
||||||
|
metadata:
|
||||||
|
name: runner-k8s-mode-runner
|
||||||
|
namespace: ${NAMESPACE}
|
||||||
|
subjects:
|
||||||
|
- kind: ServiceAccount
|
||||||
|
name: runner
|
||||||
|
namespace: ${NAMESPACE}
|
||||||
|
roleRef:
|
||||||
|
kind: ClusterRole
|
||||||
|
name: k8s-mode-runner
|
||||||
|
apiGroup: rbac.authorization.k8s.io
|
||||||
|
---
|
||||||
|
apiVersion: rbac.authorization.k8s.io/v1
|
||||||
|
kind: RoleBinding
|
||||||
|
metadata:
|
||||||
|
name: runner-runner-stat-supdater
|
||||||
|
namespace: ${NAMESPACE}
|
||||||
|
subjects:
|
||||||
|
- kind: ServiceAccount
|
||||||
|
name: runner
|
||||||
|
namespace: ${NAMESPACE}
|
||||||
|
roleRef:
|
||||||
|
kind: ClusterRole
|
||||||
|
name: runner-status-updater
|
||||||
|
apiGroup: rbac.authorization.k8s.io
|
||||||
|
---
|
||||||
|
apiVersion: storage.k8s.io/v1
|
||||||
|
kind: StorageClass
|
||||||
|
metadata:
|
||||||
|
name: org-runnerdeploy-runner-work-dir
|
||||||
|
labels:
|
||||||
|
content: org-runnerdeploy-runner-work-dir
|
||||||
|
provisioner: rancher.io/local-path
|
||||||
|
reclaimPolicy: Delete
|
||||||
|
volumeBindingMode: WaitForFirstConsumer
|
||||||
11
acceptance/testdata/runnerdeploy.envsubst.yaml
vendored
11
acceptance/testdata/runnerdeploy.envsubst.yaml
vendored
@@ -43,6 +43,17 @@ spec:
|
|||||||
# Non-standard working directory
|
# Non-standard working directory
|
||||||
#
|
#
|
||||||
# workDir: "/"
|
# workDir: "/"
|
||||||
|
|
||||||
|
# # Uncomment the below to enable the kubernetes container mode
|
||||||
|
# # See https://github.com/actions-runner-controller/actions-runner-controller#runner-with-k8s-jobs
|
||||||
|
containerMode: kubernetes
|
||||||
|
workVolumeClaimTemplate:
|
||||||
|
accessModes:
|
||||||
|
- ReadWriteOnce
|
||||||
|
storageClassName: "${NAME}-runner-work-dir"
|
||||||
|
resources:
|
||||||
|
requests:
|
||||||
|
storage: 10Gi
|
||||||
---
|
---
|
||||||
apiVersion: actions.summerwind.dev/v1alpha1
|
apiVersion: actions.summerwind.dev/v1alpha1
|
||||||
kind: HorizontalRunnerAutoscaler
|
kind: HorizontalRunnerAutoscaler
|
||||||
|
|||||||
36
acceptance/testdata/runnerset.envsubst.yaml
vendored
36
acceptance/testdata/runnerset.envsubst.yaml
vendored
@@ -122,8 +122,10 @@ spec:
|
|||||||
value: "/home/runner/.cache/go-mod"
|
value: "/home/runner/.cache/go-mod"
|
||||||
# PV-backed runner work dir
|
# PV-backed runner work dir
|
||||||
volumeMounts:
|
volumeMounts:
|
||||||
- name: work
|
# Comment out the ephemeral work volume if you're going to test the kubernetes container mode
|
||||||
mountPath: /runner/_work
|
# The volume and mount with the same names will be created by workVolumeClaimTemplate and the kubernetes container mode support.
|
||||||
|
# - name: work
|
||||||
|
# mountPath: /runner/_work
|
||||||
# Cache docker image layers, in case dockerdWithinRunnerContainer=true
|
# Cache docker image layers, in case dockerdWithinRunnerContainer=true
|
||||||
- name: var-lib-docker
|
- name: var-lib-docker
|
||||||
mountPath: /var/lib/docker
|
mountPath: /var/lib/docker
|
||||||
@@ -163,17 +165,18 @@ spec:
|
|||||||
# For buildx cache
|
# For buildx cache
|
||||||
- name: cache
|
- name: cache
|
||||||
mountPath: "/home/runner/.cache"
|
mountPath: "/home/runner/.cache"
|
||||||
volumes:
|
# Comment out the ephemeral work volume if you're going to test the kubernetes container mode
|
||||||
- name: work
|
# volumes:
|
||||||
ephemeral:
|
# - name: work
|
||||||
volumeClaimTemplate:
|
# ephemeral:
|
||||||
spec:
|
# volumeClaimTemplate:
|
||||||
accessModes:
|
# spec:
|
||||||
- ReadWriteOnce
|
# accessModes:
|
||||||
storageClassName: "${NAME}-runner-work-dir"
|
# - ReadWriteOnce
|
||||||
resources:
|
# storageClassName: "${NAME}-runner-work-dir"
|
||||||
requests:
|
# resources:
|
||||||
storage: 10Gi
|
# requests:
|
||||||
|
# storage: 10Gi
|
||||||
volumeClaimTemplates:
|
volumeClaimTemplates:
|
||||||
- metadata:
|
- metadata:
|
||||||
name: vol1
|
name: vol1
|
||||||
@@ -251,3 +254,10 @@ spec:
|
|||||||
minReplicas: ${RUNNER_MIN_REPLICAS}
|
minReplicas: ${RUNNER_MIN_REPLICAS}
|
||||||
maxReplicas: 10
|
maxReplicas: 10
|
||||||
scaleDownDelaySecondsAfterScaleOut: ${RUNNER_SCALE_DOWN_DELAY_SECONDS_AFTER_SCALE_OUT}
|
scaleDownDelaySecondsAfterScaleOut: ${RUNNER_SCALE_DOWN_DELAY_SECONDS_AFTER_SCALE_OUT}
|
||||||
|
# Comment out the whole metrics if you'd like to solely test webhook-based scaling
|
||||||
|
metrics:
|
||||||
|
- type: PercentageRunnersBusy
|
||||||
|
scaleUpThreshold: '0.75'
|
||||||
|
scaleDownThreshold: '0.25'
|
||||||
|
scaleUpFactor: '2'
|
||||||
|
scaleDownFactor: '0.5'
|
||||||
|
|||||||
@@ -1,6 +1,18 @@
|
|||||||
# Set actions-runner-controller settings for testing
|
# Set actions-runner-controller settings for testing
|
||||||
logLevel: "-4"
|
logLevel: "-4"
|
||||||
|
imagePullSecrets:
|
||||||
|
- name:
|
||||||
|
image:
|
||||||
|
actionsRunnerImagePullSecrets:
|
||||||
|
- name:
|
||||||
|
runner:
|
||||||
|
statusUpdateHook:
|
||||||
|
enabled: true
|
||||||
|
rbac:
|
||||||
|
allowGrantingKubernetesContainerModePermissions: true
|
||||||
githubWebhookServer:
|
githubWebhookServer:
|
||||||
|
imagePullSecrets:
|
||||||
|
- name:
|
||||||
logLevel: "-4"
|
logLevel: "-4"
|
||||||
enabled: true
|
enabled: true
|
||||||
labels: {}
|
labels: {}
|
||||||
|
|||||||
@@ -60,6 +60,9 @@ type HorizontalRunnerAutoscalerSpec struct {
|
|||||||
// The earlier a scheduled override is, the higher it is prioritized.
|
// The earlier a scheduled override is, the higher it is prioritized.
|
||||||
// +optional
|
// +optional
|
||||||
ScheduledOverrides []ScheduledOverride `json:"scheduledOverrides,omitempty"`
|
ScheduledOverrides []ScheduledOverride `json:"scheduledOverrides,omitempty"`
|
||||||
|
|
||||||
|
// +optional
|
||||||
|
GitHubAPICredentialsFrom *GitHubAPICredentialsFrom `json:"githubAPICredentialsFrom,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
type ScaleUpTrigger struct {
|
type ScaleUpTrigger struct {
|
||||||
|
|||||||
@@ -18,8 +18,10 @@ package v1alpha1
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"errors"
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
|
||||||
"k8s.io/apimachinery/pkg/api/resource"
|
"k8s.io/apimachinery/pkg/api/resource"
|
||||||
|
"k8s.io/apimachinery/pkg/util/validation/field"
|
||||||
|
|
||||||
corev1 "k8s.io/api/core/v1"
|
corev1 "k8s.io/api/core/v1"
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
@@ -71,6 +73,19 @@ type RunnerConfig struct {
|
|||||||
VolumeSizeLimit *resource.Quantity `json:"volumeSizeLimit,omitempty"`
|
VolumeSizeLimit *resource.Quantity `json:"volumeSizeLimit,omitempty"`
|
||||||
// +optional
|
// +optional
|
||||||
VolumeStorageMedium *string `json:"volumeStorageMedium,omitempty"`
|
VolumeStorageMedium *string `json:"volumeStorageMedium,omitempty"`
|
||||||
|
|
||||||
|
// +optional
|
||||||
|
ContainerMode string `json:"containerMode,omitempty"`
|
||||||
|
|
||||||
|
GitHubAPICredentialsFrom *GitHubAPICredentialsFrom `json:"githubAPICredentialsFrom,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type GitHubAPICredentialsFrom struct {
|
||||||
|
SecretRef SecretReference `json:"secretRef,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type SecretReference struct {
|
||||||
|
Name string `json:"name"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// RunnerPodSpec defines the desired pod spec fields of the runner pod
|
// RunnerPodSpec defines the desired pod spec fields of the runner pod
|
||||||
@@ -135,6 +150,9 @@ type RunnerPodSpec struct {
|
|||||||
// +optional
|
// +optional
|
||||||
Tolerations []corev1.Toleration `json:"tolerations,omitempty"`
|
Tolerations []corev1.Toleration `json:"tolerations,omitempty"`
|
||||||
|
|
||||||
|
// +optional
|
||||||
|
PriorityClassName string `json:"priorityClassName,omitempty"`
|
||||||
|
|
||||||
// +optional
|
// +optional
|
||||||
TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty"`
|
TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty"`
|
||||||
|
|
||||||
@@ -154,10 +172,32 @@ type RunnerPodSpec struct {
|
|||||||
|
|
||||||
// +optional
|
// +optional
|
||||||
DnsConfig *corev1.PodDNSConfig `json:"dnsConfig,omitempty"`
|
DnsConfig *corev1.PodDNSConfig `json:"dnsConfig,omitempty"`
|
||||||
|
|
||||||
|
// +optional
|
||||||
|
WorkVolumeClaimTemplate *WorkVolumeClaimTemplate `json:"workVolumeClaimTemplate,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (rs *RunnerSpec) Validate(rootPath *field.Path) field.ErrorList {
|
||||||
|
var (
|
||||||
|
errList field.ErrorList
|
||||||
|
err error
|
||||||
|
)
|
||||||
|
|
||||||
|
err = rs.validateRepository()
|
||||||
|
if err != nil {
|
||||||
|
errList = append(errList, field.Invalid(rootPath.Child("repository"), rs.Repository, err.Error()))
|
||||||
|
}
|
||||||
|
|
||||||
|
err = rs.validateWorkVolumeClaimTemplate()
|
||||||
|
if err != nil {
|
||||||
|
errList = append(errList, field.Invalid(rootPath.Child("workVolumeClaimTemplate"), rs.WorkVolumeClaimTemplate, err.Error()))
|
||||||
|
}
|
||||||
|
|
||||||
|
return errList
|
||||||
}
|
}
|
||||||
|
|
||||||
// ValidateRepository validates repository field.
|
// ValidateRepository validates repository field.
|
||||||
func (rs *RunnerSpec) ValidateRepository() error {
|
func (rs *RunnerSpec) validateRepository() error {
|
||||||
// Enterprise, Organization and repository are both exclusive.
|
// Enterprise, Organization and repository are both exclusive.
|
||||||
foundCount := 0
|
foundCount := 0
|
||||||
if len(rs.Organization) > 0 {
|
if len(rs.Organization) > 0 {
|
||||||
@@ -179,6 +219,18 @@ func (rs *RunnerSpec) ValidateRepository() error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (rs *RunnerSpec) validateWorkVolumeClaimTemplate() error {
|
||||||
|
if rs.ContainerMode != "kubernetes" {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if rs.WorkVolumeClaimTemplate == nil {
|
||||||
|
return errors.New("Spec.ContainerMode: kubernetes must have workVolumeClaimTemplate field specified")
|
||||||
|
}
|
||||||
|
|
||||||
|
return rs.WorkVolumeClaimTemplate.validate()
|
||||||
|
}
|
||||||
|
|
||||||
// RunnerStatus defines the observed state of Runner
|
// RunnerStatus defines the observed state of Runner
|
||||||
type RunnerStatus struct {
|
type RunnerStatus struct {
|
||||||
// Turns true only if the runner pod is ready.
|
// Turns true only if the runner pod is ready.
|
||||||
@@ -207,13 +259,60 @@ type RunnerStatusRegistration struct {
|
|||||||
ExpiresAt metav1.Time `json:"expiresAt"`
|
ExpiresAt metav1.Time `json:"expiresAt"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type WorkVolumeClaimTemplate struct {
|
||||||
|
StorageClassName string `json:"storageClassName"`
|
||||||
|
AccessModes []corev1.PersistentVolumeAccessMode `json:"accessModes"`
|
||||||
|
Resources corev1.ResourceRequirements `json:"resources"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *WorkVolumeClaimTemplate) validate() error {
|
||||||
|
if w.AccessModes == nil || len(w.AccessModes) == 0 {
|
||||||
|
return errors.New("Access mode should have at least one mode specified")
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, accessMode := range w.AccessModes {
|
||||||
|
switch accessMode {
|
||||||
|
case corev1.ReadWriteOnce, corev1.ReadWriteMany:
|
||||||
|
default:
|
||||||
|
return fmt.Errorf("Access mode %v is not supported", accessMode)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *WorkVolumeClaimTemplate) V1Volume() corev1.Volume {
|
||||||
|
return corev1.Volume{
|
||||||
|
Name: "work",
|
||||||
|
VolumeSource: corev1.VolumeSource{
|
||||||
|
Ephemeral: &corev1.EphemeralVolumeSource{
|
||||||
|
VolumeClaimTemplate: &corev1.PersistentVolumeClaimTemplate{
|
||||||
|
Spec: corev1.PersistentVolumeClaimSpec{
|
||||||
|
AccessModes: w.AccessModes,
|
||||||
|
StorageClassName: &w.StorageClassName,
|
||||||
|
Resources: w.Resources,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *WorkVolumeClaimTemplate) V1VolumeMount(mountPath string) corev1.VolumeMount {
|
||||||
|
return corev1.VolumeMount{
|
||||||
|
MountPath: mountPath,
|
||||||
|
Name: "work",
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// +kubebuilder:object:root=true
|
// +kubebuilder:object:root=true
|
||||||
// +kubebuilder:subresource:status
|
// +kubebuilder:subresource:status
|
||||||
// +kubebuilder:printcolumn:JSONPath=".spec.enterprise",name=Enterprise,type=string
|
// +kubebuilder:printcolumn:JSONPath=".spec.enterprise",name=Enterprise,type=string
|
||||||
// +kubebuilder:printcolumn:JSONPath=".spec.organization",name=Organization,type=string
|
// +kubebuilder:printcolumn:JSONPath=".spec.organization",name=Organization,type=string
|
||||||
// +kubebuilder:printcolumn:JSONPath=".spec.repository",name=Repository,type=string
|
// +kubebuilder:printcolumn:JSONPath=".spec.repository",name=Repository,type=string
|
||||||
|
// +kubebuilder:printcolumn:JSONPath=".spec.group",name=Group,type=string
|
||||||
// +kubebuilder:printcolumn:JSONPath=".spec.labels",name=Labels,type=string
|
// +kubebuilder:printcolumn:JSONPath=".spec.labels",name=Labels,type=string
|
||||||
// +kubebuilder:printcolumn:JSONPath=".status.phase",name=Status,type=string
|
// +kubebuilder:printcolumn:JSONPath=".status.phase",name=Status,type=string
|
||||||
|
// +kubebuilder:printcolumn:JSONPath=".status.message",name=Message,type=string
|
||||||
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"
|
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"
|
||||||
|
|
||||||
// Runner is the Schema for the runners API
|
// Runner is the Schema for the runners API
|
||||||
|
|||||||
@@ -66,15 +66,7 @@ func (r *Runner) ValidateDelete() error {
|
|||||||
|
|
||||||
// Validate validates resource spec.
|
// Validate validates resource spec.
|
||||||
func (r *Runner) Validate() error {
|
func (r *Runner) Validate() error {
|
||||||
var (
|
errList := r.Spec.Validate(field.NewPath("spec"))
|
||||||
errList field.ErrorList
|
|
||||||
err error
|
|
||||||
)
|
|
||||||
|
|
||||||
err = r.Spec.ValidateRepository()
|
|
||||||
if err != nil {
|
|
||||||
errList = append(errList, field.Invalid(field.NewPath("spec", "repository"), r.Spec.Repository, err.Error()))
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(errList) > 0 {
|
if len(errList) > 0 {
|
||||||
return apierrors.NewInvalid(r.GroupVersionKind().GroupKind(), r.Name, errList)
|
return apierrors.NewInvalid(r.GroupVersionKind().GroupKind(), r.Name, errList)
|
||||||
|
|||||||
@@ -66,15 +66,7 @@ func (r *RunnerDeployment) ValidateDelete() error {
|
|||||||
|
|
||||||
// Validate validates resource spec.
|
// Validate validates resource spec.
|
||||||
func (r *RunnerDeployment) Validate() error {
|
func (r *RunnerDeployment) Validate() error {
|
||||||
var (
|
errList := r.Spec.Template.Spec.Validate(field.NewPath("spec", "template", "spec"))
|
||||||
errList field.ErrorList
|
|
||||||
err error
|
|
||||||
)
|
|
||||||
|
|
||||||
err = r.Spec.Template.Spec.ValidateRepository()
|
|
||||||
if err != nil {
|
|
||||||
errList = append(errList, field.Invalid(field.NewPath("spec", "template", "spec", "repository"), r.Spec.Template.Spec.Repository, err.Error()))
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(errList) > 0 {
|
if len(errList) > 0 {
|
||||||
return apierrors.NewInvalid(r.GroupVersionKind().GroupKind(), r.Name, errList)
|
return apierrors.NewInvalid(r.GroupVersionKind().GroupKind(), r.Name, errList)
|
||||||
|
|||||||
@@ -66,15 +66,7 @@ func (r *RunnerReplicaSet) ValidateDelete() error {
|
|||||||
|
|
||||||
// Validate validates resource spec.
|
// Validate validates resource spec.
|
||||||
func (r *RunnerReplicaSet) Validate() error {
|
func (r *RunnerReplicaSet) Validate() error {
|
||||||
var (
|
errList := r.Spec.Template.Spec.Validate(field.NewPath("spec", "template", "spec"))
|
||||||
errList field.ErrorList
|
|
||||||
err error
|
|
||||||
)
|
|
||||||
|
|
||||||
err = r.Spec.Template.Spec.ValidateRepository()
|
|
||||||
if err != nil {
|
|
||||||
errList = append(errList, field.Invalid(field.NewPath("spec", "template", "spec", "repository"), r.Spec.Template.Spec.Repository, err.Error()))
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(errList) > 0 {
|
if len(errList) > 0 {
|
||||||
return apierrors.NewInvalid(r.GroupVersionKind().GroupKind(), r.Name, errList)
|
return apierrors.NewInvalid(r.GroupVersionKind().GroupKind(), r.Name, errList)
|
||||||
|
|||||||
@@ -33,6 +33,12 @@ type RunnerSetSpec struct {
|
|||||||
// +nullable
|
// +nullable
|
||||||
EffectiveTime *metav1.Time `json:"effectiveTime,omitempty"`
|
EffectiveTime *metav1.Time `json:"effectiveTime,omitempty"`
|
||||||
|
|
||||||
|
// +optional
|
||||||
|
ServiceAccountName string `json:"serviceAccountName,omitempty"`
|
||||||
|
|
||||||
|
// +optional
|
||||||
|
WorkVolumeClaimTemplate *WorkVolumeClaimTemplate `json:"workVolumeClaimTemplate,omitempty"`
|
||||||
|
|
||||||
appsv1.StatefulSetSpec `json:",inline"`
|
appsv1.StatefulSetSpec `json:",inline"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -90,6 +90,22 @@ func (in *CheckRunSpec) DeepCopy() *CheckRunSpec {
|
|||||||
return out
|
return out
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
func (in *GitHubAPICredentialsFrom) DeepCopyInto(out *GitHubAPICredentialsFrom) {
|
||||||
|
*out = *in
|
||||||
|
out.SecretRef = in.SecretRef
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitHubAPICredentialsFrom.
|
||||||
|
func (in *GitHubAPICredentialsFrom) DeepCopy() *GitHubAPICredentialsFrom {
|
||||||
|
if in == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
out := new(GitHubAPICredentialsFrom)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
func (in *GitHubEventScaleUpTriggerSpec) DeepCopyInto(out *GitHubEventScaleUpTriggerSpec) {
|
func (in *GitHubEventScaleUpTriggerSpec) DeepCopyInto(out *GitHubEventScaleUpTriggerSpec) {
|
||||||
*out = *in
|
*out = *in
|
||||||
@@ -231,6 +247,11 @@ func (in *HorizontalRunnerAutoscalerSpec) DeepCopyInto(out *HorizontalRunnerAuto
|
|||||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
if in.GitHubAPICredentialsFrom != nil {
|
||||||
|
in, out := &in.GitHubAPICredentialsFrom, &out.GitHubAPICredentialsFrom
|
||||||
|
*out = new(GitHubAPICredentialsFrom)
|
||||||
|
**out = **in
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalRunnerAutoscalerSpec.
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalRunnerAutoscalerSpec.
|
||||||
@@ -425,6 +446,11 @@ func (in *RunnerConfig) DeepCopyInto(out *RunnerConfig) {
|
|||||||
*out = new(string)
|
*out = new(string)
|
||||||
**out = **in
|
**out = **in
|
||||||
}
|
}
|
||||||
|
if in.GitHubAPICredentialsFrom != nil {
|
||||||
|
in, out := &in.GitHubAPICredentialsFrom, &out.GitHubAPICredentialsFrom
|
||||||
|
*out = new(GitHubAPICredentialsFrom)
|
||||||
|
**out = **in
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunnerConfig.
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunnerConfig.
|
||||||
@@ -741,6 +767,11 @@ func (in *RunnerPodSpec) DeepCopyInto(out *RunnerPodSpec) {
|
|||||||
*out = new(v1.PodDNSConfig)
|
*out = new(v1.PodDNSConfig)
|
||||||
(*in).DeepCopyInto(*out)
|
(*in).DeepCopyInto(*out)
|
||||||
}
|
}
|
||||||
|
if in.WorkVolumeClaimTemplate != nil {
|
||||||
|
in, out := &in.WorkVolumeClaimTemplate, &out.WorkVolumeClaimTemplate
|
||||||
|
*out = new(WorkVolumeClaimTemplate)
|
||||||
|
(*in).DeepCopyInto(*out)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunnerPodSpec.
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunnerPodSpec.
|
||||||
@@ -939,6 +970,11 @@ func (in *RunnerSetSpec) DeepCopyInto(out *RunnerSetSpec) {
|
|||||||
in, out := &in.EffectiveTime, &out.EffectiveTime
|
in, out := &in.EffectiveTime, &out.EffectiveTime
|
||||||
*out = (*in).DeepCopy()
|
*out = (*in).DeepCopy()
|
||||||
}
|
}
|
||||||
|
if in.WorkVolumeClaimTemplate != nil {
|
||||||
|
in, out := &in.WorkVolumeClaimTemplate, &out.WorkVolumeClaimTemplate
|
||||||
|
*out = new(WorkVolumeClaimTemplate)
|
||||||
|
(*in).DeepCopyInto(*out)
|
||||||
|
}
|
||||||
in.StatefulSetSpec.DeepCopyInto(&out.StatefulSetSpec)
|
in.StatefulSetSpec.DeepCopyInto(&out.StatefulSetSpec)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1126,6 +1162,42 @@ func (in *ScheduledOverride) DeepCopy() *ScheduledOverride {
|
|||||||
return out
|
return out
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
func (in *SecretReference) DeepCopyInto(out *SecretReference) {
|
||||||
|
*out = *in
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretReference.
|
||||||
|
func (in *SecretReference) DeepCopy() *SecretReference {
|
||||||
|
if in == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
out := new(SecretReference)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
func (in *WorkVolumeClaimTemplate) DeepCopyInto(out *WorkVolumeClaimTemplate) {
|
||||||
|
*out = *in
|
||||||
|
if in.AccessModes != nil {
|
||||||
|
in, out := &in.AccessModes, &out.AccessModes
|
||||||
|
*out = make([]v1.PersistentVolumeAccessMode, len(*in))
|
||||||
|
copy(*out, *in)
|
||||||
|
}
|
||||||
|
in.Resources.DeepCopyInto(&out.Resources)
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkVolumeClaimTemplate.
|
||||||
|
func (in *WorkVolumeClaimTemplate) DeepCopy() *WorkVolumeClaimTemplate {
|
||||||
|
if in == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
out := new(WorkVolumeClaimTemplate)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
func (in *WorkflowJobSpec) DeepCopyInto(out *WorkflowJobSpec) {
|
func (in *WorkflowJobSpec) DeepCopyInto(out *WorkflowJobSpec) {
|
||||||
*out = *in
|
*out = *in
|
||||||
|
|||||||
@@ -15,10 +15,10 @@ type: application
|
|||||||
# This is the chart version. This version number should be incremented each time you make changes
|
# This is the chart version. This version number should be incremented each time you make changes
|
||||||
# to the chart and its templates, including the app version.
|
# to the chart and its templates, including the app version.
|
||||||
# Versions are expected to follow Semantic Versioning (https://semver.org/)
|
# Versions are expected to follow Semantic Versioning (https://semver.org/)
|
||||||
version: 0.19.1
|
version: 0.20.2
|
||||||
|
|
||||||
# Used as the default manager tag value when no tag property is provided in the values.yaml
|
# Used as the default manager tag value when no tag property is provided in the values.yaml
|
||||||
appVersion: 0.24.1
|
appVersion: 0.25.2
|
||||||
|
|
||||||
home: https://github.com/actions-runner-controller/actions-runner-controller
|
home: https://github.com/actions-runner-controller/actions-runner-controller
|
||||||
|
|
||||||
|
|||||||
@@ -73,11 +73,11 @@ All additional docs are kept in the `docs/` folder, this README is solely for do
|
|||||||
| `scope.watchNamespace` | Tells the controller and the github webhook server which namespace to watch if `scope.singleNamespace` is true | `Release.Namespace` (the default namespace of the helm chart). |
|
| `scope.watchNamespace` | Tells the controller and the github webhook server which namespace to watch if `scope.singleNamespace` is true | `Release.Namespace` (the default namespace of the helm chart). |
|
||||||
| `scope.singleNamespace` | Limit the controller to watch a single namespace | false |
|
| `scope.singleNamespace` | Limit the controller to watch a single namespace | false |
|
||||||
| `certManagerEnabled` | Enable cert-manager. If disabled you must set admissionWebHooks.caBundle and create TLS secrets manually | true |
|
| `certManagerEnabled` | Enable cert-manager. If disabled you must set admissionWebHooks.caBundle and create TLS secrets manually | true |
|
||||||
|
| `runner.statusUpdateHook.enabled` | Use custom RBAC for runners (role, role binding and service account), this will enable reporting runner statuses | false |
|
||||||
| `admissionWebHooks.caBundle` | Base64-encoded PEM bundle containing the CA that signed the webhook's serving certificate | |
|
| `admissionWebHooks.caBundle` | Base64-encoded PEM bundle containing the CA that signed the webhook's serving certificate | |
|
||||||
| `githubWebhookServer.logLevel` | Set the log level of the githubWebhookServer container | |
|
| `githubWebhookServer.logLevel` | Set the log level of the githubWebhookServer container | |
|
||||||
| `githubWebhookServer.replicaCount` | Set the number of webhook server pods | 1 |
|
| `githubWebhookServer.replicaCount` | Set the number of webhook server pods | 1 |
|
||||||
| `githubWebhookServer.useRunnerGroupsVisibility` | Enable supporting runner groups with custom visibility. This will incur in extra API calls and may blow up your budget. Currently, you also need to set `githubWebhookServer.secret.enabled` to enable this feature. | false |
|
| `githubWebhookServer.useRunnerGroupsVisibility` | Enable supporting runner groups with custom visibility. This will incur in extra API calls and may blow up your budget. Currently, you also need to set `githubWebhookServer.secret.enabled` to enable this feature. | false |
|
||||||
| `githubWebhookServer.syncPeriod` | Set the period in which the controller reconciles the resources | 10m |
|
|
||||||
| `githubWebhookServer.enabled` | Deploy the webhook server pod | false |
|
| `githubWebhookServer.enabled` | Deploy the webhook server pod | false |
|
||||||
| `githubWebhookServer.secret.enabled` | Passes the webhook hook secret to the github-webhook-server | false |
|
| `githubWebhookServer.secret.enabled` | Passes the webhook hook secret to the github-webhook-server | false |
|
||||||
| `githubWebhookServer.secret.create` | Deploy the webhook hook secret | false |
|
| `githubWebhookServer.secret.create` | Deploy the webhook hook secret | false |
|
||||||
|
|||||||
@@ -61,6 +61,16 @@ spec:
|
|||||||
type: integer
|
type: integer
|
||||||
type: object
|
type: object
|
||||||
type: array
|
type: array
|
||||||
|
githubAPICredentialsFrom:
|
||||||
|
properties:
|
||||||
|
secretRef:
|
||||||
|
properties:
|
||||||
|
name:
|
||||||
|
type: string
|
||||||
|
required:
|
||||||
|
- name
|
||||||
|
type: object
|
||||||
|
type: object
|
||||||
maxReplicas:
|
maxReplicas:
|
||||||
description: MaxReplicas is the maximum number of replicas the deployment is allowed to scale
|
description: MaxReplicas is the maximum number of replicas the deployment is allowed to scale
|
||||||
type: integer
|
type: integer
|
||||||
|
|||||||
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
@@ -58,15 +58,15 @@ spec:
|
|||||||
{{- if .Values.scope.singleNamespace }}
|
{{- if .Values.scope.singleNamespace }}
|
||||||
- "--watch-namespace={{ default .Release.Namespace .Values.scope.watchNamespace }}"
|
- "--watch-namespace={{ default .Release.Namespace .Values.scope.watchNamespace }}"
|
||||||
{{- end }}
|
{{- end }}
|
||||||
{{- if .Values.githubAPICacheDuration }}
|
|
||||||
- "--github-api-cache-duration={{ .Values.githubAPICacheDuration }}"
|
|
||||||
{{- end }}
|
|
||||||
{{- if .Values.logLevel }}
|
{{- if .Values.logLevel }}
|
||||||
- "--log-level={{ .Values.logLevel }}"
|
- "--log-level={{ .Values.logLevel }}"
|
||||||
{{- end }}
|
{{- end }}
|
||||||
{{- if .Values.runnerGithubURL }}
|
{{- if .Values.runnerGithubURL }}
|
||||||
- "--runner-github-url={{ .Values.runnerGithubURL }}"
|
- "--runner-github-url={{ .Values.runnerGithubURL }}"
|
||||||
{{- end }}
|
{{- end }}
|
||||||
|
{{- if .Values.runner.statusUpdateHook.enabled }}
|
||||||
|
- "--runner-status-update-hook"
|
||||||
|
{{- end }}
|
||||||
command:
|
command:
|
||||||
- "/manager"
|
- "/manager"
|
||||||
env:
|
env:
|
||||||
@@ -118,10 +118,14 @@ spec:
|
|||||||
name: {{ include "actions-runner-controller.secretName" . }}
|
name: {{ include "actions-runner-controller.secretName" . }}
|
||||||
optional: true
|
optional: true
|
||||||
{{- end }}
|
{{- end }}
|
||||||
|
{{- if kindIs "slice" .Values.env }}
|
||||||
|
{{- toYaml .Values.env | nindent 8 }}
|
||||||
|
{{- else }}
|
||||||
{{- range $key, $val := .Values.env }}
|
{{- range $key, $val := .Values.env }}
|
||||||
- name: {{ $key }}
|
- name: {{ $key }}
|
||||||
value: {{ $val | quote }}
|
value: {{ $val | quote }}
|
||||||
{{- end }}
|
{{- end }}
|
||||||
|
{{- end }}
|
||||||
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default (cat "v" .Chart.AppVersion | replace " " "") }}"
|
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default (cat "v" .Chart.AppVersion | replace " " "") }}"
|
||||||
name: manager
|
name: manager
|
||||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||||
|
|||||||
@@ -39,7 +39,6 @@ spec:
|
|||||||
{{- $metricsHost := .Values.metrics.proxy.enabled | ternary "127.0.0.1" "0.0.0.0" }}
|
{{- $metricsHost := .Values.metrics.proxy.enabled | ternary "127.0.0.1" "0.0.0.0" }}
|
||||||
{{- $metricsPort := .Values.metrics.proxy.enabled | ternary "8080" .Values.metrics.port }}
|
{{- $metricsPort := .Values.metrics.proxy.enabled | ternary "8080" .Values.metrics.port }}
|
||||||
- "--metrics-addr={{ $metricsHost }}:{{ $metricsPort }}"
|
- "--metrics-addr={{ $metricsHost }}:{{ $metricsPort }}"
|
||||||
- "--sync-period={{ .Values.githubWebhookServer.syncPeriod }}"
|
|
||||||
{{- if .Values.githubWebhookServer.logLevel }}
|
{{- if .Values.githubWebhookServer.logLevel }}
|
||||||
- "--log-level={{ .Values.githubWebhookServer.logLevel }}"
|
- "--log-level={{ .Values.githubWebhookServer.logLevel }}"
|
||||||
{{- end }}
|
{{- end }}
|
||||||
|
|||||||
@@ -1,11 +1,11 @@
|
|||||||
{{- if .Values.githubWebhookServer.ingress.enabled -}}
|
{{- if .Values.githubWebhookServer.ingress.enabled -}}
|
||||||
{{- $fullName := include "actions-runner-controller-github-webhook-server.fullname" . -}}
|
{{- $fullName := include "actions-runner-controller-github-webhook-server.fullname" . -}}
|
||||||
{{- $svcPort := (index .Values.githubWebhookServer.service.ports 0).port -}}
|
{{- $svcPort := (index .Values.githubWebhookServer.service.ports 0).port -}}
|
||||||
{{- if .Capabilities.APIVersions.Has "networking.k8s.io/v1" }}
|
{{- if .Capabilities.APIVersions.Has "networking.k8s.io/v1/Ingress" }}
|
||||||
apiVersion: networking.k8s.io/v1
|
apiVersion: networking.k8s.io/v1
|
||||||
{{- else if .Capabilities.APIVersions.Has "networking.k8s.io/v1beta1" }}
|
{{- else if .Capabilities.APIVersions.Has "networking.k8s.io/v1beta1/Ingress" }}
|
||||||
apiVersion: networking.k8s.io/v1beta1
|
apiVersion: networking.k8s.io/v1beta1
|
||||||
{{- else if .Capabilities.APIVersions.Has "extensions/v1beta1" }}
|
{{- else if .Capabilities.APIVersions.Has "extensions/v1beta1/Ingress" }}
|
||||||
apiVersion: extensions/v1beta1
|
apiVersion: extensions/v1beta1
|
||||||
{{- end }}
|
{{- end }}
|
||||||
kind: Ingress
|
kind: Ingress
|
||||||
@@ -42,11 +42,11 @@ spec:
|
|||||||
{{- end }}
|
{{- end }}
|
||||||
{{- range .paths }}
|
{{- range .paths }}
|
||||||
- path: {{ .path }}
|
- path: {{ .path }}
|
||||||
{{- if $.Capabilities.APIVersions.Has "networking.k8s.io/v1" }}
|
{{- if $.Capabilities.APIVersions.Has "networking.k8s.io/v1/Ingress" }}
|
||||||
pathType: {{ .pathType }}
|
pathType: {{ .pathType }}
|
||||||
{{- end }}
|
{{- end }}
|
||||||
backend:
|
backend:
|
||||||
{{- if $.Capabilities.APIVersions.Has "networking.k8s.io/v1" }}
|
{{- if $.Capabilities.APIVersions.Has "networking.k8s.io/v1/Ingress" }}
|
||||||
service:
|
service:
|
||||||
name: {{ $fullName }}
|
name: {{ $fullName }}
|
||||||
port:
|
port:
|
||||||
|
|||||||
@@ -12,5 +12,17 @@ data:
|
|||||||
{{- if .Values.githubWebhookServer.secret.github_webhook_secret_token }}
|
{{- if .Values.githubWebhookServer.secret.github_webhook_secret_token }}
|
||||||
github_webhook_secret_token: {{ .Values.githubWebhookServer.secret.github_webhook_secret_token | toString | b64enc }}
|
github_webhook_secret_token: {{ .Values.githubWebhookServer.secret.github_webhook_secret_token | toString | b64enc }}
|
||||||
{{- end }}
|
{{- end }}
|
||||||
|
{{- if .Values.githubWebhookServer.secret.github_app_id }}
|
||||||
|
github_app_id: {{ .Values.githubWebhookServer.secret.github_app_id | toString | b64enc }}
|
||||||
|
{{- end }}
|
||||||
|
{{- if .Values.githubWebhookServer.secret.github_app_installation_id }}
|
||||||
|
github_app_installation_id: {{ .Values.githubWebhookServer.secret.github_app_installation_id | toString | b64enc }}
|
||||||
|
{{- end }}
|
||||||
|
{{- if .Values.githubWebhookServer.secret.github_app_private_key }}
|
||||||
|
github_app_private_key: {{ .Values.githubWebhookServer.secret.github_app_private_key | toString | b64enc }}
|
||||||
|
{{- end }}
|
||||||
|
{{- if .Values.githubWebhookServer.secret.github_token }}
|
||||||
|
github_token: {{ .Values.githubWebhookServer.secret.github_token | toString | b64enc }}
|
||||||
|
{{- end }}
|
||||||
{{- end }}
|
{{- end }}
|
||||||
{{- end }}
|
{{- end }}
|
||||||
|
|||||||
@@ -250,3 +250,72 @@ rules:
|
|||||||
- patch
|
- patch
|
||||||
- update
|
- update
|
||||||
- watch
|
- watch
|
||||||
|
- apiGroups:
|
||||||
|
- ""
|
||||||
|
resources:
|
||||||
|
- secrets
|
||||||
|
verbs:
|
||||||
|
- get
|
||||||
|
- list
|
||||||
|
- watch
|
||||||
|
{{- if .Values.runner.statusUpdateHook.enabled }}
|
||||||
|
- apiGroups:
|
||||||
|
- ""
|
||||||
|
resources:
|
||||||
|
- serviceaccounts
|
||||||
|
verbs:
|
||||||
|
- create
|
||||||
|
- delete
|
||||||
|
- get
|
||||||
|
- apiGroups:
|
||||||
|
- rbac.authorization.k8s.io
|
||||||
|
resources:
|
||||||
|
- rolebindings
|
||||||
|
verbs:
|
||||||
|
- create
|
||||||
|
- delete
|
||||||
|
- get
|
||||||
|
- apiGroups:
|
||||||
|
- rbac.authorization.k8s.io
|
||||||
|
resources:
|
||||||
|
- roles
|
||||||
|
verbs:
|
||||||
|
- create
|
||||||
|
- delete
|
||||||
|
- get
|
||||||
|
{{- end }}
|
||||||
|
{{- if .Values.rbac.allowGrantingKubernetesContainerModePermissions }}
|
||||||
|
{{/* These permissions are required by ARC to create RBAC resources for the runner pod to use the kubernetes container mode. */}}
|
||||||
|
{{/* See https://github.com/actions-runner-controller/actions-runner-controller/pull/1268/files#r917331632 */}}
|
||||||
|
- apiGroups:
|
||||||
|
- ""
|
||||||
|
resources:
|
||||||
|
- pods/exec
|
||||||
|
verbs:
|
||||||
|
- create
|
||||||
|
- get
|
||||||
|
- apiGroups:
|
||||||
|
- ""
|
||||||
|
resources:
|
||||||
|
- pods/log
|
||||||
|
verbs:
|
||||||
|
- get
|
||||||
|
- list
|
||||||
|
- watch
|
||||||
|
- apiGroups:
|
||||||
|
- "batch"
|
||||||
|
resources:
|
||||||
|
- jobs
|
||||||
|
verbs:
|
||||||
|
- get
|
||||||
|
- list
|
||||||
|
- create
|
||||||
|
- delete
|
||||||
|
- apiGroups:
|
||||||
|
- ""
|
||||||
|
resources:
|
||||||
|
- secrets
|
||||||
|
verbs:
|
||||||
|
- create
|
||||||
|
- delete
|
||||||
|
{{- end }}
|
||||||
|
|||||||
@@ -15,12 +15,6 @@ enableLeaderElection: true
|
|||||||
# Must be unique if more than one controller installed onto the same namespace.
|
# Must be unique if more than one controller installed onto the same namespace.
|
||||||
#leaderElectionId: "actions-runner-controller"
|
#leaderElectionId: "actions-runner-controller"
|
||||||
|
|
||||||
# DEPRECATED: This has been removed as unnecessary in #1192
|
|
||||||
# The controller tries its best not to repeat the duplicate GitHub API call
|
|
||||||
# within this duration.
|
|
||||||
# Defaults to syncPeriod - 10s.
|
|
||||||
#githubAPICacheDuration: 30s
|
|
||||||
|
|
||||||
# The URL of your GitHub Enterprise server, if you're using one.
|
# The URL of your GitHub Enterprise server, if you're using one.
|
||||||
#githubEnterpriseServerURL: https://github.example.com
|
#githubEnterpriseServerURL: https://github.example.com
|
||||||
|
|
||||||
@@ -67,6 +61,18 @@ imagePullSecrets: []
|
|||||||
nameOverride: ""
|
nameOverride: ""
|
||||||
fullnameOverride: ""
|
fullnameOverride: ""
|
||||||
|
|
||||||
|
runner:
|
||||||
|
statusUpdateHook:
|
||||||
|
enabled: false
|
||||||
|
|
||||||
|
rbac:
|
||||||
|
{}
|
||||||
|
# # This allows ARC to dynamically create a ServiceAccount and a Role for each Runner pod that uses "kubernetes" container mode,
|
||||||
|
# # by extending ARC's manager role to have the same permissions required by the pod runs the runner agent in "kubernetes" container mode.
|
||||||
|
# # Without this, Kubernetes blocks ARC to create the role to prevent a priviledge escalation.
|
||||||
|
# # See https://github.com/actions-runner-controller/actions-runner-controller/pull/1268/files#r917327010
|
||||||
|
# allowGrantingKubernetesContainerModePermissions: true
|
||||||
|
|
||||||
serviceAccount:
|
serviceAccount:
|
||||||
# Specifies whether a service account should be created
|
# Specifies whether a service account should be created
|
||||||
create: true
|
create: true
|
||||||
@@ -109,7 +115,7 @@ metrics:
|
|||||||
enabled: true
|
enabled: true
|
||||||
image:
|
image:
|
||||||
repository: quay.io/brancz/kube-rbac-proxy
|
repository: quay.io/brancz/kube-rbac-proxy
|
||||||
tag: v0.12.0
|
tag: v0.13.0
|
||||||
|
|
||||||
resources:
|
resources:
|
||||||
{}
|
{}
|
||||||
@@ -143,10 +149,20 @@ priorityClassName: ""
|
|||||||
|
|
||||||
env:
|
env:
|
||||||
{}
|
{}
|
||||||
|
# specify additional environment variables for the controller pod.
|
||||||
|
# It's possible to specify either key vale pairs e.g.:
|
||||||
# http_proxy: "proxy.com:8080"
|
# http_proxy: "proxy.com:8080"
|
||||||
# https_proxy: "proxy.com:8080"
|
# https_proxy: "proxy.com:8080"
|
||||||
# no_proxy: ""
|
# no_proxy: ""
|
||||||
|
|
||||||
|
# or a list of complete environment variable definitions e.g.:
|
||||||
|
# - name: GITHUB_APP_INSTALLATION_ID
|
||||||
|
# valueFrom:
|
||||||
|
# secretKeyRef:
|
||||||
|
# key: some_key_in_the_secret
|
||||||
|
# name: some-secret-name
|
||||||
|
# optional: true
|
||||||
|
|
||||||
## specify additional volumes to mount in the manager container, this can be used
|
## specify additional volumes to mount in the manager container, this can be used
|
||||||
## to specify additional storage of material or to inject files from ConfigMaps
|
## to specify additional storage of material or to inject files from ConfigMaps
|
||||||
## into the running container
|
## into the running container
|
||||||
@@ -175,7 +191,6 @@ admissionWebHooks:
|
|||||||
githubWebhookServer:
|
githubWebhookServer:
|
||||||
enabled: false
|
enabled: false
|
||||||
replicaCount: 1
|
replicaCount: 1
|
||||||
syncPeriod: 10m
|
|
||||||
useRunnerGroupsVisibility: false
|
useRunnerGroupsVisibility: false
|
||||||
secret:
|
secret:
|
||||||
enabled: false
|
enabled: false
|
||||||
@@ -183,6 +198,13 @@ githubWebhookServer:
|
|||||||
name: "github-webhook-server"
|
name: "github-webhook-server"
|
||||||
### GitHub Webhook Configuration
|
### GitHub Webhook Configuration
|
||||||
github_webhook_secret_token: ""
|
github_webhook_secret_token: ""
|
||||||
|
### GitHub Apps Configuration
|
||||||
|
## NOTE: IDs MUST be strings, use quotes
|
||||||
|
#github_app_id: ""
|
||||||
|
#github_app_installation_id: ""
|
||||||
|
#github_app_private_key: |
|
||||||
|
### GitHub PAT Configuration
|
||||||
|
#github_token: ""
|
||||||
imagePullSecrets: []
|
imagePullSecrets: []
|
||||||
nameOverride: ""
|
nameOverride: ""
|
||||||
fullnameOverride: ""
|
fullnameOverride: ""
|
||||||
|
|||||||
@@ -69,9 +69,8 @@ func main() {
|
|||||||
|
|
||||||
watchNamespace string
|
watchNamespace string
|
||||||
|
|
||||||
enableLeaderElection bool
|
logLevel string
|
||||||
syncPeriod time.Duration
|
queueLimit int
|
||||||
logLevel string
|
|
||||||
|
|
||||||
ghClient *github.Client
|
ghClient *github.Client
|
||||||
)
|
)
|
||||||
@@ -88,10 +87,8 @@ func main() {
|
|||||||
flag.StringVar(&webhookAddr, "webhook-addr", ":8000", "The address the metric endpoint binds to.")
|
flag.StringVar(&webhookAddr, "webhook-addr", ":8000", "The address the metric endpoint binds to.")
|
||||||
flag.StringVar(&metricsAddr, "metrics-addr", ":8080", "The address the metric endpoint binds to.")
|
flag.StringVar(&metricsAddr, "metrics-addr", ":8080", "The address the metric endpoint binds to.")
|
||||||
flag.StringVar(&watchNamespace, "watch-namespace", "", "The namespace to watch for HorizontalRunnerAutoscaler's to scale on Webhook. Set to empty for letting it watch for all namespaces.")
|
flag.StringVar(&watchNamespace, "watch-namespace", "", "The namespace to watch for HorizontalRunnerAutoscaler's to scale on Webhook. Set to empty for letting it watch for all namespaces.")
|
||||||
flag.BoolVar(&enableLeaderElection, "enable-leader-election", false,
|
|
||||||
"Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager.")
|
|
||||||
flag.DurationVar(&syncPeriod, "sync-period", 10*time.Minute, "Determines the minimum frequency at which K8s resources managed by this controller are reconciled. When you use autoscaling, set to a lower value like 10 minute, because this corresponds to the minimum time to react on demand change")
|
|
||||||
flag.StringVar(&logLevel, "log-level", logging.LogLevelDebug, `The verbosity of the logging. Valid values are "debug", "info", "warn", "error". Defaults to "debug".`)
|
flag.StringVar(&logLevel, "log-level", logging.LogLevelDebug, `The verbosity of the logging. Valid values are "debug", "info", "warn", "error". Defaults to "debug".`)
|
||||||
|
flag.IntVar(&queueLimit, "queue-limit", controllers.DefaultQueueLimit, `The maximum length of the scale operation queue. The scale opration is enqueued per every matching webhook event, and the server returns a 500 HTTP status when the queue was already full on enqueue attempt.`)
|
||||||
flag.StringVar(&webhookSecretToken, "github-webhook-secret-token", "", "The personal access token of GitHub.")
|
flag.StringVar(&webhookSecretToken, "github-webhook-secret-token", "", "The personal access token of GitHub.")
|
||||||
flag.StringVar(&c.Token, "github-token", c.Token, "The personal access token of GitHub.")
|
flag.StringVar(&c.Token, "github-token", c.Token, "The personal access token of GitHub.")
|
||||||
flag.Int64Var(&c.AppID, "github-app-id", c.AppID, "The application ID of GitHub App.")
|
flag.Int64Var(&c.AppID, "github-app-id", c.AppID, "The application ID of GitHub App.")
|
||||||
@@ -142,10 +139,10 @@ func main() {
|
|||||||
setupLog.Info("GitHub client is not initialized. Runner groups with custom visibility are not supported. If needed, please provide GitHub authentication. This will incur in extra GitHub API calls")
|
setupLog.Info("GitHub client is not initialized. Runner groups with custom visibility are not supported. If needed, please provide GitHub authentication. This will incur in extra GitHub API calls")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
syncPeriod := 10 * time.Minute
|
||||||
mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
|
mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
|
||||||
Scheme: scheme,
|
Scheme: scheme,
|
||||||
SyncPeriod: &syncPeriod,
|
SyncPeriod: &syncPeriod,
|
||||||
LeaderElection: enableLeaderElection,
|
|
||||||
Namespace: watchNamespace,
|
Namespace: watchNamespace,
|
||||||
MetricsBindAddress: metricsAddr,
|
MetricsBindAddress: metricsAddr,
|
||||||
Port: 9443,
|
Port: 9443,
|
||||||
@@ -164,6 +161,7 @@ func main() {
|
|||||||
SecretKeyBytes: []byte(webhookSecretToken),
|
SecretKeyBytes: []byte(webhookSecretToken),
|
||||||
Namespace: watchNamespace,
|
Namespace: watchNamespace,
|
||||||
GitHubClient: ghClient,
|
GitHubClient: ghClient,
|
||||||
|
QueueLimit: queueLimit,
|
||||||
}
|
}
|
||||||
|
|
||||||
if err = hraGitHubWebhook.SetupWithManager(mgr); err != nil {
|
if err = hraGitHubWebhook.SetupWithManager(mgr); err != nil {
|
||||||
|
|||||||
@@ -61,6 +61,16 @@ spec:
|
|||||||
type: integer
|
type: integer
|
||||||
type: object
|
type: object
|
||||||
type: array
|
type: array
|
||||||
|
githubAPICredentialsFrom:
|
||||||
|
properties:
|
||||||
|
secretRef:
|
||||||
|
properties:
|
||||||
|
name:
|
||||||
|
type: string
|
||||||
|
required:
|
||||||
|
- name
|
||||||
|
type: object
|
||||||
|
type: object
|
||||||
maxReplicas:
|
maxReplicas:
|
||||||
description: MaxReplicas is the maximum number of replicas the deployment is allowed to scale
|
description: MaxReplicas is the maximum number of replicas the deployment is allowed to scale
|
||||||
type: integer
|
type: integer
|
||||||
|
|||||||
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
@@ -22,8 +22,6 @@ bases:
|
|||||||
- ../certmanager
|
- ../certmanager
|
||||||
# [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'.
|
# [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'.
|
||||||
#- ../prometheus
|
#- ../prometheus
|
||||||
# [GH_WEBHOOK_SERVER] To enable the GitHub webhook server, uncomment all sections with 'GH_WEBHOOK_SERVER'.
|
|
||||||
#- ../github-webhook-server
|
|
||||||
|
|
||||||
patchesStrategicMerge:
|
patchesStrategicMerge:
|
||||||
# Protect the /metrics endpoint by putting it behind auth.
|
# Protect the /metrics endpoint by putting it behind auth.
|
||||||
@@ -46,10 +44,6 @@ patchesStrategicMerge:
|
|||||||
# 'CERTMANAGER' needs to be enabled to use ca injection
|
# 'CERTMANAGER' needs to be enabled to use ca injection
|
||||||
- webhookcainjection_patch.yaml
|
- webhookcainjection_patch.yaml
|
||||||
|
|
||||||
# [GH_WEBHOOK_SERVER] To enable the GitHub webhook server, uncomment all sections with 'GH_WEBHOOK_SERVER'.
|
|
||||||
# Protect the GitHub webhook server metrics endpoint by putting it behind auth.
|
|
||||||
# - gh-webhook-server-auth-proxy-patch.yaml
|
|
||||||
|
|
||||||
# the following config is for teaching kustomize how to do var substitution
|
# the following config is for teaching kustomize how to do var substitution
|
||||||
vars:
|
vars:
|
||||||
# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER' prefix.
|
# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER' prefix.
|
||||||
|
|||||||
@@ -2,11 +2,14 @@ apiVersion: kustomize.config.k8s.io/v1beta1
|
|||||||
kind: Kustomization
|
kind: Kustomization
|
||||||
|
|
||||||
images:
|
images:
|
||||||
- name: controller
|
- name: controller
|
||||||
newName: summerwind/actions-runner-controller
|
newName: summerwind/actions-runner-controller
|
||||||
newTag: latest
|
newTag: latest
|
||||||
|
|
||||||
resources:
|
resources:
|
||||||
- deployment.yaml
|
- deployment.yaml
|
||||||
- rbac.yaml
|
- rbac.yaml
|
||||||
- service.yaml
|
- service.yaml
|
||||||
|
|
||||||
|
patchesStrategicMerge:
|
||||||
|
- gh-webhook-server-auth-proxy-patch.yaml
|
||||||
|
|||||||
@@ -249,3 +249,36 @@ rules:
|
|||||||
- patch
|
- patch
|
||||||
- update
|
- update
|
||||||
- watch
|
- watch
|
||||||
|
- apiGroups:
|
||||||
|
- ""
|
||||||
|
resources:
|
||||||
|
- secrets
|
||||||
|
verbs:
|
||||||
|
- delete
|
||||||
|
- get
|
||||||
|
- list
|
||||||
|
- watch
|
||||||
|
- apiGroups:
|
||||||
|
- ""
|
||||||
|
resources:
|
||||||
|
- serviceaccounts
|
||||||
|
verbs:
|
||||||
|
- create
|
||||||
|
- delete
|
||||||
|
- get
|
||||||
|
- apiGroups:
|
||||||
|
- rbac.authorization.k8s.io
|
||||||
|
resources:
|
||||||
|
- rolebindings
|
||||||
|
verbs:
|
||||||
|
- create
|
||||||
|
- delete
|
||||||
|
- get
|
||||||
|
- apiGroups:
|
||||||
|
- rbac.authorization.k8s.io
|
||||||
|
resources:
|
||||||
|
- roles
|
||||||
|
verbs:
|
||||||
|
- create
|
||||||
|
- delete
|
||||||
|
- get
|
||||||
|
|||||||
@@ -9,7 +9,10 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
|
"github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
|
||||||
"github.com/google/go-github/v39/github"
|
arcgithub "github.com/actions-runner-controller/actions-runner-controller/github"
|
||||||
|
"github.com/google/go-github/v45/github"
|
||||||
|
corev1 "k8s.io/api/core/v1"
|
||||||
|
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
@@ -19,7 +22,7 @@ const (
|
|||||||
defaultScaleDownFactor = 0.7
|
defaultScaleDownFactor = 0.7
|
||||||
)
|
)
|
||||||
|
|
||||||
func (r *HorizontalRunnerAutoscalerReconciler) suggestDesiredReplicas(st scaleTarget, hra v1alpha1.HorizontalRunnerAutoscaler) (*int, error) {
|
func (r *HorizontalRunnerAutoscalerReconciler) suggestDesiredReplicas(ghc *arcgithub.Client, st scaleTarget, hra v1alpha1.HorizontalRunnerAutoscaler) (*int, error) {
|
||||||
if hra.Spec.MinReplicas == nil {
|
if hra.Spec.MinReplicas == nil {
|
||||||
return nil, fmt.Errorf("horizontalrunnerautoscaler %s/%s is missing minReplicas", hra.Namespace, hra.Name)
|
return nil, fmt.Errorf("horizontalrunnerautoscaler %s/%s is missing minReplicas", hra.Namespace, hra.Name)
|
||||||
} else if hra.Spec.MaxReplicas == nil {
|
} else if hra.Spec.MaxReplicas == nil {
|
||||||
@@ -46,9 +49,9 @@ func (r *HorizontalRunnerAutoscalerReconciler) suggestDesiredReplicas(st scaleTa
|
|||||||
|
|
||||||
switch primaryMetricType {
|
switch primaryMetricType {
|
||||||
case v1alpha1.AutoscalingMetricTypeTotalNumberOfQueuedAndInProgressWorkflowRuns:
|
case v1alpha1.AutoscalingMetricTypeTotalNumberOfQueuedAndInProgressWorkflowRuns:
|
||||||
suggested, err = r.suggestReplicasByQueuedAndInProgressWorkflowRuns(st, hra, &primaryMetric)
|
suggested, err = r.suggestReplicasByQueuedAndInProgressWorkflowRuns(ghc, st, hra, &primaryMetric)
|
||||||
case v1alpha1.AutoscalingMetricTypePercentageRunnersBusy:
|
case v1alpha1.AutoscalingMetricTypePercentageRunnersBusy:
|
||||||
suggested, err = r.suggestReplicasByPercentageRunnersBusy(st, hra, primaryMetric)
|
suggested, err = r.suggestReplicasByPercentageRunnersBusy(ghc, st, hra, primaryMetric)
|
||||||
default:
|
default:
|
||||||
return nil, fmt.Errorf("validating autoscaling metrics: unsupported metric type %q", primaryMetric)
|
return nil, fmt.Errorf("validating autoscaling metrics: unsupported metric type %q", primaryMetric)
|
||||||
}
|
}
|
||||||
@@ -81,11 +84,10 @@ func (r *HorizontalRunnerAutoscalerReconciler) suggestDesiredReplicas(st scaleTa
|
|||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
return r.suggestReplicasByQueuedAndInProgressWorkflowRuns(st, hra, &fallbackMetric)
|
return r.suggestReplicasByQueuedAndInProgressWorkflowRuns(ghc, st, hra, &fallbackMetric)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *HorizontalRunnerAutoscalerReconciler) suggestReplicasByQueuedAndInProgressWorkflowRuns(st scaleTarget, hra v1alpha1.HorizontalRunnerAutoscaler, metrics *v1alpha1.MetricSpec) (*int, error) {
|
func (r *HorizontalRunnerAutoscalerReconciler) suggestReplicasByQueuedAndInProgressWorkflowRuns(ghc *arcgithub.Client, st scaleTarget, hra v1alpha1.HorizontalRunnerAutoscaler, metrics *v1alpha1.MetricSpec) (*int, error) {
|
||||||
|
|
||||||
var repos [][]string
|
var repos [][]string
|
||||||
repoID := st.repo
|
repoID := st.repo
|
||||||
if repoID == "" {
|
if repoID == "" {
|
||||||
@@ -124,7 +126,7 @@ func (r *HorizontalRunnerAutoscalerReconciler) suggestReplicasByQueuedAndInProgr
|
|||||||
opt := github.ListWorkflowJobsOptions{ListOptions: github.ListOptions{PerPage: 50}}
|
opt := github.ListWorkflowJobsOptions{ListOptions: github.ListOptions{PerPage: 50}}
|
||||||
var allJobs []*github.WorkflowJob
|
var allJobs []*github.WorkflowJob
|
||||||
for {
|
for {
|
||||||
jobs, resp, err := r.GitHubClient.Actions.ListWorkflowJobs(context.TODO(), user, repoName, runID, &opt)
|
jobs, resp, err := ghc.Actions.ListWorkflowJobs(context.TODO(), user, repoName, runID, &opt)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
r.Log.Error(err, "Error listing workflow jobs")
|
r.Log.Error(err, "Error listing workflow jobs")
|
||||||
return //err
|
return //err
|
||||||
@@ -182,7 +184,7 @@ func (r *HorizontalRunnerAutoscalerReconciler) suggestReplicasByQueuedAndInProgr
|
|||||||
|
|
||||||
for _, repo := range repos {
|
for _, repo := range repos {
|
||||||
user, repoName := repo[0], repo[1]
|
user, repoName := repo[0], repo[1]
|
||||||
workflowRuns, err := r.GitHubClient.ListRepositoryWorkflowRuns(context.TODO(), user, repoName)
|
workflowRuns, err := ghc.ListRepositoryWorkflowRuns(context.TODO(), user, repoName)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -224,7 +226,7 @@ func (r *HorizontalRunnerAutoscalerReconciler) suggestReplicasByQueuedAndInProgr
|
|||||||
return &necessaryReplicas, nil
|
return &necessaryReplicas, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *HorizontalRunnerAutoscalerReconciler) suggestReplicasByPercentageRunnersBusy(st scaleTarget, hra v1alpha1.HorizontalRunnerAutoscaler, metrics v1alpha1.MetricSpec) (*int, error) {
|
func (r *HorizontalRunnerAutoscalerReconciler) suggestReplicasByPercentageRunnersBusy(ghc *arcgithub.Client, st scaleTarget, hra v1alpha1.HorizontalRunnerAutoscaler, metrics v1alpha1.MetricSpec) (*int, error) {
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
scaleUpThreshold := defaultScaleUpThreshold
|
scaleUpThreshold := defaultScaleUpThreshold
|
||||||
scaleDownThreshold := defaultScaleDownThreshold
|
scaleDownThreshold := defaultScaleDownThreshold
|
||||||
@@ -293,7 +295,7 @@ func (r *HorizontalRunnerAutoscalerReconciler) suggestReplicasByPercentageRunner
|
|||||||
)
|
)
|
||||||
|
|
||||||
// ListRunners will return all runners managed by GitHub - not restricted to ns
|
// ListRunners will return all runners managed by GitHub - not restricted to ns
|
||||||
runners, err := r.GitHubClient.ListRunners(
|
runners, err := ghc.ListRunners(
|
||||||
ctx,
|
ctx,
|
||||||
enterprise,
|
enterprise,
|
||||||
organization,
|
organization,
|
||||||
@@ -314,22 +316,52 @@ func (r *HorizontalRunnerAutoscalerReconciler) suggestReplicasByPercentageRunner
|
|||||||
numRunners int
|
numRunners int
|
||||||
numRunnersRegistered int
|
numRunnersRegistered int
|
||||||
numRunnersBusy int
|
numRunnersBusy int
|
||||||
|
numTerminatingBusy int
|
||||||
)
|
)
|
||||||
|
|
||||||
numRunners = len(runnerMap)
|
numRunners = len(runnerMap)
|
||||||
|
|
||||||
|
busyTerminatingRunnerPods := map[string]struct{}{}
|
||||||
|
|
||||||
|
kindLabel := LabelKeyRunnerDeploymentName
|
||||||
|
if hra.Spec.ScaleTargetRef.Kind == "RunnerSet" {
|
||||||
|
kindLabel = LabelKeyRunnerSetName
|
||||||
|
}
|
||||||
|
|
||||||
|
var runnerPodList corev1.PodList
|
||||||
|
if err := r.Client.List(ctx, &runnerPodList, client.InNamespace(hra.Namespace), client.MatchingLabels(map[string]string{
|
||||||
|
kindLabel: hra.Spec.ScaleTargetRef.Name,
|
||||||
|
})); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, p := range runnerPodList.Items {
|
||||||
|
if p.Annotations[AnnotationKeyUnregistrationFailureMessage] != "" {
|
||||||
|
busyTerminatingRunnerPods[p.Name] = struct{}{}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
for _, runner := range runners {
|
for _, runner := range runners {
|
||||||
if _, ok := runnerMap[*runner.Name]; ok {
|
if _, ok := runnerMap[*runner.Name]; ok {
|
||||||
numRunnersRegistered++
|
numRunnersRegistered++
|
||||||
|
|
||||||
if runner.GetBusy() {
|
if runner.GetBusy() {
|
||||||
numRunnersBusy++
|
numRunnersBusy++
|
||||||
|
} else if _, ok := busyTerminatingRunnerPods[*runner.Name]; ok {
|
||||||
|
numTerminatingBusy++
|
||||||
}
|
}
|
||||||
|
|
||||||
|
delete(busyTerminatingRunnerPods, *runner.Name)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Remaining busyTerminatingRunnerPods are runners that were not on the ListRunners API response yet
|
||||||
|
for range busyTerminatingRunnerPods {
|
||||||
|
numTerminatingBusy++
|
||||||
|
}
|
||||||
|
|
||||||
var desiredReplicas int
|
var desiredReplicas int
|
||||||
fractionBusy := float64(numRunnersBusy) / float64(desiredReplicasBefore)
|
fractionBusy := float64(numRunnersBusy+numTerminatingBusy) / float64(desiredReplicasBefore)
|
||||||
if fractionBusy >= scaleUpThreshold {
|
if fractionBusy >= scaleUpThreshold {
|
||||||
if scaleUpAdjustment > 0 {
|
if scaleUpAdjustment > 0 {
|
||||||
desiredReplicas = desiredReplicasBefore + scaleUpAdjustment
|
desiredReplicas = desiredReplicasBefore + scaleUpAdjustment
|
||||||
@@ -358,6 +390,7 @@ func (r *HorizontalRunnerAutoscalerReconciler) suggestReplicasByPercentageRunner
|
|||||||
"num_runners", numRunners,
|
"num_runners", numRunners,
|
||||||
"num_runners_registered", numRunnersRegistered,
|
"num_runners_registered", numRunnersRegistered,
|
||||||
"num_runners_busy", numRunnersBusy,
|
"num_runners_busy", numRunnersBusy,
|
||||||
|
"num_terminating_busy", numTerminatingBusy,
|
||||||
"namespace", hra.Namespace,
|
"namespace", hra.Namespace,
|
||||||
"kind", st.kind,
|
"kind", st.kind,
|
||||||
"name", st.st,
|
"name", st.st,
|
||||||
|
|||||||
@@ -330,7 +330,6 @@ func TestDetermineDesiredReplicas_RepositoryRunner(t *testing.T) {
|
|||||||
|
|
||||||
h := &HorizontalRunnerAutoscalerReconciler{
|
h := &HorizontalRunnerAutoscalerReconciler{
|
||||||
Log: log,
|
Log: log,
|
||||||
GitHubClient: client,
|
|
||||||
Scheme: scheme,
|
Scheme: scheme,
|
||||||
DefaultScaleDownDelay: DefaultScaleDownDelay,
|
DefaultScaleDownDelay: DefaultScaleDownDelay,
|
||||||
}
|
}
|
||||||
@@ -379,7 +378,7 @@ func TestDetermineDesiredReplicas_RepositoryRunner(t *testing.T) {
|
|||||||
|
|
||||||
st := h.scaleTargetFromRD(context.Background(), rd)
|
st := h.scaleTargetFromRD(context.Background(), rd)
|
||||||
|
|
||||||
got, err := h.computeReplicasWithCache(log, metav1Now.Time, st, hra, minReplicas)
|
got, err := h.computeReplicasWithCache(client, log, metav1Now.Time, st, hra, minReplicas)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if tc.err == "" {
|
if tc.err == "" {
|
||||||
t.Fatalf("unexpected error: expected none, got %v", err)
|
t.Fatalf("unexpected error: expected none, got %v", err)
|
||||||
@@ -720,7 +719,6 @@ func TestDetermineDesiredReplicas_OrganizationalRunner(t *testing.T) {
|
|||||||
h := &HorizontalRunnerAutoscalerReconciler{
|
h := &HorizontalRunnerAutoscalerReconciler{
|
||||||
Log: log,
|
Log: log,
|
||||||
Scheme: scheme,
|
Scheme: scheme,
|
||||||
GitHubClient: client,
|
|
||||||
DefaultScaleDownDelay: DefaultScaleDownDelay,
|
DefaultScaleDownDelay: DefaultScaleDownDelay,
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -781,7 +779,7 @@ func TestDetermineDesiredReplicas_OrganizationalRunner(t *testing.T) {
|
|||||||
|
|
||||||
st := h.scaleTargetFromRD(context.Background(), rd)
|
st := h.scaleTargetFromRD(context.Background(), rd)
|
||||||
|
|
||||||
got, err := h.computeReplicasWithCache(log, metav1Now.Time, st, hra, minReplicas)
|
got, err := h.computeReplicasWithCache(client, log, metav1Now.Time, st, hra, minReplicas)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if tc.err == "" {
|
if tc.err == "" {
|
||||||
t.Fatalf("unexpected error: expected none, got %v", err)
|
t.Fatalf("unexpected error: expected none, got %v", err)
|
||||||
|
|||||||
@@ -4,17 +4,22 @@ import "time"
|
|||||||
|
|
||||||
const (
|
const (
|
||||||
LabelKeyRunnerSetName = "runnerset-name"
|
LabelKeyRunnerSetName = "runnerset-name"
|
||||||
|
LabelKeyRunner = "actions-runner"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
// This names requires at least one slash to work.
|
// This names requires at least one slash to work.
|
||||||
// See https://github.com/google/knative-gcp/issues/378
|
// See https://github.com/google/knative-gcp/issues/378
|
||||||
runnerPodFinalizerName = "actions.summerwind.dev/runner-pod"
|
runnerPodFinalizerName = "actions.summerwind.dev/runner-pod"
|
||||||
|
runnerLinkedResourcesFinalizerName = "actions.summerwind.dev/linked-resources"
|
||||||
|
|
||||||
annotationKeyPrefix = "actions-runner/"
|
annotationKeyPrefix = "actions-runner/"
|
||||||
|
|
||||||
AnnotationKeyLastRegistrationCheckTime = "actions-runner-controller/last-registration-check-time"
|
AnnotationKeyLastRegistrationCheckTime = "actions-runner-controller/last-registration-check-time"
|
||||||
|
|
||||||
|
// AnnotationKeyUnregistrationFailureMessage is the annotation that is added onto the pod once it failed to be unregistered from GitHub due to e.g. 422 error
|
||||||
|
AnnotationKeyUnregistrationFailureMessage = annotationKeyPrefix + "unregistration-failure-message"
|
||||||
|
|
||||||
// AnnotationKeyUnregistrationCompleteTimestamp is the annotation that is added onto the pod once the previously started unregistration process has been completed.
|
// AnnotationKeyUnregistrationCompleteTimestamp is the annotation that is added onto the pod once the previously started unregistration process has been completed.
|
||||||
AnnotationKeyUnregistrationCompleteTimestamp = annotationKeyPrefix + "unregistration-complete-timestamp"
|
AnnotationKeyUnregistrationCompleteTimestamp = annotationKeyPrefix + "unregistration-complete-timestamp"
|
||||||
|
|
||||||
@@ -61,4 +66,7 @@ const (
|
|||||||
|
|
||||||
EnvVarRunnerName = "RUNNER_NAME"
|
EnvVarRunnerName = "RUNNER_NAME"
|
||||||
EnvVarRunnerToken = "RUNNER_TOKEN"
|
EnvVarRunnerToken = "RUNNER_TOKEN"
|
||||||
|
|
||||||
|
// defaultHookPath is path to the hook script used when the "containerMode: kubernetes" is specified
|
||||||
|
defaultRunnerHookPath = "/runner/k8s/index.js"
|
||||||
)
|
)
|
||||||
|
|||||||
207
controllers/horizontal_runner_autoscaler_batch_scale.go
Normal file
207
controllers/horizontal_runner_autoscaler_batch_scale.go
Normal file
@@ -0,0 +1,207 @@
|
|||||||
|
package controllers
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
|
||||||
|
"github.com/go-logr/logr"
|
||||||
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
|
"k8s.io/apimachinery/pkg/types"
|
||||||
|
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||||
|
)
|
||||||
|
|
||||||
|
type batchScaler struct {
|
||||||
|
Ctx context.Context
|
||||||
|
Client client.Client
|
||||||
|
Log logr.Logger
|
||||||
|
interval time.Duration
|
||||||
|
|
||||||
|
queue chan *ScaleTarget
|
||||||
|
workerStart sync.Once
|
||||||
|
}
|
||||||
|
|
||||||
|
func newBatchScaler(ctx context.Context, client client.Client, log logr.Logger) *batchScaler {
|
||||||
|
return &batchScaler{
|
||||||
|
Ctx: ctx,
|
||||||
|
Client: client,
|
||||||
|
Log: log,
|
||||||
|
interval: 3 * time.Second,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type batchScaleOperation struct {
|
||||||
|
namespacedName types.NamespacedName
|
||||||
|
scaleOps []scaleOperation
|
||||||
|
}
|
||||||
|
|
||||||
|
type scaleOperation struct {
|
||||||
|
trigger v1alpha1.ScaleUpTrigger
|
||||||
|
log logr.Logger
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add the scale target to the unbounded queue, blocking until the target is successfully added to the queue.
|
||||||
|
// All the targets in the queue are dequeued every 3 seconds, grouped by the HRA, and applied.
|
||||||
|
// In a happy path, batchScaler update each HRA only once, even though the HRA had two or more associated webhook events in the 3 seconds interval,
|
||||||
|
// which results in less K8s API calls and less HRA update conflicts in case your ARC installation receives a lot of webhook events
|
||||||
|
func (s *batchScaler) Add(st *ScaleTarget) {
|
||||||
|
if st == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
s.workerStart.Do(func() {
|
||||||
|
var expBackoff = []time.Duration{time.Second, 2 * time.Second, 4 * time.Second, 8 * time.Second, 16 * time.Second}
|
||||||
|
|
||||||
|
s.queue = make(chan *ScaleTarget)
|
||||||
|
|
||||||
|
log := s.Log
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
log.Info("Starting batch worker")
|
||||||
|
defer log.Info("Stopped batch worker")
|
||||||
|
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-s.Ctx.Done():
|
||||||
|
return
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
|
||||||
|
log.V(2).Info("Batch worker is dequeueing operations")
|
||||||
|
|
||||||
|
batches := map[types.NamespacedName]batchScaleOperation{}
|
||||||
|
after := time.After(s.interval)
|
||||||
|
var ops uint
|
||||||
|
|
||||||
|
batch:
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-after:
|
||||||
|
after = nil
|
||||||
|
break batch
|
||||||
|
case st := <-s.queue:
|
||||||
|
nsName := types.NamespacedName{
|
||||||
|
Namespace: st.HorizontalRunnerAutoscaler.Namespace,
|
||||||
|
Name: st.HorizontalRunnerAutoscaler.Name,
|
||||||
|
}
|
||||||
|
b, ok := batches[nsName]
|
||||||
|
if !ok {
|
||||||
|
b = batchScaleOperation{
|
||||||
|
namespacedName: nsName,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
b.scaleOps = append(b.scaleOps, scaleOperation{
|
||||||
|
log: *st.log,
|
||||||
|
trigger: st.ScaleUpTrigger,
|
||||||
|
})
|
||||||
|
batches[nsName] = b
|
||||||
|
ops++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
log.V(2).Info("Batch worker dequeued operations", "ops", ops, "batches", len(batches))
|
||||||
|
|
||||||
|
retry:
|
||||||
|
for i := 0; ; i++ {
|
||||||
|
failed := map[types.NamespacedName]batchScaleOperation{}
|
||||||
|
|
||||||
|
for nsName, b := range batches {
|
||||||
|
b := b
|
||||||
|
if err := s.batchScale(context.Background(), b); err != nil {
|
||||||
|
log.V(2).Info("Failed to scale due to error", "error", err)
|
||||||
|
failed[nsName] = b
|
||||||
|
} else {
|
||||||
|
log.V(2).Info("Successfully ran batch scale", "hra", b.namespacedName)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(failed) == 0 {
|
||||||
|
break retry
|
||||||
|
}
|
||||||
|
|
||||||
|
batches = failed
|
||||||
|
|
||||||
|
delay := 16 * time.Second
|
||||||
|
if i < len(expBackoff) {
|
||||||
|
delay = expBackoff[i]
|
||||||
|
}
|
||||||
|
time.Sleep(delay)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
})
|
||||||
|
|
||||||
|
s.queue <- st
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *batchScaler) batchScale(ctx context.Context, batch batchScaleOperation) error {
|
||||||
|
var hra v1alpha1.HorizontalRunnerAutoscaler
|
||||||
|
|
||||||
|
if err := s.Client.Get(ctx, batch.namespacedName, &hra); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
copy := hra.DeepCopy()
|
||||||
|
|
||||||
|
copy.Spec.CapacityReservations = getValidCapacityReservations(copy)
|
||||||
|
|
||||||
|
var added, completed int
|
||||||
|
|
||||||
|
for _, scale := range batch.scaleOps {
|
||||||
|
amount := 1
|
||||||
|
|
||||||
|
if scale.trigger.Amount != 0 {
|
||||||
|
amount = scale.trigger.Amount
|
||||||
|
}
|
||||||
|
|
||||||
|
scale.log.V(2).Info("Adding capacity reservation", "amount", amount)
|
||||||
|
|
||||||
|
if amount > 0 {
|
||||||
|
now := time.Now()
|
||||||
|
copy.Spec.CapacityReservations = append(copy.Spec.CapacityReservations, v1alpha1.CapacityReservation{
|
||||||
|
EffectiveTime: metav1.Time{Time: now},
|
||||||
|
ExpirationTime: metav1.Time{Time: now.Add(scale.trigger.Duration.Duration)},
|
||||||
|
Replicas: amount,
|
||||||
|
})
|
||||||
|
|
||||||
|
added += amount
|
||||||
|
} else if amount < 0 {
|
||||||
|
var reservations []v1alpha1.CapacityReservation
|
||||||
|
|
||||||
|
var found bool
|
||||||
|
|
||||||
|
for _, r := range copy.Spec.CapacityReservations {
|
||||||
|
if !found && r.Replicas+amount == 0 {
|
||||||
|
found = true
|
||||||
|
} else {
|
||||||
|
reservations = append(reservations, r)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
copy.Spec.CapacityReservations = reservations
|
||||||
|
|
||||||
|
completed += amount
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
before := len(hra.Spec.CapacityReservations)
|
||||||
|
expired := before - len(copy.Spec.CapacityReservations)
|
||||||
|
after := len(copy.Spec.CapacityReservations)
|
||||||
|
|
||||||
|
s.Log.V(1).Info(
|
||||||
|
fmt.Sprintf("Updating hra %s for capacityReservations update", hra.Name),
|
||||||
|
"before", before,
|
||||||
|
"expired", expired,
|
||||||
|
"added", added,
|
||||||
|
"completed", completed,
|
||||||
|
"after", after,
|
||||||
|
)
|
||||||
|
|
||||||
|
if err := s.Client.Update(ctx, copy); err != nil {
|
||||||
|
return fmt.Errorf("updating horizontalrunnerautoscaler to add capacity reservation: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
@@ -23,14 +23,14 @@ import (
|
|||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"net/http"
|
"net/http"
|
||||||
"strings"
|
"strings"
|
||||||
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
|
||||||
"k8s.io/apimachinery/pkg/types"
|
"k8s.io/apimachinery/pkg/types"
|
||||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||||
|
|
||||||
"github.com/go-logr/logr"
|
"github.com/go-logr/logr"
|
||||||
gogithub "github.com/google/go-github/v39/github"
|
gogithub "github.com/google/go-github/v45/github"
|
||||||
"k8s.io/apimachinery/pkg/runtime"
|
"k8s.io/apimachinery/pkg/runtime"
|
||||||
"k8s.io/client-go/tools/record"
|
"k8s.io/client-go/tools/record"
|
||||||
ctrl "sigs.k8s.io/controller-runtime"
|
ctrl "sigs.k8s.io/controller-runtime"
|
||||||
@@ -46,6 +46,8 @@ const (
|
|||||||
|
|
||||||
keyPrefixEnterprise = "enterprises/"
|
keyPrefixEnterprise = "enterprises/"
|
||||||
keyRunnerGroup = "/group/"
|
keyRunnerGroup = "/group/"
|
||||||
|
|
||||||
|
DefaultQueueLimit = 100
|
||||||
)
|
)
|
||||||
|
|
||||||
// HorizontalRunnerAutoscalerGitHubWebhook autoscales a HorizontalRunnerAutoscaler and the RunnerDeployment on each
|
// HorizontalRunnerAutoscalerGitHubWebhook autoscales a HorizontalRunnerAutoscaler and the RunnerDeployment on each
|
||||||
@@ -68,6 +70,15 @@ type HorizontalRunnerAutoscalerGitHubWebhook struct {
|
|||||||
// Set to empty for letting it watch for all namespaces.
|
// Set to empty for letting it watch for all namespaces.
|
||||||
Namespace string
|
Namespace string
|
||||||
Name string
|
Name string
|
||||||
|
|
||||||
|
// QueueLimit is the maximum length of the bounded queue of scale targets and their associated operations
|
||||||
|
// A scale target is enqueued on each retrieval of each eligible webhook event, so that it is processed asynchronously.
|
||||||
|
QueueLimit int
|
||||||
|
|
||||||
|
worker *worker
|
||||||
|
workerInit sync.Once
|
||||||
|
workerStart sync.Once
|
||||||
|
batchCh chan *ScaleTarget
|
||||||
}
|
}
|
||||||
|
|
||||||
func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) Reconcile(_ context.Context, request reconcile.Request) (reconcile.Result, error) {
|
func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) Reconcile(_ context.Context, request reconcile.Request) (reconcile.Result, error) {
|
||||||
@@ -312,9 +323,19 @@ func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) Handle(w http.Respons
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := autoscaler.tryScale(context.TODO(), target); err != nil {
|
autoscaler.workerInit.Do(func() {
|
||||||
log.Error(err, "could not scale up")
|
batchScaler := newBatchScaler(context.Background(), autoscaler.Client, autoscaler.Log)
|
||||||
|
|
||||||
|
queueLimit := autoscaler.QueueLimit
|
||||||
|
if queueLimit == 0 {
|
||||||
|
queueLimit = DefaultQueueLimit
|
||||||
|
}
|
||||||
|
autoscaler.worker = newWorker(context.Background(), queueLimit, batchScaler.Add)
|
||||||
|
})
|
||||||
|
|
||||||
|
target.log = &log
|
||||||
|
if ok := autoscaler.worker.Add(target); !ok {
|
||||||
|
log.Error(err, "Could not scale up due to queue full")
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -383,6 +404,8 @@ func matchTriggerConditionAgainstEvent(types []string, eventAction *string) bool
|
|||||||
type ScaleTarget struct {
|
type ScaleTarget struct {
|
||||||
v1alpha1.HorizontalRunnerAutoscaler
|
v1alpha1.HorizontalRunnerAutoscaler
|
||||||
v1alpha1.ScaleUpTrigger
|
v1alpha1.ScaleUpTrigger
|
||||||
|
|
||||||
|
log *logr.Logger
|
||||||
}
|
}
|
||||||
|
|
||||||
func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) searchScaleTargets(hras []v1alpha1.HorizontalRunnerAutoscaler, f func(v1alpha1.ScaleUpTrigger) bool) []ScaleTarget {
|
func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) searchScaleTargets(hras []v1alpha1.HorizontalRunnerAutoscaler, f func(v1alpha1.ScaleUpTrigger) bool) []ScaleTarget {
|
||||||
@@ -501,6 +524,7 @@ func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) getScaleUpTargetWithF
|
|||||||
if autoscaler.GitHubClient != nil {
|
if autoscaler.GitHubClient != nil {
|
||||||
simu := &simulator.Simulator{
|
simu := &simulator.Simulator{
|
||||||
Client: autoscaler.GitHubClient,
|
Client: autoscaler.GitHubClient,
|
||||||
|
Log: log,
|
||||||
}
|
}
|
||||||
// Get available organization runner groups and enterprise runner groups for a repository
|
// Get available organization runner groups and enterprise runner groups for a repository
|
||||||
// These are the sum of runner groups with repository access = All repositories and runner groups
|
// These are the sum of runner groups with repository access = All repositories and runner groups
|
||||||
@@ -770,63 +794,6 @@ HRA:
|
|||||||
return nil, nil
|
return nil, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) tryScale(ctx context.Context, target *ScaleTarget) error {
|
|
||||||
if target == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
copy := target.HorizontalRunnerAutoscaler.DeepCopy()
|
|
||||||
|
|
||||||
amount := 1
|
|
||||||
|
|
||||||
if target.ScaleUpTrigger.Amount != 0 {
|
|
||||||
amount = target.ScaleUpTrigger.Amount
|
|
||||||
}
|
|
||||||
|
|
||||||
capacityReservations := getValidCapacityReservations(copy)
|
|
||||||
|
|
||||||
if amount > 0 {
|
|
||||||
now := time.Now()
|
|
||||||
copy.Spec.CapacityReservations = append(capacityReservations, v1alpha1.CapacityReservation{
|
|
||||||
EffectiveTime: metav1.Time{Time: now},
|
|
||||||
ExpirationTime: metav1.Time{Time: now.Add(target.ScaleUpTrigger.Duration.Duration)},
|
|
||||||
Replicas: amount,
|
|
||||||
})
|
|
||||||
} else if amount < 0 {
|
|
||||||
var reservations []v1alpha1.CapacityReservation
|
|
||||||
|
|
||||||
var found bool
|
|
||||||
|
|
||||||
for _, r := range capacityReservations {
|
|
||||||
if !found && r.Replicas+amount == 0 {
|
|
||||||
found = true
|
|
||||||
} else {
|
|
||||||
reservations = append(reservations, r)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
copy.Spec.CapacityReservations = reservations
|
|
||||||
}
|
|
||||||
|
|
||||||
before := len(target.HorizontalRunnerAutoscaler.Spec.CapacityReservations)
|
|
||||||
expired := before - len(capacityReservations)
|
|
||||||
after := len(copy.Spec.CapacityReservations)
|
|
||||||
|
|
||||||
autoscaler.Log.V(1).Info(
|
|
||||||
fmt.Sprintf("Patching hra %s for capacityReservations update", target.HorizontalRunnerAutoscaler.Name),
|
|
||||||
"before", before,
|
|
||||||
"expired", expired,
|
|
||||||
"amount", amount,
|
|
||||||
"after", after,
|
|
||||||
)
|
|
||||||
|
|
||||||
if err := autoscaler.Client.Patch(ctx, copy, client.MergeFrom(&target.HorizontalRunnerAutoscaler)); err != nil {
|
|
||||||
return fmt.Errorf("patching horizontalrunnerautoscaler to add capacity reservation: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func getValidCapacityReservations(autoscaler *v1alpha1.HorizontalRunnerAutoscaler) []v1alpha1.CapacityReservation {
|
func getValidCapacityReservations(autoscaler *v1alpha1.HorizontalRunnerAutoscaler) []v1alpha1.CapacityReservation {
|
||||||
var capacityReservations []v1alpha1.CapacityReservation
|
var capacityReservations []v1alpha1.CapacityReservation
|
||||||
|
|
||||||
|
|||||||
@@ -3,7 +3,7 @@ package controllers
|
|||||||
import (
|
import (
|
||||||
"github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
|
"github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
|
||||||
"github.com/actions-runner-controller/actions-runner-controller/pkg/actionsglob"
|
"github.com/actions-runner-controller/actions-runner-controller/pkg/actionsglob"
|
||||||
"github.com/google/go-github/v39/github"
|
"github.com/google/go-github/v45/github"
|
||||||
)
|
)
|
||||||
|
|
||||||
func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) MatchCheckRunEvent(event *github.CheckRunEvent) func(scaleUpTrigger v1alpha1.ScaleUpTrigger) bool {
|
func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) MatchCheckRunEvent(event *github.CheckRunEvent) func(scaleUpTrigger v1alpha1.ScaleUpTrigger) bool {
|
||||||
|
|||||||
@@ -2,7 +2,7 @@ package controllers
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
|
"github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
|
||||||
"github.com/google/go-github/v39/github"
|
"github.com/google/go-github/v45/github"
|
||||||
)
|
)
|
||||||
|
|
||||||
func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) MatchPullRequestEvent(event *github.PullRequestEvent) func(scaleUpTrigger v1alpha1.ScaleUpTrigger) bool {
|
func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) MatchPullRequestEvent(event *github.PullRequestEvent) func(scaleUpTrigger v1alpha1.ScaleUpTrigger) bool {
|
||||||
|
|||||||
@@ -2,7 +2,7 @@ package controllers
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
|
"github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
|
||||||
"github.com/google/go-github/v39/github"
|
"github.com/google/go-github/v45/github"
|
||||||
)
|
)
|
||||||
|
|
||||||
func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) MatchPushEvent(event *github.PushEvent) func(scaleUpTrigger v1alpha1.ScaleUpTrigger) bool {
|
func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) MatchPushEvent(event *github.PushEvent) func(scaleUpTrigger v1alpha1.ScaleUpTrigger) bool {
|
||||||
|
|||||||
@@ -15,7 +15,7 @@ import (
|
|||||||
|
|
||||||
actionsv1alpha1 "github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
|
actionsv1alpha1 "github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
|
||||||
"github.com/go-logr/logr"
|
"github.com/go-logr/logr"
|
||||||
"github.com/google/go-github/v39/github"
|
"github.com/google/go-github/v45/github"
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
"k8s.io/apimachinery/pkg/runtime"
|
"k8s.io/apimachinery/pkg/runtime"
|
||||||
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
|
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
|
||||||
|
|||||||
55
controllers/horizontal_runner_autoscaler_webhook_worker.go
Normal file
55
controllers/horizontal_runner_autoscaler_webhook_worker.go
Normal file
@@ -0,0 +1,55 @@
|
|||||||
|
package controllers
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
)
|
||||||
|
|
||||||
|
// worker is a worker that has a non-blocking bounded queue of scale targets, dequeues scale target and executes the scale operation one by one.
|
||||||
|
type worker struct {
|
||||||
|
scaleTargetQueue chan *ScaleTarget
|
||||||
|
work func(*ScaleTarget)
|
||||||
|
done chan struct{}
|
||||||
|
}
|
||||||
|
|
||||||
|
func newWorker(ctx context.Context, queueLimit int, work func(*ScaleTarget)) *worker {
|
||||||
|
w := &worker{
|
||||||
|
scaleTargetQueue: make(chan *ScaleTarget, queueLimit),
|
||||||
|
work: work,
|
||||||
|
done: make(chan struct{}),
|
||||||
|
}
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
defer close(w.done)
|
||||||
|
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
return
|
||||||
|
case t := <-w.scaleTargetQueue:
|
||||||
|
work(t)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
return w
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add the scale target to the bounded queue, returning the result as a bool value. It returns true on successful enqueue, and returns false otherwise.
|
||||||
|
// When returned false, the queue is already full so the enqueue operation must be retried later.
|
||||||
|
// If the enqueue was triggered by an external source and there's no intermediate queue that we can use,
|
||||||
|
// you must instruct the source to resend the original request later.
|
||||||
|
// In case you're building a webhook server around this worker, this means that you must return a http error to the webhook server,
|
||||||
|
// so that (hopefully) the sender can resend the webhook event later, or at least the human operator can notice or be notified about the
|
||||||
|
// webhook develiery failure so that a manual retry can be done later.
|
||||||
|
func (w *worker) Add(st *ScaleTarget) bool {
|
||||||
|
select {
|
||||||
|
case w.scaleTargetQueue <- st:
|
||||||
|
return true
|
||||||
|
default:
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *worker) Done() chan struct{} {
|
||||||
|
return w.done
|
||||||
|
}
|
||||||
@@ -0,0 +1,36 @@
|
|||||||
|
package controllers
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestWorker_Add(t *testing.T) {
|
||||||
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
w := newWorker(ctx, 2, func(st *ScaleTarget) {})
|
||||||
|
require.True(t, w.Add(&ScaleTarget{}))
|
||||||
|
require.True(t, w.Add(&ScaleTarget{}))
|
||||||
|
require.False(t, w.Add(&ScaleTarget{}))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestWorker_Work(t *testing.T) {
|
||||||
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
var count int
|
||||||
|
|
||||||
|
w := newWorker(ctx, 1, func(st *ScaleTarget) {
|
||||||
|
count++
|
||||||
|
cancel()
|
||||||
|
})
|
||||||
|
require.True(t, w.Add(&ScaleTarget{}))
|
||||||
|
require.False(t, w.Add(&ScaleTarget{}))
|
||||||
|
|
||||||
|
<-w.Done()
|
||||||
|
|
||||||
|
require.Equal(t, count, 1)
|
||||||
|
}
|
||||||
@@ -24,7 +24,6 @@ import (
|
|||||||
|
|
||||||
corev1 "k8s.io/api/core/v1"
|
corev1 "k8s.io/api/core/v1"
|
||||||
|
|
||||||
"github.com/actions-runner-controller/actions-runner-controller/github"
|
|
||||||
"github.com/go-logr/logr"
|
"github.com/go-logr/logr"
|
||||||
kerrors "k8s.io/apimachinery/pkg/api/errors"
|
kerrors "k8s.io/apimachinery/pkg/api/errors"
|
||||||
"k8s.io/apimachinery/pkg/types"
|
"k8s.io/apimachinery/pkg/types"
|
||||||
@@ -38,6 +37,7 @@ import (
|
|||||||
|
|
||||||
"github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
|
"github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
|
||||||
"github.com/actions-runner-controller/actions-runner-controller/controllers/metrics"
|
"github.com/actions-runner-controller/actions-runner-controller/controllers/metrics"
|
||||||
|
arcgithub "github.com/actions-runner-controller/actions-runner-controller/github"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
@@ -47,11 +47,10 @@ const (
|
|||||||
// HorizontalRunnerAutoscalerReconciler reconciles a HorizontalRunnerAutoscaler object
|
// HorizontalRunnerAutoscalerReconciler reconciles a HorizontalRunnerAutoscaler object
|
||||||
type HorizontalRunnerAutoscalerReconciler struct {
|
type HorizontalRunnerAutoscalerReconciler struct {
|
||||||
client.Client
|
client.Client
|
||||||
GitHubClient *github.Client
|
GitHubClient *MultiGitHubClient
|
||||||
Log logr.Logger
|
Log logr.Logger
|
||||||
Recorder record.EventRecorder
|
Recorder record.EventRecorder
|
||||||
Scheme *runtime.Scheme
|
Scheme *runtime.Scheme
|
||||||
CacheDuration time.Duration
|
|
||||||
DefaultScaleDownDelay time.Duration
|
DefaultScaleDownDelay time.Duration
|
||||||
Name string
|
Name string
|
||||||
}
|
}
|
||||||
@@ -73,6 +72,8 @@ func (r *HorizontalRunnerAutoscalerReconciler) Reconcile(ctx context.Context, re
|
|||||||
}
|
}
|
||||||
|
|
||||||
if !hra.ObjectMeta.DeletionTimestamp.IsZero() {
|
if !hra.ObjectMeta.DeletionTimestamp.IsZero() {
|
||||||
|
r.GitHubClient.DeinitForHRA(&hra)
|
||||||
|
|
||||||
return ctrl.Result{}, nil
|
return ctrl.Result{}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -310,7 +311,12 @@ func (r *HorizontalRunnerAutoscalerReconciler) reconcile(ctx context.Context, re
|
|||||||
return ctrl.Result{}, err
|
return ctrl.Result{}, err
|
||||||
}
|
}
|
||||||
|
|
||||||
newDesiredReplicas, err := r.computeReplicasWithCache(log, now, st, hra, minReplicas)
|
ghc, err := r.GitHubClient.InitForHRA(context.Background(), &hra)
|
||||||
|
if err != nil {
|
||||||
|
return ctrl.Result{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
newDesiredReplicas, err := r.computeReplicasWithCache(ghc, log, now, st, hra, minReplicas)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
r.Recorder.Event(&hra, corev1.EventTypeNormal, "RunnerAutoscalingFailure", err.Error())
|
r.Recorder.Event(&hra, corev1.EventTypeNormal, "RunnerAutoscalingFailure", err.Error())
|
||||||
|
|
||||||
@@ -461,10 +467,10 @@ func (r *HorizontalRunnerAutoscalerReconciler) getMinReplicas(log logr.Logger, n
|
|||||||
return minReplicas, active, upcoming, nil
|
return minReplicas, active, upcoming, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *HorizontalRunnerAutoscalerReconciler) computeReplicasWithCache(log logr.Logger, now time.Time, st scaleTarget, hra v1alpha1.HorizontalRunnerAutoscaler, minReplicas int) (int, error) {
|
func (r *HorizontalRunnerAutoscalerReconciler) computeReplicasWithCache(ghc *arcgithub.Client, log logr.Logger, now time.Time, st scaleTarget, hra v1alpha1.HorizontalRunnerAutoscaler, minReplicas int) (int, error) {
|
||||||
var suggestedReplicas int
|
var suggestedReplicas int
|
||||||
|
|
||||||
v, err := r.suggestDesiredReplicas(st, hra)
|
v, err := r.suggestDesiredReplicas(ghc, st, hra)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -8,7 +8,7 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
|
|
||||||
github2 "github.com/actions-runner-controller/actions-runner-controller/github"
|
github2 "github.com/actions-runner-controller/actions-runner-controller/github"
|
||||||
"github.com/google/go-github/v39/github"
|
"github.com/google/go-github/v45/github"
|
||||||
|
|
||||||
"github.com/actions-runner-controller/actions-runner-controller/github/fake"
|
"github.com/actions-runner-controller/actions-runner-controller/github/fake"
|
||||||
|
|
||||||
@@ -99,12 +99,14 @@ func SetupIntegrationTest(ctx2 context.Context) *testEnvironment {
|
|||||||
return fmt.Sprintf("%s%s", ns.Name, name)
|
return fmt.Sprintf("%s%s", ns.Name, name)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
multiClient := NewMultiGitHubClient(mgr.GetClient(), env.ghClient)
|
||||||
|
|
||||||
runnerController := &RunnerReconciler{
|
runnerController := &RunnerReconciler{
|
||||||
Client: mgr.GetClient(),
|
Client: mgr.GetClient(),
|
||||||
Scheme: scheme.Scheme,
|
Scheme: scheme.Scheme,
|
||||||
Log: logf.Log,
|
Log: logf.Log,
|
||||||
Recorder: mgr.GetEventRecorderFor("runnerreplicaset-controller"),
|
Recorder: mgr.GetEventRecorderFor("runnerreplicaset-controller"),
|
||||||
GitHubClient: env.ghClient,
|
GitHubClient: multiClient,
|
||||||
RunnerImage: "example/runner:test",
|
RunnerImage: "example/runner:test",
|
||||||
DockerImage: "example/docker:test",
|
DockerImage: "example/docker:test",
|
||||||
Name: controllerName("runner"),
|
Name: controllerName("runner"),
|
||||||
@@ -116,12 +118,11 @@ func SetupIntegrationTest(ctx2 context.Context) *testEnvironment {
|
|||||||
Expect(err).NotTo(HaveOccurred(), "failed to setup runner controller")
|
Expect(err).NotTo(HaveOccurred(), "failed to setup runner controller")
|
||||||
|
|
||||||
replicasetController := &RunnerReplicaSetReconciler{
|
replicasetController := &RunnerReplicaSetReconciler{
|
||||||
Client: mgr.GetClient(),
|
Client: mgr.GetClient(),
|
||||||
Scheme: scheme.Scheme,
|
Scheme: scheme.Scheme,
|
||||||
Log: logf.Log,
|
Log: logf.Log,
|
||||||
Recorder: mgr.GetEventRecorderFor("runnerreplicaset-controller"),
|
Recorder: mgr.GetEventRecorderFor("runnerreplicaset-controller"),
|
||||||
GitHubClient: env.ghClient,
|
Name: controllerName("runnerreplicaset"),
|
||||||
Name: controllerName("runnerreplicaset"),
|
|
||||||
}
|
}
|
||||||
err = replicasetController.SetupWithManager(mgr)
|
err = replicasetController.SetupWithManager(mgr)
|
||||||
Expect(err).NotTo(HaveOccurred(), "failed to setup runnerreplicaset controller")
|
Expect(err).NotTo(HaveOccurred(), "failed to setup runnerreplicaset controller")
|
||||||
@@ -137,13 +138,12 @@ func SetupIntegrationTest(ctx2 context.Context) *testEnvironment {
|
|||||||
Expect(err).NotTo(HaveOccurred(), "failed to setup runnerdeployment controller")
|
Expect(err).NotTo(HaveOccurred(), "failed to setup runnerdeployment controller")
|
||||||
|
|
||||||
autoscalerController := &HorizontalRunnerAutoscalerReconciler{
|
autoscalerController := &HorizontalRunnerAutoscalerReconciler{
|
||||||
Client: mgr.GetClient(),
|
Client: mgr.GetClient(),
|
||||||
Scheme: scheme.Scheme,
|
Scheme: scheme.Scheme,
|
||||||
Log: logf.Log,
|
Log: logf.Log,
|
||||||
GitHubClient: env.ghClient,
|
GitHubClient: multiClient,
|
||||||
Recorder: mgr.GetEventRecorderFor("horizontalrunnerautoscaler-controller"),
|
Recorder: mgr.GetEventRecorderFor("horizontalrunnerautoscaler-controller"),
|
||||||
CacheDuration: 1 * time.Second,
|
Name: controllerName("horizontalrunnerautoscaler"),
|
||||||
Name: controllerName("horizontalrunnerautoscaler"),
|
|
||||||
}
|
}
|
||||||
err = autoscalerController.SetupWithManager(mgr)
|
err = autoscalerController.SetupWithManager(mgr)
|
||||||
Expect(err).NotTo(HaveOccurred(), "failed to setup autoscaler controller")
|
Expect(err).NotTo(HaveOccurred(), "failed to setup autoscaler controller")
|
||||||
@@ -1367,7 +1367,7 @@ func (env *testEnvironment) ExpectRegisteredNumberCountEventuallyEquals(want int
|
|||||||
|
|
||||||
return len(rs)
|
return len(rs)
|
||||||
},
|
},
|
||||||
time.Second*5, time.Millisecond*500).Should(Equal(want), optionalDescriptions...)
|
time.Second*10, time.Millisecond*500).Should(Equal(want), optionalDescriptions...)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (env *testEnvironment) SendOrgPullRequestEvent(org, repo, branch, action string) {
|
func (env *testEnvironment) SendOrgPullRequestEvent(org, repo, branch, action string) {
|
||||||
|
|||||||
389
controllers/multi_githubclient.go
Normal file
389
controllers/multi_githubclient.go
Normal file
@@ -0,0 +1,389 @@
|
|||||||
|
package controllers
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"crypto/sha1"
|
||||||
|
"encoding/base64"
|
||||||
|
"encoding/hex"
|
||||||
|
"fmt"
|
||||||
|
"sort"
|
||||||
|
"strconv"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
|
||||||
|
"github.com/actions-runner-controller/actions-runner-controller/github"
|
||||||
|
corev1 "k8s.io/api/core/v1"
|
||||||
|
"k8s.io/apimachinery/pkg/types"
|
||||||
|
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
// The api creds scret annotation is added by the runner controller or the runnerset controller according to runner.spec.githubAPICredentialsFrom.secretRef.name,
|
||||||
|
// so that the runner pod controller can share the same GitHub API credentials and the instance of the GitHub API client with the upstream controllers.
|
||||||
|
annotationKeyGitHubAPICredsSecret = annotationKeyPrefix + "github-api-creds-secret"
|
||||||
|
)
|
||||||
|
|
||||||
|
type runnerOwnerRef struct {
|
||||||
|
// kind is either StatefulSet or Runner, and populated via the owner reference in the runner pod controller or via the reconcilation target's kind in
|
||||||
|
// runnerset and runner controllers.
|
||||||
|
kind string
|
||||||
|
ns, name string
|
||||||
|
}
|
||||||
|
|
||||||
|
type secretRef struct {
|
||||||
|
ns, name string
|
||||||
|
}
|
||||||
|
|
||||||
|
// savedClient is the each cache entry that contains the client for the specific set of credentials,
|
||||||
|
// like a PAT or a pair of key and cert.
|
||||||
|
// the `hash` is a part of the savedClient not the key because we are going to keep only the client for the latest creds
|
||||||
|
// in case the operator updated the k8s secret containing the credentials.
|
||||||
|
type savedClient struct {
|
||||||
|
hash string
|
||||||
|
|
||||||
|
// refs is the map of all the objects that references this client, used for reference counting to gc
|
||||||
|
// the client if unneeded.
|
||||||
|
refs map[runnerOwnerRef]struct{}
|
||||||
|
|
||||||
|
*github.Client
|
||||||
|
}
|
||||||
|
|
||||||
|
type resourceReader interface {
|
||||||
|
Get(context.Context, types.NamespacedName, client.Object) error
|
||||||
|
}
|
||||||
|
|
||||||
|
type MultiGitHubClient struct {
|
||||||
|
mu sync.Mutex
|
||||||
|
|
||||||
|
client resourceReader
|
||||||
|
|
||||||
|
githubClient *github.Client
|
||||||
|
|
||||||
|
// The saved client is freed once all its dependents disappear, or the contents of the secret changed.
|
||||||
|
// We track dependents via a golang map embedded within the savedClient struct. Each dependent is checked on their respective Kubernetes finalizer,
|
||||||
|
// so that we won't miss any dependent's termination.
|
||||||
|
// The change is the secret is determined using the hash of its contents.
|
||||||
|
clients map[secretRef]savedClient
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewMultiGitHubClient(client resourceReader, githubClient *github.Client) *MultiGitHubClient {
|
||||||
|
return &MultiGitHubClient{
|
||||||
|
client: client,
|
||||||
|
githubClient: githubClient,
|
||||||
|
clients: map[secretRef]savedClient{},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Init sets up and return the *github.Client for the object.
|
||||||
|
// In case the object (like RunnerDeployment) does not request a custom client, it returns the default client.
|
||||||
|
func (c *MultiGitHubClient) InitForRunnerPod(ctx context.Context, pod *corev1.Pod) (*github.Client, error) {
|
||||||
|
// These 3 default values are used only when the user created the pod directly, not via Runner, RunnerReplicaSet, RunnerDeploment, or RunnerSet resources.
|
||||||
|
ref := refFromRunnerPod(pod)
|
||||||
|
secretName := pod.Annotations[annotationKeyGitHubAPICredsSecret]
|
||||||
|
|
||||||
|
// kind can be any of Pod, Runner, RunnerReplicaSet, RunnerDeployment, or RunnerSet depending on which custom resource the user directly created.
|
||||||
|
return c.initClientWithSecretName(ctx, pod.Namespace, secretName, ref)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Init sets up and return the *github.Client for the object.
|
||||||
|
// In case the object (like RunnerDeployment) does not request a custom client, it returns the default client.
|
||||||
|
func (c *MultiGitHubClient) InitForRunner(ctx context.Context, r *v1alpha1.Runner) (*github.Client, error) {
|
||||||
|
var secretName string
|
||||||
|
if r.Spec.GitHubAPICredentialsFrom != nil {
|
||||||
|
secretName = r.Spec.GitHubAPICredentialsFrom.SecretRef.Name
|
||||||
|
}
|
||||||
|
|
||||||
|
// These 3 default values are used only when the user created the runner resource directly, not via RunnerReplicaSet, RunnerDeploment, or RunnerSet resources.
|
||||||
|
ref := refFromRunner(r)
|
||||||
|
if ref.ns != r.Namespace {
|
||||||
|
return nil, fmt.Errorf("referencing github api creds secret from owner in another namespace is not supported yet")
|
||||||
|
}
|
||||||
|
|
||||||
|
// kind can be any of Runner, RunnerReplicaSet, or RunnerDeployment depending on which custom resource the user directly created.
|
||||||
|
return c.initClientWithSecretName(ctx, r.Namespace, secretName, ref)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Init sets up and return the *github.Client for the object.
|
||||||
|
// In case the object (like RunnerDeployment) does not request a custom client, it returns the default client.
|
||||||
|
func (c *MultiGitHubClient) InitForRunnerSet(ctx context.Context, rs *v1alpha1.RunnerSet) (*github.Client, error) {
|
||||||
|
ref := refFromRunnerSet(rs)
|
||||||
|
|
||||||
|
var secretName string
|
||||||
|
if rs.Spec.GitHubAPICredentialsFrom != nil {
|
||||||
|
secretName = rs.Spec.GitHubAPICredentialsFrom.SecretRef.Name
|
||||||
|
}
|
||||||
|
|
||||||
|
return c.initClientWithSecretName(ctx, rs.Namespace, secretName, ref)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Init sets up and return the *github.Client for the object.
|
||||||
|
// In case the object (like RunnerDeployment) does not request a custom client, it returns the default client.
|
||||||
|
func (c *MultiGitHubClient) InitForHRA(ctx context.Context, hra *v1alpha1.HorizontalRunnerAutoscaler) (*github.Client, error) {
|
||||||
|
ref := refFromHorizontalRunnerAutoscaler(hra)
|
||||||
|
|
||||||
|
var secretName string
|
||||||
|
if hra.Spec.GitHubAPICredentialsFrom != nil {
|
||||||
|
secretName = hra.Spec.GitHubAPICredentialsFrom.SecretRef.Name
|
||||||
|
}
|
||||||
|
|
||||||
|
return c.initClientWithSecretName(ctx, hra.Namespace, secretName, ref)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *MultiGitHubClient) DeinitForRunnerPod(p *corev1.Pod) {
|
||||||
|
secretName := p.Annotations[annotationKeyGitHubAPICredsSecret]
|
||||||
|
c.derefClient(p.Namespace, secretName, refFromRunnerPod(p))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *MultiGitHubClient) DeinitForRunner(r *v1alpha1.Runner) {
|
||||||
|
var secretName string
|
||||||
|
if r.Spec.GitHubAPICredentialsFrom != nil {
|
||||||
|
secretName = r.Spec.GitHubAPICredentialsFrom.SecretRef.Name
|
||||||
|
}
|
||||||
|
|
||||||
|
c.derefClient(r.Namespace, secretName, refFromRunner(r))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *MultiGitHubClient) DeinitForRunnerSet(rs *v1alpha1.RunnerSet) {
|
||||||
|
var secretName string
|
||||||
|
if rs.Spec.GitHubAPICredentialsFrom != nil {
|
||||||
|
secretName = rs.Spec.GitHubAPICredentialsFrom.SecretRef.Name
|
||||||
|
}
|
||||||
|
|
||||||
|
c.derefClient(rs.Namespace, secretName, refFromRunnerSet(rs))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *MultiGitHubClient) deinitClientForRunnerReplicaSet(rs *v1alpha1.RunnerReplicaSet) {
|
||||||
|
c.derefClient(rs.Namespace, rs.Spec.Template.Spec.GitHubAPICredentialsFrom.SecretRef.Name, refFromRunnerReplicaSet(rs))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *MultiGitHubClient) deinitClientForRunnerDeployment(rd *v1alpha1.RunnerDeployment) {
|
||||||
|
c.derefClient(rd.Namespace, rd.Spec.Template.Spec.GitHubAPICredentialsFrom.SecretRef.Name, refFromRunnerDeployment(rd))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *MultiGitHubClient) DeinitForHRA(hra *v1alpha1.HorizontalRunnerAutoscaler) {
|
||||||
|
var secretName string
|
||||||
|
if hra.Spec.GitHubAPICredentialsFrom != nil {
|
||||||
|
secretName = hra.Spec.GitHubAPICredentialsFrom.SecretRef.Name
|
||||||
|
}
|
||||||
|
|
||||||
|
c.derefClient(hra.Namespace, secretName, refFromHorizontalRunnerAutoscaler(hra))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *MultiGitHubClient) initClientForSecret(secret *corev1.Secret, dependent *runnerOwnerRef) (*savedClient, error) {
|
||||||
|
secRef := secretRef{
|
||||||
|
ns: secret.Namespace,
|
||||||
|
name: secret.Name,
|
||||||
|
}
|
||||||
|
|
||||||
|
cliRef := c.clients[secRef]
|
||||||
|
|
||||||
|
var ks []string
|
||||||
|
|
||||||
|
for k := range secret.Data {
|
||||||
|
ks = append(ks, k)
|
||||||
|
}
|
||||||
|
|
||||||
|
sort.SliceStable(ks, func(i, j int) bool { return ks[i] < ks[j] })
|
||||||
|
|
||||||
|
hash := sha1.New()
|
||||||
|
for _, k := range ks {
|
||||||
|
hash.Write(secret.Data[k])
|
||||||
|
}
|
||||||
|
hashStr := hex.EncodeToString(hash.Sum(nil))
|
||||||
|
|
||||||
|
if cliRef.hash != hashStr {
|
||||||
|
delete(c.clients, secRef)
|
||||||
|
|
||||||
|
conf, err := secretDataToGitHubClientConfig(secret.Data)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
cli, err := conf.NewClient()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
cliRef = savedClient{
|
||||||
|
hash: hashStr,
|
||||||
|
refs: map[runnerOwnerRef]struct{}{},
|
||||||
|
Client: cli,
|
||||||
|
}
|
||||||
|
|
||||||
|
c.clients[secRef] = cliRef
|
||||||
|
}
|
||||||
|
|
||||||
|
if dependent != nil {
|
||||||
|
c.clients[secRef].refs[*dependent] = struct{}{}
|
||||||
|
}
|
||||||
|
|
||||||
|
return &cliRef, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *MultiGitHubClient) initClientWithSecretName(ctx context.Context, ns, secretName string, runRef *runnerOwnerRef) (*github.Client, error) {
|
||||||
|
c.mu.Lock()
|
||||||
|
defer c.mu.Unlock()
|
||||||
|
|
||||||
|
if secretName == "" {
|
||||||
|
return c.githubClient, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
secRef := secretRef{
|
||||||
|
ns: ns,
|
||||||
|
name: secretName,
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, ok := c.clients[secRef]; !ok {
|
||||||
|
c.clients[secRef] = savedClient{}
|
||||||
|
}
|
||||||
|
|
||||||
|
var sec corev1.Secret
|
||||||
|
if err := c.client.Get(ctx, types.NamespacedName{Namespace: ns, Name: secretName}, &sec); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
savedClient, err := c.initClientForSecret(&sec, runRef)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return savedClient.Client, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *MultiGitHubClient) derefClient(ns, secretName string, dependent *runnerOwnerRef) {
|
||||||
|
c.mu.Lock()
|
||||||
|
defer c.mu.Unlock()
|
||||||
|
|
||||||
|
secRef := secretRef{
|
||||||
|
ns: ns,
|
||||||
|
name: secretName,
|
||||||
|
}
|
||||||
|
|
||||||
|
if dependent != nil {
|
||||||
|
delete(c.clients[secRef].refs, *dependent)
|
||||||
|
}
|
||||||
|
|
||||||
|
cliRef := c.clients[secRef]
|
||||||
|
|
||||||
|
if dependent == nil || len(cliRef.refs) == 0 {
|
||||||
|
delete(c.clients, secRef)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func decodeBase64(s []byte) (string, error) {
|
||||||
|
enc := base64.RawStdEncoding
|
||||||
|
dbuf := make([]byte, enc.DecodedLen(len(s)))
|
||||||
|
n, err := enc.Decode(dbuf, []byte(s))
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
return string(dbuf[:n]), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func secretDataToGitHubClientConfig(data map[string][]byte) (*github.Config, error) {
|
||||||
|
var (
|
||||||
|
conf github.Config
|
||||||
|
|
||||||
|
err error
|
||||||
|
)
|
||||||
|
|
||||||
|
conf.URL, err = decodeBase64(data["github_url"])
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
conf.UploadURL, err = decodeBase64(data["github_upload_url"])
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
conf.EnterpriseURL, err = decodeBase64(data["github_enterprise_url"])
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
conf.RunnerGitHubURL, err = decodeBase64(data["github_runner_url"])
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
conf.Token, err = decodeBase64(data["github_token"])
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
appID, err := decodeBase64(data["github_app_id"])
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
conf.AppID, err = strconv.ParseInt(appID, 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
instID, err := decodeBase64(data["github_app_installation_id"])
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
conf.AppInstallationID, err = strconv.ParseInt(instID, 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
conf.AppPrivateKey, err = decodeBase64(data["github_app_private_key"])
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return &conf, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func refFromRunnerDeployment(rd *v1alpha1.RunnerDeployment) *runnerOwnerRef {
|
||||||
|
return &runnerOwnerRef{
|
||||||
|
kind: rd.Kind,
|
||||||
|
ns: rd.Namespace,
|
||||||
|
name: rd.Name,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func refFromRunnerReplicaSet(rs *v1alpha1.RunnerReplicaSet) *runnerOwnerRef {
|
||||||
|
return &runnerOwnerRef{
|
||||||
|
kind: rs.Kind,
|
||||||
|
ns: rs.Namespace,
|
||||||
|
name: rs.Name,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func refFromRunner(r *v1alpha1.Runner) *runnerOwnerRef {
|
||||||
|
return &runnerOwnerRef{
|
||||||
|
kind: r.Kind,
|
||||||
|
ns: r.Namespace,
|
||||||
|
name: r.Name,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func refFromRunnerPod(po *corev1.Pod) *runnerOwnerRef {
|
||||||
|
return &runnerOwnerRef{
|
||||||
|
kind: po.Kind,
|
||||||
|
ns: po.Namespace,
|
||||||
|
name: po.Name,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
func refFromRunnerSet(rs *v1alpha1.RunnerSet) *runnerOwnerRef {
|
||||||
|
return &runnerOwnerRef{
|
||||||
|
kind: rs.Kind,
|
||||||
|
ns: rs.Namespace,
|
||||||
|
name: rs.Name,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func refFromHorizontalRunnerAutoscaler(hra *v1alpha1.HorizontalRunnerAutoscaler) *runnerOwnerRef {
|
||||||
|
return &runnerOwnerRef{
|
||||||
|
kind: hra.Kind,
|
||||||
|
ns: hra.Namespace,
|
||||||
|
name: hra.Name,
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -10,7 +10,9 @@ import (
|
|||||||
"k8s.io/apimachinery/pkg/api/resource"
|
"k8s.io/apimachinery/pkg/api/resource"
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
"k8s.io/apimachinery/pkg/runtime"
|
"k8s.io/apimachinery/pkg/runtime"
|
||||||
|
"k8s.io/apimachinery/pkg/types"
|
||||||
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
|
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
|
||||||
|
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||||
)
|
)
|
||||||
|
|
||||||
func newWorkGenericEphemeralVolume(t *testing.T, storageReq string) corev1.Volume {
|
func newWorkGenericEphemeralVolume(t *testing.T, storageReq string) corev1.Volume {
|
||||||
@@ -56,7 +58,7 @@ func TestNewRunnerPod(t *testing.T) {
|
|||||||
ObjectMeta: metav1.ObjectMeta{
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
Labels: map[string]string{
|
Labels: map[string]string{
|
||||||
"actions-runner-controller/inject-registration-token": "true",
|
"actions-runner-controller/inject-registration-token": "true",
|
||||||
"runnerset-name": "runner",
|
"actions-runner": "",
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
Spec: corev1.PodSpec{
|
Spec: corev1.PodSpec{
|
||||||
@@ -125,6 +127,10 @@ func TestNewRunnerPod(t *testing.T) {
|
|||||||
Name: "RUNNER_EPHEMERAL",
|
Name: "RUNNER_EPHEMERAL",
|
||||||
Value: "true",
|
Value: "true",
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
Name: "RUNNER_STATUS_UPDATE_HOOK",
|
||||||
|
Value: "false",
|
||||||
|
},
|
||||||
{
|
{
|
||||||
Name: "DOCKER_HOST",
|
Name: "DOCKER_HOST",
|
||||||
Value: "tcp://localhost:2376",
|
Value: "tcp://localhost:2376",
|
||||||
@@ -198,7 +204,7 @@ func TestNewRunnerPod(t *testing.T) {
|
|||||||
ObjectMeta: metav1.ObjectMeta{
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
Labels: map[string]string{
|
Labels: map[string]string{
|
||||||
"actions-runner-controller/inject-registration-token": "true",
|
"actions-runner-controller/inject-registration-token": "true",
|
||||||
"runnerset-name": "runner",
|
"actions-runner": "",
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
Spec: corev1.PodSpec{
|
Spec: corev1.PodSpec{
|
||||||
@@ -255,6 +261,10 @@ func TestNewRunnerPod(t *testing.T) {
|
|||||||
Name: "RUNNER_EPHEMERAL",
|
Name: "RUNNER_EPHEMERAL",
|
||||||
Value: "true",
|
Value: "true",
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
Name: "RUNNER_STATUS_UPDATE_HOOK",
|
||||||
|
Value: "false",
|
||||||
|
},
|
||||||
},
|
},
|
||||||
VolumeMounts: []corev1.VolumeMount{
|
VolumeMounts: []corev1.VolumeMount{
|
||||||
{
|
{
|
||||||
@@ -276,7 +286,7 @@ func TestNewRunnerPod(t *testing.T) {
|
|||||||
ObjectMeta: metav1.ObjectMeta{
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
Labels: map[string]string{
|
Labels: map[string]string{
|
||||||
"actions-runner-controller/inject-registration-token": "true",
|
"actions-runner-controller/inject-registration-token": "true",
|
||||||
"runnerset-name": "runner",
|
"actions-runner": "",
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
Spec: corev1.PodSpec{
|
Spec: corev1.PodSpec{
|
||||||
@@ -333,6 +343,10 @@ func TestNewRunnerPod(t *testing.T) {
|
|||||||
Name: "RUNNER_EPHEMERAL",
|
Name: "RUNNER_EPHEMERAL",
|
||||||
Value: "true",
|
Value: "true",
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
Name: "RUNNER_STATUS_UPDATE_HOOK",
|
||||||
|
Value: "false",
|
||||||
|
},
|
||||||
},
|
},
|
||||||
VolumeMounts: []corev1.VolumeMount{
|
VolumeMounts: []corev1.VolumeMount{
|
||||||
{
|
{
|
||||||
@@ -515,7 +529,7 @@ func TestNewRunnerPod(t *testing.T) {
|
|||||||
for i := range testcases {
|
for i := range testcases {
|
||||||
tc := testcases[i]
|
tc := testcases[i]
|
||||||
t.Run(tc.description, func(t *testing.T) {
|
t.Run(tc.description, func(t *testing.T) {
|
||||||
got, err := newRunnerPod("runner", tc.template, tc.config, defaultRunnerImage, defaultRunnerImagePullSecrets, defaultDockerImage, defaultDockerRegistryMirror, githubBaseURL)
|
got, err := newRunnerPod(tc.template, tc.config, defaultRunnerImage, defaultRunnerImagePullSecrets, defaultDockerImage, defaultDockerRegistryMirror, githubBaseURL, false)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, tc.want, got)
|
require.Equal(t, tc.want, got)
|
||||||
})
|
})
|
||||||
@@ -546,7 +560,7 @@ func TestNewRunnerPodFromRunnerController(t *testing.T) {
|
|||||||
Labels: map[string]string{
|
Labels: map[string]string{
|
||||||
"actions-runner-controller/inject-registration-token": "true",
|
"actions-runner-controller/inject-registration-token": "true",
|
||||||
"pod-template-hash": "8857b86c7",
|
"pod-template-hash": "8857b86c7",
|
||||||
"runnerset-name": "runner",
|
"actions-runner": "",
|
||||||
},
|
},
|
||||||
OwnerReferences: []metav1.OwnerReference{
|
OwnerReferences: []metav1.OwnerReference{
|
||||||
{
|
{
|
||||||
@@ -624,6 +638,10 @@ func TestNewRunnerPodFromRunnerController(t *testing.T) {
|
|||||||
Name: "RUNNER_EPHEMERAL",
|
Name: "RUNNER_EPHEMERAL",
|
||||||
Value: "true",
|
Value: "true",
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
Name: "RUNNER_STATUS_UPDATE_HOOK",
|
||||||
|
Value: "false",
|
||||||
|
},
|
||||||
{
|
{
|
||||||
Name: "DOCKER_HOST",
|
Name: "DOCKER_HOST",
|
||||||
Value: "tcp://localhost:2376",
|
Value: "tcp://localhost:2376",
|
||||||
@@ -703,7 +721,7 @@ func TestNewRunnerPodFromRunnerController(t *testing.T) {
|
|||||||
Labels: map[string]string{
|
Labels: map[string]string{
|
||||||
"actions-runner-controller/inject-registration-token": "true",
|
"actions-runner-controller/inject-registration-token": "true",
|
||||||
"pod-template-hash": "8857b86c7",
|
"pod-template-hash": "8857b86c7",
|
||||||
"runnerset-name": "runner",
|
"actions-runner": "",
|
||||||
},
|
},
|
||||||
OwnerReferences: []metav1.OwnerReference{
|
OwnerReferences: []metav1.OwnerReference{
|
||||||
{
|
{
|
||||||
@@ -769,6 +787,10 @@ func TestNewRunnerPodFromRunnerController(t *testing.T) {
|
|||||||
Name: "RUNNER_EPHEMERAL",
|
Name: "RUNNER_EPHEMERAL",
|
||||||
Value: "true",
|
Value: "true",
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
Name: "RUNNER_STATUS_UPDATE_HOOK",
|
||||||
|
Value: "false",
|
||||||
|
},
|
||||||
{
|
{
|
||||||
Name: "RUNNER_NAME",
|
Name: "RUNNER_NAME",
|
||||||
Value: "runner",
|
Value: "runner",
|
||||||
@@ -800,7 +822,7 @@ func TestNewRunnerPodFromRunnerController(t *testing.T) {
|
|||||||
Labels: map[string]string{
|
Labels: map[string]string{
|
||||||
"actions-runner-controller/inject-registration-token": "true",
|
"actions-runner-controller/inject-registration-token": "true",
|
||||||
"pod-template-hash": "8857b86c7",
|
"pod-template-hash": "8857b86c7",
|
||||||
"runnerset-name": "runner",
|
"actions-runner": "",
|
||||||
},
|
},
|
||||||
OwnerReferences: []metav1.OwnerReference{
|
OwnerReferences: []metav1.OwnerReference{
|
||||||
{
|
{
|
||||||
@@ -866,6 +888,10 @@ func TestNewRunnerPodFromRunnerController(t *testing.T) {
|
|||||||
Name: "RUNNER_EPHEMERAL",
|
Name: "RUNNER_EPHEMERAL",
|
||||||
Value: "true",
|
Value: "true",
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
Name: "RUNNER_STATUS_UPDATE_HOOK",
|
||||||
|
Value: "false",
|
||||||
|
},
|
||||||
{
|
{
|
||||||
Name: "RUNNER_NAME",
|
Name: "RUNNER_NAME",
|
||||||
Value: "runner",
|
Value: "runner",
|
||||||
@@ -1105,13 +1131,20 @@ func TestNewRunnerPodFromRunnerController(t *testing.T) {
|
|||||||
|
|
||||||
for i := range testcases {
|
for i := range testcases {
|
||||||
tc := testcases[i]
|
tc := testcases[i]
|
||||||
|
|
||||||
|
rr := &testResourceReader{
|
||||||
|
objects: map[types.NamespacedName]client.Object{},
|
||||||
|
}
|
||||||
|
|
||||||
|
multiClient := NewMultiGitHubClient(rr, &github.Client{GithubBaseURL: githubBaseURL})
|
||||||
|
|
||||||
t.Run(tc.description, func(t *testing.T) {
|
t.Run(tc.description, func(t *testing.T) {
|
||||||
r := &RunnerReconciler{
|
r := &RunnerReconciler{
|
||||||
RunnerImage: defaultRunnerImage,
|
RunnerImage: defaultRunnerImage,
|
||||||
RunnerImagePullSecrets: defaultRunnerImagePullSecrets,
|
RunnerImagePullSecrets: defaultRunnerImagePullSecrets,
|
||||||
DockerImage: defaultDockerImage,
|
DockerImage: defaultDockerImage,
|
||||||
DockerRegistryMirror: defaultDockerRegistryMirror,
|
DockerRegistryMirror: defaultDockerRegistryMirror,
|
||||||
GitHubClient: &github.Client{GithubBaseURL: githubBaseURL},
|
GitHubClient: multiClient,
|
||||||
Scheme: scheme,
|
Scheme: scheme,
|
||||||
}
|
}
|
||||||
got, err := r.newPod(tc.runner)
|
got, err := r.newPod(tc.runner)
|
||||||
|
|||||||
@@ -6,7 +6,6 @@ import (
|
|||||||
"net/http"
|
"net/http"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/actions-runner-controller/actions-runner-controller/github"
|
|
||||||
"github.com/go-logr/logr"
|
"github.com/go-logr/logr"
|
||||||
"gomodules.xyz/jsonpatch/v2"
|
"gomodules.xyz/jsonpatch/v2"
|
||||||
admissionv1 "k8s.io/api/admission/v1"
|
admissionv1 "k8s.io/api/admission/v1"
|
||||||
@@ -29,7 +28,7 @@ type PodRunnerTokenInjector struct {
|
|||||||
Name string
|
Name string
|
||||||
Log logr.Logger
|
Log logr.Logger
|
||||||
Recorder record.EventRecorder
|
Recorder record.EventRecorder
|
||||||
GitHubClient *github.Client
|
GitHubClient *MultiGitHubClient
|
||||||
decoder *admission.Decoder
|
decoder *admission.Decoder
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -66,7 +65,12 @@ func (t *PodRunnerTokenInjector) Handle(ctx context.Context, req admission.Reque
|
|||||||
return newEmptyResponse()
|
return newEmptyResponse()
|
||||||
}
|
}
|
||||||
|
|
||||||
rt, err := t.GitHubClient.GetRegistrationToken(context.Background(), enterprise, org, repo, pod.Name)
|
ghc, err := t.GitHubClient.InitForRunnerPod(ctx, &pod)
|
||||||
|
if err != nil {
|
||||||
|
return admission.Errored(http.StatusInternalServerError, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
rt, err := ghc.GetRegistrationToken(context.Background(), enterprise, org, repo, pod.Name)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Log.Error(err, "Failed to get new registration token")
|
t.Log.Error(err, "Failed to get new registration token")
|
||||||
return admission.Errored(http.StatusInternalServerError, err)
|
return admission.Errored(http.StatusInternalServerError, err)
|
||||||
|
|||||||
@@ -18,7 +18,10 @@ package controllers
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"reflect"
|
||||||
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
@@ -33,10 +36,10 @@ import (
|
|||||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||||
|
|
||||||
corev1 "k8s.io/api/core/v1"
|
corev1 "k8s.io/api/core/v1"
|
||||||
|
rbacv1 "k8s.io/api/rbac/v1"
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
|
|
||||||
"github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
|
"github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
|
||||||
"github.com/actions-runner-controller/actions-runner-controller/github"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
@@ -49,6 +52,8 @@ const (
|
|||||||
|
|
||||||
EnvVarOrg = "RUNNER_ORG"
|
EnvVarOrg = "RUNNER_ORG"
|
||||||
EnvVarRepo = "RUNNER_REPO"
|
EnvVarRepo = "RUNNER_REPO"
|
||||||
|
EnvVarGroup = "RUNNER_GROUP"
|
||||||
|
EnvVarLabels = "RUNNER_LABELS"
|
||||||
EnvVarEnterprise = "RUNNER_ENTERPRISE"
|
EnvVarEnterprise = "RUNNER_ENTERPRISE"
|
||||||
EnvVarEphemeral = "RUNNER_EPHEMERAL"
|
EnvVarEphemeral = "RUNNER_EPHEMERAL"
|
||||||
EnvVarTrue = "true"
|
EnvVarTrue = "true"
|
||||||
@@ -60,7 +65,7 @@ type RunnerReconciler struct {
|
|||||||
Log logr.Logger
|
Log logr.Logger
|
||||||
Recorder record.EventRecorder
|
Recorder record.EventRecorder
|
||||||
Scheme *runtime.Scheme
|
Scheme *runtime.Scheme
|
||||||
GitHubClient *github.Client
|
GitHubClient *MultiGitHubClient
|
||||||
RunnerImage string
|
RunnerImage string
|
||||||
RunnerImagePullSecrets []string
|
RunnerImagePullSecrets []string
|
||||||
DockerImage string
|
DockerImage string
|
||||||
@@ -68,16 +73,20 @@ type RunnerReconciler struct {
|
|||||||
Name string
|
Name string
|
||||||
RegistrationRecheckInterval time.Duration
|
RegistrationRecheckInterval time.Duration
|
||||||
RegistrationRecheckJitter time.Duration
|
RegistrationRecheckJitter time.Duration
|
||||||
|
UseRunnerStatusUpdateHook bool
|
||||||
UnregistrationRetryDelay time.Duration
|
UnregistrationRetryDelay time.Duration
|
||||||
}
|
}
|
||||||
|
|
||||||
// +kubebuilder:rbac:groups=actions.summerwind.dev,resources=runners,verbs=get;list;watch;create;update;patch;delete
|
// +kubebuilder:rbac:groups=actions.summerwind.dev,resources=runners,verbs=get;list;watch;create;update;patch;delete
|
||||||
// +kubebuilder:rbac:groups=actions.summerwind.dev,resources=runners/finalizers,verbs=get;list;watch;create;update;patch;delete
|
// +kubebuilder:rbac:groups=actions.summerwind.dev,resources=runners/finalizers,verbs=get;list;watch;create;update;patch;delete
|
||||||
// +kubebuilder:rbac:groups=actions.summerwind.dev,resources=runners/status,verbs=get;update;patch
|
// +kubebuilder:rbac:groups=actions.summerwind.dev,resources=runners/status,verbs=get;update;patch
|
||||||
// +kubebuilder:rbac:groups=core,resources=pods,verbs=get;list;watch;create;update;patch;delete
|
// +kubebuilder:rbac:groups=core,resources=pods,verbs=get;list;watch;create;update;patch;delete
|
||||||
|
// +kubebuilder:rbac:groups=core,resources=secrets,verbs=get;list;watch;delete
|
||||||
// +kubebuilder:rbac:groups=core,resources=pods/finalizers,verbs=get;list;watch;create;update;patch;delete
|
// +kubebuilder:rbac:groups=core,resources=pods/finalizers,verbs=get;list;watch;create;update;patch;delete
|
||||||
// +kubebuilder:rbac:groups=core,resources=events,verbs=create;patch
|
// +kubebuilder:rbac:groups=core,resources=events,verbs=create;patch
|
||||||
|
// +kubebuilder:rbac:groups=core,resources=serviceaccounts,verbs=create;delete;get
|
||||||
|
// +kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=roles,verbs=create;delete;get
|
||||||
|
// +kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=rolebindings,verbs=create;delete;get
|
||||||
|
|
||||||
func (r *RunnerReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
|
func (r *RunnerReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
|
||||||
log := r.Log.WithValues("runner", req.NamespacedName)
|
log := r.Log.WithValues("runner", req.NamespacedName)
|
||||||
@@ -112,6 +121,9 @@ func (r *RunnerReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr
|
|||||||
// Pod was not found
|
// Pod was not found
|
||||||
return r.processRunnerDeletion(runner, ctx, log, nil)
|
return r.processRunnerDeletion(runner, ctx, log, nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
r.GitHubClient.DeinitForRunner(&runner)
|
||||||
|
|
||||||
return r.processRunnerDeletion(runner, ctx, log, &pod)
|
return r.processRunnerDeletion(runner, ctx, log, &pod)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -131,7 +143,7 @@ func (r *RunnerReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr
|
|||||||
|
|
||||||
ready := runnerPodReady(&pod)
|
ready := runnerPodReady(&pod)
|
||||||
|
|
||||||
if runner.Status.Phase != phase || runner.Status.Ready != ready {
|
if (runner.Status.Phase != phase || runner.Status.Ready != ready) && !r.UseRunnerStatusUpdateHook || runner.Status.Phase == "" && r.UseRunnerStatusUpdateHook {
|
||||||
if pod.Status.Phase == corev1.PodRunning {
|
if pod.Status.Phase == corev1.PodRunning {
|
||||||
// Seeing this message, you can expect the runner to become `Running` soon.
|
// Seeing this message, you can expect the runner to become `Running` soon.
|
||||||
log.V(1).Info(
|
log.V(1).Info(
|
||||||
@@ -252,6 +264,96 @@ func (r *RunnerReconciler) processRunnerCreation(ctx context.Context, runner v1a
|
|||||||
return ctrl.Result{}, err
|
return ctrl.Result{}, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
needsServiceAccount := runner.Spec.ServiceAccountName == "" && (r.UseRunnerStatusUpdateHook || runner.Spec.ContainerMode == "kubernetes")
|
||||||
|
if needsServiceAccount {
|
||||||
|
serviceAccount := &corev1.ServiceAccount{
|
||||||
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
|
Name: runner.ObjectMeta.Name,
|
||||||
|
Namespace: runner.ObjectMeta.Namespace,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
if res := r.createObject(ctx, serviceAccount, serviceAccount.ObjectMeta, &runner, log); res != nil {
|
||||||
|
return *res, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
rules := []rbacv1.PolicyRule{}
|
||||||
|
|
||||||
|
if r.UseRunnerStatusUpdateHook {
|
||||||
|
rules = append(rules, []rbacv1.PolicyRule{
|
||||||
|
{
|
||||||
|
APIGroups: []string{"actions.summerwind.dev"},
|
||||||
|
Resources: []string{"runners/status"},
|
||||||
|
Verbs: []string{"get", "update", "patch"},
|
||||||
|
ResourceNames: []string{runner.ObjectMeta.Name},
|
||||||
|
},
|
||||||
|
}...)
|
||||||
|
}
|
||||||
|
|
||||||
|
if runner.Spec.ContainerMode == "kubernetes" {
|
||||||
|
// Permissions based on https://github.com/actions/runner-container-hooks/blob/main/packages/k8s/README.md
|
||||||
|
rules = append(rules, []rbacv1.PolicyRule{
|
||||||
|
{
|
||||||
|
APIGroups: []string{""},
|
||||||
|
Resources: []string{"pods"},
|
||||||
|
Verbs: []string{"get", "list", "create", "delete"},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
APIGroups: []string{""},
|
||||||
|
Resources: []string{"pods/exec"},
|
||||||
|
Verbs: []string{"get", "create"},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
APIGroups: []string{""},
|
||||||
|
Resources: []string{"pods/log"},
|
||||||
|
Verbs: []string{"get", "list", "watch"},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
APIGroups: []string{"batch"},
|
||||||
|
Resources: []string{"jobs"},
|
||||||
|
Verbs: []string{"get", "list", "create", "delete"},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
APIGroups: []string{""},
|
||||||
|
Resources: []string{"secrets"},
|
||||||
|
Verbs: []string{"get", "list", "create", "delete"},
|
||||||
|
},
|
||||||
|
}...)
|
||||||
|
}
|
||||||
|
|
||||||
|
role := &rbacv1.Role{
|
||||||
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
|
Name: runner.ObjectMeta.Name,
|
||||||
|
Namespace: runner.ObjectMeta.Namespace,
|
||||||
|
},
|
||||||
|
Rules: rules,
|
||||||
|
}
|
||||||
|
if res := r.createObject(ctx, role, role.ObjectMeta, &runner, log); res != nil {
|
||||||
|
return *res, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
roleBinding := &rbacv1.RoleBinding{
|
||||||
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
|
Name: runner.ObjectMeta.Name,
|
||||||
|
Namespace: runner.ObjectMeta.Namespace,
|
||||||
|
},
|
||||||
|
RoleRef: rbacv1.RoleRef{
|
||||||
|
APIGroup: "rbac.authorization.k8s.io",
|
||||||
|
Kind: "Role",
|
||||||
|
Name: runner.ObjectMeta.Name,
|
||||||
|
},
|
||||||
|
Subjects: []rbacv1.Subject{
|
||||||
|
{
|
||||||
|
Kind: "ServiceAccount",
|
||||||
|
Name: runner.ObjectMeta.Name,
|
||||||
|
Namespace: runner.ObjectMeta.Namespace,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
if res := r.createObject(ctx, roleBinding, roleBinding.ObjectMeta, &runner, log); res != nil {
|
||||||
|
return *res, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
if err := r.Create(ctx, &newPod); err != nil {
|
if err := r.Create(ctx, &newPod); err != nil {
|
||||||
if kerrors.IsAlreadyExists(err) {
|
if kerrors.IsAlreadyExists(err) {
|
||||||
// Gracefully handle pod-already-exists errors due to informer cache delay.
|
// Gracefully handle pod-already-exists errors due to informer cache delay.
|
||||||
@@ -274,6 +376,27 @@ func (r *RunnerReconciler) processRunnerCreation(ctx context.Context, runner v1a
|
|||||||
return ctrl.Result{}, nil
|
return ctrl.Result{}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (r *RunnerReconciler) createObject(ctx context.Context, obj client.Object, meta metav1.ObjectMeta, runner *v1alpha1.Runner, log logr.Logger) *ctrl.Result {
|
||||||
|
kind := strings.Split(reflect.TypeOf(obj).String(), ".")[1]
|
||||||
|
if err := ctrl.SetControllerReference(runner, obj, r.Scheme); err != nil {
|
||||||
|
log.Error(err, fmt.Sprintf("Could not add owner reference to %s %s. %s", kind, meta.Name, err.Error()))
|
||||||
|
return &ctrl.Result{Requeue: true}
|
||||||
|
}
|
||||||
|
if err := r.Create(ctx, obj); err != nil {
|
||||||
|
if kerrors.IsAlreadyExists(err) {
|
||||||
|
log.Info(fmt.Sprintf("Failed to create %s %s as it already exists. Reusing existing %s", kind, meta.Name, kind))
|
||||||
|
r.Recorder.Event(runner, corev1.EventTypeNormal, fmt.Sprintf("%sReused", kind), fmt.Sprintf("Reused %s '%s'", kind, meta.Name))
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Error(err, fmt.Sprintf("Retrying as failed to create %s %s resource", kind, meta.Name))
|
||||||
|
return &ctrl.Result{Requeue: true}
|
||||||
|
}
|
||||||
|
r.Recorder.Event(runner, corev1.EventTypeNormal, fmt.Sprintf("%sCreated", kind), fmt.Sprintf("Created %s '%s'", kind, meta.Name))
|
||||||
|
log.Info(fmt.Sprintf("Created %s", kind), "name", meta.Name)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
func (r *RunnerReconciler) updateRegistrationToken(ctx context.Context, runner v1alpha1.Runner) (bool, error) {
|
func (r *RunnerReconciler) updateRegistrationToken(ctx context.Context, runner v1alpha1.Runner) (bool, error) {
|
||||||
if runner.IsRegisterable() {
|
if runner.IsRegisterable() {
|
||||||
return false, nil
|
return false, nil
|
||||||
@@ -281,7 +404,12 @@ func (r *RunnerReconciler) updateRegistrationToken(ctx context.Context, runner v
|
|||||||
|
|
||||||
log := r.Log.WithValues("runner", runner.Name)
|
log := r.Log.WithValues("runner", runner.Name)
|
||||||
|
|
||||||
rt, err := r.GitHubClient.GetRegistrationToken(ctx, runner.Spec.Enterprise, runner.Spec.Organization, runner.Spec.Repository, runner.Name)
|
ghc, err := r.GitHubClient.InitForRunner(ctx, &runner)
|
||||||
|
if err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
rt, err := ghc.GetRegistrationToken(ctx, runner.Spec.Enterprise, runner.Spec.Organization, runner.Spec.Repository, runner.Name)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// An error can be a permanent, permission issue like the below:
|
// An error can be a permanent, permission issue like the below:
|
||||||
// POST https://api.github.com/enterprises/YOUR_ENTERPRISE/actions/runners/registration-token: 403 Resource not accessible by integration []
|
// POST https://api.github.com/enterprises/YOUR_ENTERPRISE/actions/runners/registration-token: 403 Resource not accessible by integration []
|
||||||
@@ -321,6 +449,11 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) {
|
|||||||
labels[k] = v
|
labels[k] = v
|
||||||
}
|
}
|
||||||
|
|
||||||
|
ghc, err := r.GitHubClient.InitForRunner(context.Background(), &runner)
|
||||||
|
if err != nil {
|
||||||
|
return corev1.Pod{}, err
|
||||||
|
}
|
||||||
|
|
||||||
// This implies that...
|
// This implies that...
|
||||||
//
|
//
|
||||||
// (1) We recreate the runner pod whenever the runner has changes in:
|
// (1) We recreate the runner pod whenever the runner has changes in:
|
||||||
@@ -344,7 +477,7 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) {
|
|||||||
filterLabels(runner.ObjectMeta.Labels, LabelKeyRunnerTemplateHash),
|
filterLabels(runner.ObjectMeta.Labels, LabelKeyRunnerTemplateHash),
|
||||||
runner.ObjectMeta.Annotations,
|
runner.ObjectMeta.Annotations,
|
||||||
runner.Spec,
|
runner.Spec,
|
||||||
r.GitHubClient.GithubBaseURL,
|
ghc.GithubBaseURL,
|
||||||
// Token change should trigger replacement.
|
// Token change should trigger replacement.
|
||||||
// We need to include this explicitly here because
|
// We need to include this explicitly here because
|
||||||
// runner.Spec does not contain the possibly updated token stored in the
|
// runner.Spec does not contain the possibly updated token stored in the
|
||||||
@@ -412,7 +545,17 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) {
|
|||||||
template.Spec.SecurityContext = runner.Spec.SecurityContext
|
template.Spec.SecurityContext = runner.Spec.SecurityContext
|
||||||
template.Spec.EnableServiceLinks = runner.Spec.EnableServiceLinks
|
template.Spec.EnableServiceLinks = runner.Spec.EnableServiceLinks
|
||||||
|
|
||||||
pod, err := newRunnerPod(runner.Name, template, runner.Spec.RunnerConfig, r.RunnerImage, r.RunnerImagePullSecrets, r.DockerImage, r.DockerRegistryMirror, r.GitHubClient.GithubBaseURL)
|
if runner.Spec.ContainerMode == "kubernetes" {
|
||||||
|
workDir := runner.Spec.WorkDir
|
||||||
|
if workDir == "" {
|
||||||
|
workDir = "/runner/_work"
|
||||||
|
}
|
||||||
|
if err := applyWorkVolumeClaimTemplateToPod(&template, runner.Spec.WorkVolumeClaimTemplate, workDir); err != nil {
|
||||||
|
return corev1.Pod{}, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pod, err := newRunnerPodWithContainerMode(runner.Spec.ContainerMode, template, runner.Spec.RunnerConfig, r.RunnerImage, r.RunnerImagePullSecrets, r.DockerImage, r.DockerRegistryMirror, ghc.GithubBaseURL, r.UseRunnerStatusUpdateHook)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return pod, err
|
return pod, err
|
||||||
}
|
}
|
||||||
@@ -424,6 +567,9 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) {
|
|||||||
// if operater provides a work volume mount, use that
|
// if operater provides a work volume mount, use that
|
||||||
isPresent, _ := workVolumeMountPresent(runnerSpec.VolumeMounts)
|
isPresent, _ := workVolumeMountPresent(runnerSpec.VolumeMounts)
|
||||||
if isPresent {
|
if isPresent {
|
||||||
|
if runnerSpec.ContainerMode == "kubernetes" {
|
||||||
|
return pod, errors.New("volume mount \"work\" should be specified by workVolumeClaimTemplate in container mode kubernetes")
|
||||||
|
}
|
||||||
// remove work volume since it will be provided from runnerSpec.Volumes
|
// remove work volume since it will be provided from runnerSpec.Volumes
|
||||||
// if we don't remove it here we would get a duplicate key error, i.e. two volumes named work
|
// if we don't remove it here we would get a duplicate key error, i.e. two volumes named work
|
||||||
_, index := workVolumeMountPresent(pod.Spec.Containers[0].VolumeMounts)
|
_, index := workVolumeMountPresent(pod.Spec.Containers[0].VolumeMounts)
|
||||||
@@ -437,6 +583,9 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) {
|
|||||||
// if operator provides a work volume. use that
|
// if operator provides a work volume. use that
|
||||||
isPresent, _ := workVolumePresent(runnerSpec.Volumes)
|
isPresent, _ := workVolumePresent(runnerSpec.Volumes)
|
||||||
if isPresent {
|
if isPresent {
|
||||||
|
if runnerSpec.ContainerMode == "kubernetes" {
|
||||||
|
return pod, errors.New("volume \"work\" should be specified by workVolumeClaimTemplate in container mode kubernetes")
|
||||||
|
}
|
||||||
_, index := workVolumePresent(pod.Spec.Volumes)
|
_, index := workVolumePresent(pod.Spec.Volumes)
|
||||||
|
|
||||||
// remove work volume since it will be provided from runnerSpec.Volumes
|
// remove work volume since it will be provided from runnerSpec.Volumes
|
||||||
@@ -446,6 +595,7 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) {
|
|||||||
|
|
||||||
pod.Spec.Volumes = append(pod.Spec.Volumes, runnerSpec.Volumes...)
|
pod.Spec.Volumes = append(pod.Spec.Volumes, runnerSpec.Volumes...)
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(runnerSpec.InitContainers) != 0 {
|
if len(runnerSpec.InitContainers) != 0 {
|
||||||
pod.Spec.InitContainers = append(pod.Spec.InitContainers, runnerSpec.InitContainers...)
|
pod.Spec.InitContainers = append(pod.Spec.InitContainers, runnerSpec.InitContainers...)
|
||||||
}
|
}
|
||||||
@@ -453,9 +603,13 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) {
|
|||||||
if runnerSpec.NodeSelector != nil {
|
if runnerSpec.NodeSelector != nil {
|
||||||
pod.Spec.NodeSelector = runnerSpec.NodeSelector
|
pod.Spec.NodeSelector = runnerSpec.NodeSelector
|
||||||
}
|
}
|
||||||
|
|
||||||
if runnerSpec.ServiceAccountName != "" {
|
if runnerSpec.ServiceAccountName != "" {
|
||||||
pod.Spec.ServiceAccountName = runnerSpec.ServiceAccountName
|
pod.Spec.ServiceAccountName = runnerSpec.ServiceAccountName
|
||||||
|
} else if r.UseRunnerStatusUpdateHook || runner.Spec.ContainerMode == "kubernetes" {
|
||||||
|
pod.Spec.ServiceAccountName = runner.ObjectMeta.Name
|
||||||
}
|
}
|
||||||
|
|
||||||
if runnerSpec.AutomountServiceAccountToken != nil {
|
if runnerSpec.AutomountServiceAccountToken != nil {
|
||||||
pod.Spec.AutomountServiceAccountToken = runnerSpec.AutomountServiceAccountToken
|
pod.Spec.AutomountServiceAccountToken = runnerSpec.AutomountServiceAccountToken
|
||||||
}
|
}
|
||||||
@@ -476,6 +630,10 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) {
|
|||||||
pod.Spec.Tolerations = runnerSpec.Tolerations
|
pod.Spec.Tolerations = runnerSpec.Tolerations
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if runnerSpec.PriorityClassName != "" {
|
||||||
|
pod.Spec.PriorityClassName = runnerSpec.PriorityClassName
|
||||||
|
}
|
||||||
|
|
||||||
if len(runnerSpec.TopologySpreadConstraints) != 0 {
|
if len(runnerSpec.TopologySpreadConstraints) != 0 {
|
||||||
pod.Spec.TopologySpreadConstraints = runnerSpec.TopologySpreadConstraints
|
pod.Spec.TopologySpreadConstraints = runnerSpec.TopologySpreadConstraints
|
||||||
}
|
}
|
||||||
@@ -526,7 +684,45 @@ func mutatePod(pod *corev1.Pod, token string) *corev1.Pod {
|
|||||||
return updated
|
return updated
|
||||||
}
|
}
|
||||||
|
|
||||||
func newRunnerPod(runnerName string, template corev1.Pod, runnerSpec v1alpha1.RunnerConfig, defaultRunnerImage string, defaultRunnerImagePullSecrets []string, defaultDockerImage, defaultDockerRegistryMirror string, githubBaseURL string) (corev1.Pod, error) {
|
func runnerHookEnvs(pod *corev1.Pod) ([]corev1.EnvVar, error) {
|
||||||
|
isRequireSameNode, err := isRequireSameNode(pod)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return []corev1.EnvVar{
|
||||||
|
{
|
||||||
|
Name: "ACTIONS_RUNNER_CONTAINER_HOOKS",
|
||||||
|
Value: defaultRunnerHookPath,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "ACTIONS_RUNNER_REQUIRE_JOB_CONTAINER",
|
||||||
|
Value: "true",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "ACTIONS_RUNNER_POD_NAME",
|
||||||
|
ValueFrom: &corev1.EnvVarSource{
|
||||||
|
FieldRef: &corev1.ObjectFieldSelector{
|
||||||
|
FieldPath: "metadata.name",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "ACTIONS_RUNNER_JOB_NAMESPACE",
|
||||||
|
ValueFrom: &corev1.EnvVarSource{
|
||||||
|
FieldRef: &corev1.ObjectFieldSelector{
|
||||||
|
FieldPath: "metadata.namespace",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
corev1.EnvVar{
|
||||||
|
Name: "ACTIONS_RUNNER_REQUIRE_SAME_NODE",
|
||||||
|
Value: strconv.FormatBool(isRequireSameNode),
|
||||||
|
},
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func newRunnerPodWithContainerMode(containerMode string, template corev1.Pod, runnerSpec v1alpha1.RunnerConfig, defaultRunnerImage string, defaultRunnerImagePullSecrets []string, defaultDockerImage, defaultDockerRegistryMirror string, githubBaseURL string, useRunnerStatusUpdateHook bool) (corev1.Pod, error) {
|
||||||
var (
|
var (
|
||||||
privileged bool = true
|
privileged bool = true
|
||||||
dockerdInRunner bool = runnerSpec.DockerdWithinRunnerContainer != nil && *runnerSpec.DockerdWithinRunnerContainer
|
dockerdInRunner bool = runnerSpec.DockerdWithinRunnerContainer != nil && *runnerSpec.DockerdWithinRunnerContainer
|
||||||
@@ -535,11 +731,20 @@ func newRunnerPod(runnerName string, template corev1.Pod, runnerSpec v1alpha1.Ru
|
|||||||
dockerdInRunnerPrivileged bool = dockerdInRunner
|
dockerdInRunnerPrivileged bool = dockerdInRunner
|
||||||
)
|
)
|
||||||
|
|
||||||
|
if containerMode == "kubernetes" {
|
||||||
|
dockerdInRunner = false
|
||||||
|
dockerEnabled = false
|
||||||
|
dockerdInRunnerPrivileged = false
|
||||||
|
}
|
||||||
|
|
||||||
template = *template.DeepCopy()
|
template = *template.DeepCopy()
|
||||||
|
|
||||||
// This label selector is used by default when rd.Spec.Selector is empty.
|
// This label selector is used by default when rd.Spec.Selector is empty.
|
||||||
template.ObjectMeta.Labels = CloneAndAddLabel(template.ObjectMeta.Labels, LabelKeyRunnerSetName, runnerName)
|
template.ObjectMeta.Labels = CloneAndAddLabel(template.ObjectMeta.Labels, LabelKeyRunner, "")
|
||||||
template.ObjectMeta.Labels = CloneAndAddLabel(template.ObjectMeta.Labels, LabelKeyPodMutation, LabelValuePodMutation)
|
template.ObjectMeta.Labels = CloneAndAddLabel(template.ObjectMeta.Labels, LabelKeyPodMutation, LabelValuePodMutation)
|
||||||
|
if runnerSpec.GitHubAPICredentialsFrom != nil {
|
||||||
|
template.ObjectMeta.Annotations = CloneAndAddLabel(template.ObjectMeta.Annotations, annotationKeyGitHubAPICredsSecret, runnerSpec.GitHubAPICredentialsFrom.SecretRef.Name)
|
||||||
|
}
|
||||||
|
|
||||||
workDir := runnerSpec.WorkDir
|
workDir := runnerSpec.WorkDir
|
||||||
if workDir == "" {
|
if workDir == "" {
|
||||||
@@ -569,11 +774,11 @@ func newRunnerPod(runnerName string, template corev1.Pod, runnerSpec v1alpha1.Ru
|
|||||||
Value: runnerSpec.Enterprise,
|
Value: runnerSpec.Enterprise,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Name: "RUNNER_LABELS",
|
Name: EnvVarLabels,
|
||||||
Value: strings.Join(runnerSpec.Labels, ","),
|
Value: strings.Join(runnerSpec.Labels, ","),
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Name: "RUNNER_GROUP",
|
Name: EnvVarGroup,
|
||||||
Value: runnerSpec.Group,
|
Value: runnerSpec.Group,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -596,6 +801,10 @@ func newRunnerPod(runnerName string, template corev1.Pod, runnerSpec v1alpha1.Ru
|
|||||||
Name: EnvVarEphemeral,
|
Name: EnvVarEphemeral,
|
||||||
Value: fmt.Sprintf("%v", ephemeral),
|
Value: fmt.Sprintf("%v", ephemeral),
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
Name: "RUNNER_STATUS_UPDATE_HOOK",
|
||||||
|
Value: fmt.Sprintf("%v", useRunnerStatusUpdateHook),
|
||||||
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
var seLinuxOptions *corev1.SELinuxOptions
|
var seLinuxOptions *corev1.SELinuxOptions
|
||||||
@@ -621,6 +830,17 @@ func newRunnerPod(runnerName string, template corev1.Pod, runnerSpec v1alpha1.Ru
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if containerMode == "kubernetes" {
|
||||||
|
if dockerdContainer != nil {
|
||||||
|
template.Spec.Containers = append(template.Spec.Containers[:dockerdContainerIndex], template.Spec.Containers[dockerdContainerIndex+1:]...)
|
||||||
|
}
|
||||||
|
if runnerContainerIndex < runnerContainerIndex {
|
||||||
|
runnerContainerIndex--
|
||||||
|
}
|
||||||
|
dockerdContainer = nil
|
||||||
|
dockerdContainerIndex = -1
|
||||||
|
}
|
||||||
|
|
||||||
if runnerContainer == nil {
|
if runnerContainer == nil {
|
||||||
runnerContainerIndex = -1
|
runnerContainerIndex = -1
|
||||||
runnerContainer = &corev1.Container{
|
runnerContainer = &corev1.Container{
|
||||||
@@ -651,6 +871,13 @@ func newRunnerPod(runnerName string, template corev1.Pod, runnerSpec v1alpha1.Ru
|
|||||||
}
|
}
|
||||||
|
|
||||||
runnerContainer.Env = append(runnerContainer.Env, env...)
|
runnerContainer.Env = append(runnerContainer.Env, env...)
|
||||||
|
if containerMode == "kubernetes" {
|
||||||
|
hookEnvs, err := runnerHookEnvs(&template)
|
||||||
|
if err != nil {
|
||||||
|
return corev1.Pod{}, err
|
||||||
|
}
|
||||||
|
runnerContainer.Env = append(runnerContainer.Env, hookEnvs...)
|
||||||
|
}
|
||||||
|
|
||||||
if runnerContainer.SecurityContext == nil {
|
if runnerContainer.SecurityContext == nil {
|
||||||
runnerContainer.SecurityContext = &corev1.SecurityContext{}
|
runnerContainer.SecurityContext = &corev1.SecurityContext{}
|
||||||
@@ -875,6 +1102,10 @@ func newRunnerPod(runnerName string, template corev1.Pod, runnerSpec v1alpha1.Ru
|
|||||||
return *pod, nil
|
return *pod, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func newRunnerPod(template corev1.Pod, runnerSpec v1alpha1.RunnerConfig, defaultRunnerImage string, defaultRunnerImagePullSecrets []string, defaultDockerImage, defaultDockerRegistryMirror string, githubBaseURL string, useRunnerStatusUpdateHookEphemeralRole bool) (corev1.Pod, error) {
|
||||||
|
return newRunnerPodWithContainerMode("", template, runnerSpec, defaultRunnerImage, defaultRunnerImagePullSecrets, defaultDockerImage, defaultDockerRegistryMirror, githubBaseURL, useRunnerStatusUpdateHookEphemeralRole)
|
||||||
|
}
|
||||||
|
|
||||||
func (r *RunnerReconciler) SetupWithManager(mgr ctrl.Manager) error {
|
func (r *RunnerReconciler) SetupWithManager(mgr ctrl.Manager) error {
|
||||||
name := "runner-controller"
|
name := "runner-controller"
|
||||||
if r.Name != "" {
|
if r.Name != "" {
|
||||||
@@ -937,3 +1168,71 @@ func workVolumeMountPresent(items []corev1.VolumeMount) (bool, int) {
|
|||||||
}
|
}
|
||||||
return false, 0
|
return false, 0
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func applyWorkVolumeClaimTemplateToPod(pod *corev1.Pod, workVolumeClaimTemplate *v1alpha1.WorkVolumeClaimTemplate, workDir string) error {
|
||||||
|
if workVolumeClaimTemplate == nil {
|
||||||
|
return errors.New("work volume claim template must be specified in container mode kubernetes")
|
||||||
|
}
|
||||||
|
for i := range pod.Spec.Volumes {
|
||||||
|
if pod.Spec.Volumes[i].Name == "work" {
|
||||||
|
return fmt.Errorf("Work volume should not be specified in container mode kubernetes. workVolumeClaimTemplate field should be used instead.")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
pod.Spec.Volumes = append(pod.Spec.Volumes, workVolumeClaimTemplate.V1Volume())
|
||||||
|
|
||||||
|
var runnerContainer *corev1.Container
|
||||||
|
for i := range pod.Spec.Containers {
|
||||||
|
if pod.Spec.Containers[i].Name == "runner" {
|
||||||
|
runnerContainer = &pod.Spec.Containers[i]
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if runnerContainer == nil {
|
||||||
|
return fmt.Errorf("runner container is not present when applying work volume claim template")
|
||||||
|
}
|
||||||
|
|
||||||
|
if isPresent, _ := workVolumeMountPresent(runnerContainer.VolumeMounts); isPresent {
|
||||||
|
return fmt.Errorf("volume mount \"work\" should not be present on the runner container in container mode kubernetes")
|
||||||
|
}
|
||||||
|
|
||||||
|
runnerContainer.VolumeMounts = append(runnerContainer.VolumeMounts, workVolumeClaimTemplate.V1VolumeMount(workDir))
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// isRequireSameNode specifies for the runner in kubernetes mode wether it should
|
||||||
|
// schedule jobs to the same node where the runner is
|
||||||
|
//
|
||||||
|
// This function should only be called in containerMode: kubernetes
|
||||||
|
func isRequireSameNode(pod *corev1.Pod) (bool, error) {
|
||||||
|
isPresent, index := workVolumePresent(pod.Spec.Volumes)
|
||||||
|
if !isPresent {
|
||||||
|
return true, errors.New("internal error: work volume mount must exist in containerMode: kubernetes")
|
||||||
|
}
|
||||||
|
|
||||||
|
if pod.Spec.Volumes[index].Ephemeral == nil || pod.Spec.Volumes[index].Ephemeral.VolumeClaimTemplate == nil {
|
||||||
|
return true, errors.New("containerMode: kubernetes should have pod.Spec.Volumes[].Ephemeral.VolumeClaimTemplate set")
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, accessMode := range pod.Spec.Volumes[index].Ephemeral.VolumeClaimTemplate.Spec.AccessModes {
|
||||||
|
switch accessMode {
|
||||||
|
case corev1.ReadWriteOnce:
|
||||||
|
return true, nil
|
||||||
|
case corev1.ReadWriteMany:
|
||||||
|
default:
|
||||||
|
return true, errors.New("actions-runner-controller supports ReadWriteOnce and ReadWriteMany modes only")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func overwriteRunnerEnv(runner *v1alpha1.Runner, key string, value string) {
|
||||||
|
for i := range runner.Spec.Env {
|
||||||
|
if runner.Spec.Env[i].Name == key {
|
||||||
|
runner.Spec.Env[i].Value = value
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
runner.Spec.Env = append(runner.Spec.Env, corev1.EnvVar{Name: key, Value: value})
|
||||||
|
}
|
||||||
|
|||||||
@@ -9,7 +9,7 @@ import (
|
|||||||
|
|
||||||
"github.com/actions-runner-controller/actions-runner-controller/github"
|
"github.com/actions-runner-controller/actions-runner-controller/github"
|
||||||
"github.com/go-logr/logr"
|
"github.com/go-logr/logr"
|
||||||
gogithub "github.com/google/go-github/v39/github"
|
gogithub "github.com/google/go-github/v45/github"
|
||||||
corev1 "k8s.io/api/core/v1"
|
corev1 "k8s.io/api/core/v1"
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
ctrl "sigs.k8s.io/controller-runtime"
|
ctrl "sigs.k8s.io/controller-runtime"
|
||||||
@@ -151,7 +151,10 @@ func ensureRunnerUnregistration(ctx context.Context, retryDelay time.Duration, l
|
|||||||
|
|
||||||
log.V(1).Info("Failed to unregister runner before deleting the pod.", "error", err)
|
log.V(1).Info("Failed to unregister runner before deleting the pod.", "error", err)
|
||||||
|
|
||||||
var runnerBusy bool
|
var (
|
||||||
|
runnerBusy bool
|
||||||
|
runnerUnregistrationFailureMessage string
|
||||||
|
)
|
||||||
|
|
||||||
errRes := &gogithub.ErrorResponse{}
|
errRes := &gogithub.ErrorResponse{}
|
||||||
if errors.As(err, &errRes) {
|
if errors.As(err, &errRes) {
|
||||||
@@ -173,6 +176,7 @@ func ensureRunnerUnregistration(ctx context.Context, retryDelay time.Duration, l
|
|||||||
}
|
}
|
||||||
|
|
||||||
runnerBusy = errRes.Response.StatusCode == 422
|
runnerBusy = errRes.Response.StatusCode == 422
|
||||||
|
runnerUnregistrationFailureMessage = errRes.Message
|
||||||
|
|
||||||
if runnerBusy && code != nil {
|
if runnerBusy && code != nil {
|
||||||
log.V(2).Info("Runner container has already stopped but the unregistration attempt failed. "+
|
log.V(2).Info("Runner container has already stopped but the unregistration attempt failed. "+
|
||||||
@@ -187,6 +191,11 @@ func ensureRunnerUnregistration(ctx context.Context, retryDelay time.Duration, l
|
|||||||
}
|
}
|
||||||
|
|
||||||
if runnerBusy {
|
if runnerBusy {
|
||||||
|
_, err := annotatePodOnce(ctx, c, log, pod, AnnotationKeyUnregistrationFailureMessage, runnerUnregistrationFailureMessage)
|
||||||
|
if err != nil {
|
||||||
|
return &ctrl.Result{}, err
|
||||||
|
}
|
||||||
|
|
||||||
// We want to prevent spamming the deletion attemps but returning ctrl.Result with RequeueAfter doesn't
|
// We want to prevent spamming the deletion attemps but returning ctrl.Result with RequeueAfter doesn't
|
||||||
// work as the reconcilation can happen earlier due to pod status update.
|
// work as the reconcilation can happen earlier due to pod status update.
|
||||||
// For ephemeral runners, we can expect it to stop and unregister itself on completion.
|
// For ephemeral runners, we can expect it to stop and unregister itself on completion.
|
||||||
|
|||||||
@@ -20,6 +20,7 @@ import (
|
|||||||
"context"
|
"context"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/go-logr/logr"
|
"github.com/go-logr/logr"
|
||||||
@@ -31,8 +32,6 @@ import (
|
|||||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||||
|
|
||||||
corev1 "k8s.io/api/core/v1"
|
corev1 "k8s.io/api/core/v1"
|
||||||
|
|
||||||
"github.com/actions-runner-controller/actions-runner-controller/github"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// RunnerPodReconciler reconciles a Runner object
|
// RunnerPodReconciler reconciles a Runner object
|
||||||
@@ -41,7 +40,7 @@ type RunnerPodReconciler struct {
|
|||||||
Log logr.Logger
|
Log logr.Logger
|
||||||
Recorder record.EventRecorder
|
Recorder record.EventRecorder
|
||||||
Scheme *runtime.Scheme
|
Scheme *runtime.Scheme
|
||||||
GitHubClient *github.Client
|
GitHubClient *MultiGitHubClient
|
||||||
Name string
|
Name string
|
||||||
RegistrationRecheckInterval time.Duration
|
RegistrationRecheckInterval time.Duration
|
||||||
RegistrationRecheckJitter time.Duration
|
RegistrationRecheckJitter time.Duration
|
||||||
@@ -50,6 +49,7 @@ type RunnerPodReconciler struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// +kubebuilder:rbac:groups=core,resources=pods,verbs=get;list;watch;update;patch;delete
|
// +kubebuilder:rbac:groups=core,resources=pods,verbs=get;list;watch;update;patch;delete
|
||||||
|
// +kubebuilder:rbac:groups=core,resources=secrets,verbs=get;list;watch
|
||||||
// +kubebuilder:rbac:groups=core,resources=events,verbs=create;patch
|
// +kubebuilder:rbac:groups=core,resources=events,verbs=create;patch
|
||||||
|
|
||||||
func (r *RunnerPodReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
|
func (r *RunnerPodReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
|
||||||
@@ -60,8 +60,11 @@ func (r *RunnerPodReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
|
|||||||
return ctrl.Result{}, client.IgnoreNotFound(err)
|
return ctrl.Result{}, client.IgnoreNotFound(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
_, isRunnerPod := runnerPod.Labels[LabelKeyRunnerSetName]
|
_, isRunnerPod := runnerPod.Labels[LabelKeyRunner]
|
||||||
if !isRunnerPod {
|
_, isRunnerSetPod := runnerPod.Labels[LabelKeyRunnerSetName]
|
||||||
|
_, isRunnerDeploymentPod := runnerPod.Labels[LabelKeyRunnerDeploymentName]
|
||||||
|
|
||||||
|
if !isRunnerPod && !isRunnerSetPod && !isRunnerDeploymentPod {
|
||||||
return ctrl.Result{}, nil
|
return ctrl.Result{}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -77,6 +80,7 @@ func (r *RunnerPodReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
|
|||||||
}
|
}
|
||||||
|
|
||||||
var enterprise, org, repo string
|
var enterprise, org, repo string
|
||||||
|
var isContainerMode bool
|
||||||
|
|
||||||
for _, e := range envvars {
|
for _, e := range envvars {
|
||||||
switch e.Name {
|
switch e.Name {
|
||||||
@@ -86,13 +90,25 @@ func (r *RunnerPodReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
|
|||||||
org = e.Value
|
org = e.Value
|
||||||
case EnvVarRepo:
|
case EnvVarRepo:
|
||||||
repo = e.Value
|
repo = e.Value
|
||||||
|
case "ACTIONS_RUNNER_CONTAINER_HOOKS":
|
||||||
|
isContainerMode = true
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
ghc, err := r.GitHubClient.InitForRunnerPod(ctx, &runnerPod)
|
||||||
|
if err != nil {
|
||||||
|
return ctrl.Result{}, err
|
||||||
|
}
|
||||||
|
|
||||||
if runnerPod.ObjectMeta.DeletionTimestamp.IsZero() {
|
if runnerPod.ObjectMeta.DeletionTimestamp.IsZero() {
|
||||||
finalizers, added := addFinalizer(runnerPod.ObjectMeta.Finalizers, runnerPodFinalizerName)
|
finalizers, added := addFinalizer(runnerPod.ObjectMeta.Finalizers, runnerPodFinalizerName)
|
||||||
|
|
||||||
if added {
|
var cleanupFinalizersAdded bool
|
||||||
|
if isContainerMode {
|
||||||
|
finalizers, cleanupFinalizersAdded = addFinalizer(finalizers, runnerLinkedResourcesFinalizerName)
|
||||||
|
}
|
||||||
|
|
||||||
|
if added || cleanupFinalizersAdded {
|
||||||
newRunner := runnerPod.DeepCopy()
|
newRunner := runnerPod.DeepCopy()
|
||||||
newRunner.ObjectMeta.Finalizers = finalizers
|
newRunner.ObjectMeta.Finalizers = finalizers
|
||||||
|
|
||||||
@@ -108,13 +124,34 @@ func (r *RunnerPodReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
|
|||||||
} else {
|
} else {
|
||||||
log.V(2).Info("Seen deletion-timestamp is already set")
|
log.V(2).Info("Seen deletion-timestamp is already set")
|
||||||
|
|
||||||
|
if finalizers, removed := removeFinalizer(runnerPod.ObjectMeta.Finalizers, runnerLinkedResourcesFinalizerName); removed {
|
||||||
|
if err := r.cleanupRunnerLinkedPods(ctx, &runnerPod, log); err != nil {
|
||||||
|
log.Info("Runner-linked pods clean up that has failed due to an error. If this persists, please manually remove the runner-linked pods to unblock ARC", "err", err.Error())
|
||||||
|
return ctrl.Result{Requeue: true, RequeueAfter: 30 * time.Second}, nil
|
||||||
|
}
|
||||||
|
if err := r.cleanupRunnerLinkedSecrets(ctx, &runnerPod, log); err != nil {
|
||||||
|
log.Info("Runner-linked secrets clean up that has failed due to an error. If this persists, please manually remove the runner-linked secrets to unblock ARC", "err", err.Error())
|
||||||
|
return ctrl.Result{Requeue: true, RequeueAfter: 30 * time.Second}, nil
|
||||||
|
}
|
||||||
|
patchedPod := runnerPod.DeepCopy()
|
||||||
|
patchedPod.ObjectMeta.Finalizers = finalizers
|
||||||
|
|
||||||
|
if err := r.Patch(ctx, patchedPod, client.MergeFrom(&runnerPod)); err != nil {
|
||||||
|
log.Error(err, "Failed to update runner for finalizer linked resources removal")
|
||||||
|
return ctrl.Result{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Otherwise the subsequent patch request can revive the removed finalizer and it will trigger a unnecessary reconcilation
|
||||||
|
runnerPod = *patchedPod
|
||||||
|
}
|
||||||
|
|
||||||
finalizers, removed := removeFinalizer(runnerPod.ObjectMeta.Finalizers, runnerPodFinalizerName)
|
finalizers, removed := removeFinalizer(runnerPod.ObjectMeta.Finalizers, runnerPodFinalizerName)
|
||||||
|
|
||||||
if removed {
|
if removed {
|
||||||
// In a standard scenario, the upstream controller, like runnerset-controller, ensures this runner to be gracefully stopped before the deletion timestamp is set.
|
// In a standard scenario, the upstream controller, like runnerset-controller, ensures this runner to be gracefully stopped before the deletion timestamp is set.
|
||||||
// But for the case that the user manually deleted it for whatever reason,
|
// But for the case that the user manually deleted it for whatever reason,
|
||||||
// we have to ensure it to gracefully stop now.
|
// we have to ensure it to gracefully stop now.
|
||||||
updatedPod, res, err := tickRunnerGracefulStop(ctx, r.unregistrationRetryDelay(), log, r.GitHubClient, r.Client, enterprise, org, repo, runnerPod.Name, &runnerPod)
|
updatedPod, res, err := tickRunnerGracefulStop(ctx, r.unregistrationRetryDelay(), log, ghc, r.Client, enterprise, org, repo, runnerPod.Name, &runnerPod)
|
||||||
if res != nil {
|
if res != nil {
|
||||||
return *res, err
|
return *res, err
|
||||||
}
|
}
|
||||||
@@ -130,6 +167,8 @@ func (r *RunnerPodReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
|
|||||||
|
|
||||||
log.V(2).Info("Removed finalizer")
|
log.V(2).Info("Removed finalizer")
|
||||||
|
|
||||||
|
r.GitHubClient.DeinitForRunnerPod(updatedPod)
|
||||||
|
|
||||||
return ctrl.Result{}, nil
|
return ctrl.Result{}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -168,7 +207,7 @@ func (r *RunnerPodReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
|
|||||||
return ctrl.Result{}, nil
|
return ctrl.Result{}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
po, res, err := ensureRunnerPodRegistered(ctx, log, r.GitHubClient, r.Client, enterprise, org, repo, runnerPod.Name, &runnerPod)
|
po, res, err := ensureRunnerPodRegistered(ctx, log, ghc, r.Client, enterprise, org, repo, runnerPod.Name, &runnerPod)
|
||||||
if res != nil {
|
if res != nil {
|
||||||
return *res, err
|
return *res, err
|
||||||
}
|
}
|
||||||
@@ -182,7 +221,7 @@ func (r *RunnerPodReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
|
|||||||
//
|
//
|
||||||
// In a standard scenario, ARC starts the unregistration process before marking the pod for deletion at all,
|
// In a standard scenario, ARC starts the unregistration process before marking the pod for deletion at all,
|
||||||
// so that it isn't subject to terminationGracePeriod and can safely take hours to finish it's work.
|
// so that it isn't subject to terminationGracePeriod and can safely take hours to finish it's work.
|
||||||
_, res, err := tickRunnerGracefulStop(ctx, r.unregistrationRetryDelay(), log, r.GitHubClient, r.Client, enterprise, org, repo, runnerPod.Name, &runnerPod)
|
_, res, err := tickRunnerGracefulStop(ctx, r.unregistrationRetryDelay(), log, ghc, r.Client, enterprise, org, repo, runnerPod.Name, &runnerPod)
|
||||||
if res != nil {
|
if res != nil {
|
||||||
return *res, err
|
return *res, err
|
||||||
}
|
}
|
||||||
@@ -222,3 +261,93 @@ func (r *RunnerPodReconciler) SetupWithManager(mgr ctrl.Manager) error {
|
|||||||
Named(name).
|
Named(name).
|
||||||
Complete(r)
|
Complete(r)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (r *RunnerPodReconciler) cleanupRunnerLinkedPods(ctx context.Context, pod *corev1.Pod, log logr.Logger) error {
|
||||||
|
var runnerLinkedPodList corev1.PodList
|
||||||
|
if err := r.List(ctx, &runnerLinkedPodList, client.InNamespace(pod.Namespace), client.MatchingLabels(
|
||||||
|
map[string]string{
|
||||||
|
"runner-pod": pod.ObjectMeta.Name,
|
||||||
|
},
|
||||||
|
)); err != nil {
|
||||||
|
return fmt.Errorf("failed to list runner-linked pods: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
wg sync.WaitGroup
|
||||||
|
errs []error
|
||||||
|
)
|
||||||
|
for _, p := range runnerLinkedPodList.Items {
|
||||||
|
if !p.ObjectMeta.DeletionTimestamp.IsZero() {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
p := p
|
||||||
|
wg.Add(1)
|
||||||
|
go func() {
|
||||||
|
defer wg.Done()
|
||||||
|
if err := r.Delete(ctx, &p); err != nil {
|
||||||
|
if kerrors.IsNotFound(err) || kerrors.IsGone(err) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
errs = append(errs, fmt.Errorf("delete pod %q error: %v", p.ObjectMeta.Name, err))
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
|
||||||
|
wg.Wait()
|
||||||
|
|
||||||
|
if len(errs) > 0 {
|
||||||
|
for _, err := range errs {
|
||||||
|
log.Error(err, "failed to remove runner-linked pod")
|
||||||
|
}
|
||||||
|
return errors.New("failed to remove some runner linked pods")
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *RunnerPodReconciler) cleanupRunnerLinkedSecrets(ctx context.Context, pod *corev1.Pod, log logr.Logger) error {
|
||||||
|
log.V(2).Info("Listing runner-linked secrets to be deleted", "ns", pod.Namespace)
|
||||||
|
|
||||||
|
var runnerLinkedSecretList corev1.SecretList
|
||||||
|
if err := r.List(ctx, &runnerLinkedSecretList, client.InNamespace(pod.Namespace), client.MatchingLabels(
|
||||||
|
map[string]string{
|
||||||
|
"runner-pod": pod.ObjectMeta.Name,
|
||||||
|
},
|
||||||
|
)); err != nil {
|
||||||
|
return fmt.Errorf("failed to list runner-linked secrets: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
wg sync.WaitGroup
|
||||||
|
errs []error
|
||||||
|
)
|
||||||
|
for _, s := range runnerLinkedSecretList.Items {
|
||||||
|
if !s.ObjectMeta.DeletionTimestamp.IsZero() {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
s := s
|
||||||
|
wg.Add(1)
|
||||||
|
go func() {
|
||||||
|
defer wg.Done()
|
||||||
|
if err := r.Delete(ctx, &s); err != nil {
|
||||||
|
if kerrors.IsNotFound(err) || kerrors.IsGone(err) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
errs = append(errs, fmt.Errorf("delete secret %q error: %v", s.ObjectMeta.Name, err))
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
|
||||||
|
wg.Wait()
|
||||||
|
|
||||||
|
if len(errs) > 0 {
|
||||||
|
for _, err := range errs {
|
||||||
|
log.Error(err, "failed to remove runner-linked secret")
|
||||||
|
}
|
||||||
|
return errors.New("failed to remove some runner linked secrets")
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|||||||
@@ -179,7 +179,10 @@ func (r *RunnerDeploymentReconciler) Reconcile(ctx context.Context, req ctrl.Req
|
|||||||
newDesiredReplicas := getIntOrDefault(desiredRS.Spec.Replicas, defaultReplicas)
|
newDesiredReplicas := getIntOrDefault(desiredRS.Spec.Replicas, defaultReplicas)
|
||||||
|
|
||||||
// Please add more conditions that we can in-place update the newest runnerreplicaset without disruption
|
// Please add more conditions that we can in-place update the newest runnerreplicaset without disruption
|
||||||
if currentDesiredReplicas != newDesiredReplicas {
|
//
|
||||||
|
// If we missed taking the EffectiveTime diff into account, you might end up experiencing scale-ups being delayed scale-down.
|
||||||
|
// See https://github.com/actions-runner-controller/actions-runner-controller/pull/1477#issuecomment-1164154496
|
||||||
|
if currentDesiredReplicas != newDesiredReplicas || newestSet.Spec.EffectiveTime != rd.Spec.EffectiveTime {
|
||||||
newestSet.Spec.Replicas = &newDesiredReplicas
|
newestSet.Spec.Replicas = &newDesiredReplicas
|
||||||
newestSet.Spec.EffectiveTime = rd.Spec.EffectiveTime
|
newestSet.Spec.EffectiveTime = rd.Spec.EffectiveTime
|
||||||
|
|
||||||
|
|||||||
@@ -32,17 +32,15 @@ import (
|
|||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
|
|
||||||
"github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
|
"github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
|
||||||
"github.com/actions-runner-controller/actions-runner-controller/github"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// RunnerReplicaSetReconciler reconciles a Runner object
|
// RunnerReplicaSetReconciler reconciles a Runner object
|
||||||
type RunnerReplicaSetReconciler struct {
|
type RunnerReplicaSetReconciler struct {
|
||||||
client.Client
|
client.Client
|
||||||
Log logr.Logger
|
Log logr.Logger
|
||||||
Recorder record.EventRecorder
|
Recorder record.EventRecorder
|
||||||
Scheme *runtime.Scheme
|
Scheme *runtime.Scheme
|
||||||
GitHubClient *github.Client
|
Name string
|
||||||
Name string
|
|
||||||
}
|
}
|
||||||
|
|
||||||
const (
|
const (
|
||||||
|
|||||||
@@ -52,15 +52,13 @@ func SetupTest(ctx2 context.Context) *corev1.Namespace {
|
|||||||
|
|
||||||
runnersList = fake.NewRunnersList()
|
runnersList = fake.NewRunnersList()
|
||||||
server = runnersList.GetServer()
|
server = runnersList.GetServer()
|
||||||
ghClient := newGithubClient(server)
|
|
||||||
|
|
||||||
controller := &RunnerReplicaSetReconciler{
|
controller := &RunnerReplicaSetReconciler{
|
||||||
Client: mgr.GetClient(),
|
Client: mgr.GetClient(),
|
||||||
Scheme: scheme.Scheme,
|
Scheme: scheme.Scheme,
|
||||||
Log: logf.Log,
|
Log: logf.Log,
|
||||||
Recorder: mgr.GetEventRecorderFor("runnerreplicaset-controller"),
|
Recorder: mgr.GetEventRecorderFor("runnerreplicaset-controller"),
|
||||||
GitHubClient: ghClient,
|
Name: "runnerreplicaset-" + ns.Name,
|
||||||
Name: "runnerreplicaset-" + ns.Name,
|
|
||||||
}
|
}
|
||||||
err = controller.SetupWithManager(mgr)
|
err = controller.SetupWithManager(mgr)
|
||||||
Expect(err).NotTo(HaveOccurred(), "failed to setup controller")
|
Expect(err).NotTo(HaveOccurred(), "failed to setup controller")
|
||||||
|
|||||||
@@ -45,12 +45,13 @@ type RunnerSetReconciler struct {
|
|||||||
Recorder record.EventRecorder
|
Recorder record.EventRecorder
|
||||||
Scheme *runtime.Scheme
|
Scheme *runtime.Scheme
|
||||||
|
|
||||||
CommonRunnerLabels []string
|
CommonRunnerLabels []string
|
||||||
GitHubBaseURL string
|
GitHubClient *MultiGitHubClient
|
||||||
RunnerImage string
|
RunnerImage string
|
||||||
RunnerImagePullSecrets []string
|
RunnerImagePullSecrets []string
|
||||||
DockerImage string
|
DockerImage string
|
||||||
DockerRegistryMirror string
|
DockerRegistryMirror string
|
||||||
|
UseRunnerStatusUpdateHook bool
|
||||||
}
|
}
|
||||||
|
|
||||||
// +kubebuilder:rbac:groups=actions.summerwind.dev,resources=runnersets,verbs=get;list;watch;create;update;patch;delete
|
// +kubebuilder:rbac:groups=actions.summerwind.dev,resources=runnersets,verbs=get;list;watch;create;update;patch;delete
|
||||||
@@ -80,6 +81,8 @@ func (r *RunnerSetReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
|
|||||||
}
|
}
|
||||||
|
|
||||||
if !runnerSet.ObjectMeta.DeletionTimestamp.IsZero() {
|
if !runnerSet.ObjectMeta.DeletionTimestamp.IsZero() {
|
||||||
|
r.GitHubClient.DeinitForRunnerSet(runnerSet)
|
||||||
|
|
||||||
return ctrl.Result{}, nil
|
return ctrl.Result{}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -97,7 +100,7 @@ func (r *RunnerSetReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
|
|||||||
return ctrl.Result{}, nil
|
return ctrl.Result{}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
desiredStatefulSet, err := r.newStatefulSet(runnerSet)
|
desiredStatefulSet, err := r.newStatefulSet(ctx, runnerSet)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
r.Recorder.Event(runnerSet, corev1.EventTypeNormal, "RunnerAutoscalingFailure", err.Error())
|
r.Recorder.Event(runnerSet, corev1.EventTypeNormal, "RunnerAutoscalingFailure", err.Error())
|
||||||
|
|
||||||
@@ -185,7 +188,7 @@ func getRunnerSetSelector(runnerSet *v1alpha1.RunnerSet) *metav1.LabelSelector {
|
|||||||
var LabelKeyPodMutation = "actions-runner-controller/inject-registration-token"
|
var LabelKeyPodMutation = "actions-runner-controller/inject-registration-token"
|
||||||
var LabelValuePodMutation = "true"
|
var LabelValuePodMutation = "true"
|
||||||
|
|
||||||
func (r *RunnerSetReconciler) newStatefulSet(runnerSet *v1alpha1.RunnerSet) (*appsv1.StatefulSet, error) {
|
func (r *RunnerSetReconciler) newStatefulSet(ctx context.Context, runnerSet *v1alpha1.RunnerSet) (*appsv1.StatefulSet, error) {
|
||||||
runnerSetWithOverrides := *runnerSet.Spec.DeepCopy()
|
runnerSetWithOverrides := *runnerSet.Spec.DeepCopy()
|
||||||
|
|
||||||
runnerSetWithOverrides.Labels = append(runnerSetWithOverrides.Labels, r.CommonRunnerLabels...)
|
runnerSetWithOverrides.Labels = append(runnerSetWithOverrides.Labels, r.CommonRunnerLabels...)
|
||||||
@@ -195,7 +198,40 @@ func (r *RunnerSetReconciler) newStatefulSet(runnerSet *v1alpha1.RunnerSet) (*ap
|
|||||||
Spec: runnerSetWithOverrides.StatefulSetSpec.Template.Spec,
|
Spec: runnerSetWithOverrides.StatefulSetSpec.Template.Spec,
|
||||||
}
|
}
|
||||||
|
|
||||||
pod, err := newRunnerPod(runnerSet.Name, template, runnerSet.Spec.RunnerConfig, r.RunnerImage, r.RunnerImagePullSecrets, r.DockerImage, r.DockerRegistryMirror, r.GitHubBaseURL)
|
if runnerSet.Spec.RunnerConfig.ContainerMode == "kubernetes" {
|
||||||
|
found := false
|
||||||
|
for i := range template.Spec.Containers {
|
||||||
|
if template.Spec.Containers[i].Name == containerName {
|
||||||
|
found = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if !found {
|
||||||
|
template.Spec.Containers = append(template.Spec.Containers, corev1.Container{
|
||||||
|
Name: "runner",
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
workDir := runnerSet.Spec.RunnerConfig.WorkDir
|
||||||
|
if workDir == "" {
|
||||||
|
workDir = "/runner/_work"
|
||||||
|
}
|
||||||
|
if err := applyWorkVolumeClaimTemplateToPod(&template, runnerSet.Spec.WorkVolumeClaimTemplate, workDir); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
template.Spec.ServiceAccountName = runnerSet.Spec.ServiceAccountName
|
||||||
|
}
|
||||||
|
|
||||||
|
template.ObjectMeta.Labels = CloneAndAddLabel(template.ObjectMeta.Labels, LabelKeyRunnerSetName, runnerSet.Name)
|
||||||
|
|
||||||
|
ghc, err := r.GitHubClient.InitForRunnerSet(ctx, runnerSet)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
githubBaseURL := ghc.GithubBaseURL
|
||||||
|
|
||||||
|
pod, err := newRunnerPodWithContainerMode(runnerSet.Spec.RunnerConfig.ContainerMode, template, runnerSet.Spec.RunnerConfig, r.RunnerImage, r.RunnerImagePullSecrets, r.DockerImage, r.DockerRegistryMirror, githubBaseURL, r.UseRunnerStatusUpdateHook)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|||||||
31
controllers/testresourcereader.go
Normal file
31
controllers/testresourcereader.go
Normal file
@@ -0,0 +1,31 @@
|
|||||||
|
package controllers
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"reflect"
|
||||||
|
|
||||||
|
kerrors "k8s.io/apimachinery/pkg/api/errors"
|
||||||
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
|
"k8s.io/apimachinery/pkg/types"
|
||||||
|
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||||
|
)
|
||||||
|
|
||||||
|
type testResourceReader struct {
|
||||||
|
objects map[types.NamespacedName]client.Object
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *testResourceReader) Get(_ context.Context, nsName types.NamespacedName, obj client.Object) error {
|
||||||
|
ret, ok := r.objects[nsName]
|
||||||
|
if !ok {
|
||||||
|
return &kerrors.StatusError{ErrStatus: metav1.Status{Reason: metav1.StatusReasonNotFound}}
|
||||||
|
}
|
||||||
|
v := reflect.ValueOf(obj)
|
||||||
|
if v.Kind() != reflect.Ptr {
|
||||||
|
return errors.New("obj must be a pointer")
|
||||||
|
}
|
||||||
|
|
||||||
|
v.Elem().Set(reflect.ValueOf(ret).Elem())
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
35
controllers/testresourcereader_test.go
Normal file
35
controllers/testresourcereader_test.go
Normal file
@@ -0,0 +1,35 @@
|
|||||||
|
package controllers
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
corev1 "k8s.io/api/core/v1"
|
||||||
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
|
"k8s.io/apimachinery/pkg/types"
|
||||||
|
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestResourceReader(t *testing.T) {
|
||||||
|
rr := &testResourceReader{
|
||||||
|
objects: map[types.NamespacedName]client.Object{
|
||||||
|
{Namespace: "default", Name: "sec1"}: &corev1.Secret{
|
||||||
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
|
Namespace: "default",
|
||||||
|
Name: "sec1",
|
||||||
|
},
|
||||||
|
Data: map[string][]byte{
|
||||||
|
"foo": []byte("bar"),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
var sec corev1.Secret
|
||||||
|
|
||||||
|
err := rr.Get(context.Background(), types.NamespacedName{Namespace: "default", Name: "sec1"}, &sec)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
require.Equal(t, []byte("bar"), sec.Data["foo"])
|
||||||
|
}
|
||||||
@@ -3,6 +3,9 @@ package controllers
|
|||||||
import (
|
import (
|
||||||
"reflect"
|
"reflect"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
|
"github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
|
||||||
|
corev1 "k8s.io/api/core/v1"
|
||||||
)
|
)
|
||||||
|
|
||||||
func Test_filterLabels(t *testing.T) {
|
func Test_filterLabels(t *testing.T) {
|
||||||
@@ -32,3 +35,94 @@ func Test_filterLabels(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func Test_workVolumeClaimTemplateVolumeV1VolumeTransformation(t *testing.T) {
|
||||||
|
storageClassName := "local-storage"
|
||||||
|
workVolumeClaimTemplate := v1alpha1.WorkVolumeClaimTemplate{
|
||||||
|
StorageClassName: storageClassName,
|
||||||
|
AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce, corev1.ReadWriteMany},
|
||||||
|
Resources: corev1.ResourceRequirements{},
|
||||||
|
}
|
||||||
|
want := corev1.Volume{
|
||||||
|
Name: "work",
|
||||||
|
VolumeSource: corev1.VolumeSource{
|
||||||
|
Ephemeral: &corev1.EphemeralVolumeSource{
|
||||||
|
VolumeClaimTemplate: &corev1.PersistentVolumeClaimTemplate{
|
||||||
|
Spec: corev1.PersistentVolumeClaimSpec{
|
||||||
|
AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce, corev1.ReadWriteMany},
|
||||||
|
StorageClassName: &storageClassName,
|
||||||
|
Resources: corev1.ResourceRequirements{},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
got := workVolumeClaimTemplate.V1Volume()
|
||||||
|
|
||||||
|
if got.Name != want.Name {
|
||||||
|
t.Errorf("want name %q, got %q\n", want.Name, got.Name)
|
||||||
|
}
|
||||||
|
|
||||||
|
if got.VolumeSource.Ephemeral == nil {
|
||||||
|
t.Fatal("work volume claim template should transform itself into Ephemeral volume source\n")
|
||||||
|
}
|
||||||
|
|
||||||
|
if got.VolumeSource.Ephemeral.VolumeClaimTemplate == nil {
|
||||||
|
t.Fatal("work volume claim template should have ephemeral volume claim template set\n")
|
||||||
|
}
|
||||||
|
|
||||||
|
gotClassName := *got.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.StorageClassName
|
||||||
|
wantClassName := *want.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.StorageClassName
|
||||||
|
if gotClassName != wantClassName {
|
||||||
|
t.Errorf("expected storage class name %q, got %q\n", wantClassName, gotClassName)
|
||||||
|
}
|
||||||
|
|
||||||
|
gotAccessModes := got.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.AccessModes
|
||||||
|
wantAccessModes := want.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.AccessModes
|
||||||
|
if len(gotAccessModes) != len(wantAccessModes) {
|
||||||
|
t.Fatalf("access modes lengths missmatch: got %v, expected %v\n", gotAccessModes, wantAccessModes)
|
||||||
|
}
|
||||||
|
|
||||||
|
diff := make(map[corev1.PersistentVolumeAccessMode]int, len(wantAccessModes))
|
||||||
|
for _, am := range wantAccessModes {
|
||||||
|
diff[am]++
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, am := range gotAccessModes {
|
||||||
|
_, ok := diff[am]
|
||||||
|
if !ok {
|
||||||
|
t.Errorf("got access mode %v that is not in the wanted access modes\n", am)
|
||||||
|
}
|
||||||
|
|
||||||
|
diff[am]--
|
||||||
|
if diff[am] == 0 {
|
||||||
|
delete(diff, am)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(diff) != 0 {
|
||||||
|
t.Fatalf("got access modes did not take every access mode into account\nactual: %v expected: %v\n", gotAccessModes, wantAccessModes)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func Test_workVolumeClaimTemplateV1VolumeMount(t *testing.T) {
|
||||||
|
|
||||||
|
workVolumeClaimTemplate := v1alpha1.WorkVolumeClaimTemplate{
|
||||||
|
StorageClassName: "local-storage",
|
||||||
|
AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce, corev1.ReadWriteMany},
|
||||||
|
Resources: corev1.ResourceRequirements{},
|
||||||
|
}
|
||||||
|
|
||||||
|
mountPath := "/test/_work"
|
||||||
|
want := corev1.VolumeMount{
|
||||||
|
MountPath: mountPath,
|
||||||
|
Name: "work",
|
||||||
|
}
|
||||||
|
|
||||||
|
got := workVolumeClaimTemplate.V1VolumeMount(mountPath)
|
||||||
|
|
||||||
|
if want != got {
|
||||||
|
t.Fatalf("expected volume mount %+v, actual %+v\n", want, got)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
43
docs/releasenotes/0.25.md
Normal file
43
docs/releasenotes/0.25.md
Normal file
@@ -0,0 +1,43 @@
|
|||||||
|
# actions-runner-controller v0.25.0
|
||||||
|
|
||||||
|
All planned changes in this release can be found in the milestone https://github.com/actions-runner-controller/actions-runner-controller/milestone/8.
|
||||||
|
|
||||||
|
Also see https://github.com/actions-runner-controller/actions-runner-controller/compare/v0.24.1...v0.25.0 for full changelog.
|
||||||
|
|
||||||
|
This log documents breaking changes and major enhancements
|
||||||
|
|
||||||
|
## Upgrading
|
||||||
|
|
||||||
|
In case you're using our Helm chart to deploy ARC, use the chart 0.20.0 or greater. Don't miss upgrading CRDs as usual! Helm doesn't upgrade CRDs.
|
||||||
|
|
||||||
|
## BREAKING CHANGE : Support for `--once` has been dropped
|
||||||
|
|
||||||
|
In case you're still on ARC v0.23.0 or earlier, please also read [the relevant part of v0.24.0 release note for more information](https://github.com/actions-runner-controller/actions-runner-controller/blob/master/docs/releasenotes/0.24.md#breaking-change--support-for---once-is-being-dropped).
|
||||||
|
|
||||||
|
Relevant PR(s): #1580, #1590
|
||||||
|
|
||||||
|
## ENHANCEMENT : Support for the new Kubernetes container mode of Actions runner
|
||||||
|
|
||||||
|
The GitHub Actions team has recently added `actions/runner` an ability to use [runner container hooks](https://github.com/actions/runner-container-hooks) to run job steps on Kubernetes pods instead of docker containers created by the `docker` command. It allows us to avoid the use of privileged containers while still being able to run container-backed job steps.
|
||||||
|
|
||||||
|
To use the new container mode, you set `.spec.template.spec.containerMode` in `RunnerDeployment` to `"kubernetes"`, while defining `.spec.template.spec.workVolumeClaimTemplate`. The volume claim template is used for provisioning and assigning persistent volumes mounted across the runner pod and the job pods for sharing the job workspace.
|
||||||
|
|
||||||
|
Before using this feature, we highly recommend you to read [the detailed explanation in the original pull request](https://github.com/actions-runner-controller/actions-runner-controller/pull/1546), and [the new section in ARC's documentation](https://github.com/actions-runner-controller/actions-runner-controller#runner-with-k8s-jobs).
|
||||||
|
|
||||||
|
Big kudos to @thboop and the GitHub Actions team for implementing and contributing this feature!
|
||||||
|
|
||||||
|
Relevant PR(s): #1546
|
||||||
|
|
||||||
|
## FIX : Webhook-based scaling is even more reliable
|
||||||
|
|
||||||
|
We fixed a race condition in the webhook-based autoscaler that resulted in not adding a runner when necessary.
|
||||||
|
|
||||||
|
The race condition had been happening when it received a webhook event while processing another webhook event and both ended up scaling up the same horizontal runner autoscaler at the same time.
|
||||||
|
|
||||||
|
To mitigate that, ARC now uses Kubernetes' Update API instead of Patch to update `HRA.spec.capacityReservations` which is the underlying data structure that makes the webhook-based scaler to add replicas to RunnerDeployment or RunnerSet on demand.
|
||||||
|
|
||||||
|
We were also worried about stressing the Kubernetes apiserver when your ARC webhook-based autoscaler received a lot of concurrent webhook events, we also enhanced it to batch the Update API calls for 3 seconds, which basically means it will call the Update API at most once every 3 seconds per webhook-based autoscaler instance.
|
||||||
|
|
||||||
|
Lastly, we fixed a bug in the autoscaler that resulted in it to stop adding replicas for newly received webhook events when the desired replicas reached `maxReplicas`.
|
||||||
|
|
||||||
|
Relevant PR(s): #1477, #1568
|
||||||
@@ -8,7 +8,7 @@ import (
|
|||||||
|
|
||||||
"github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
|
"github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
|
||||||
|
|
||||||
"github.com/google/go-github/v39/github"
|
"github.com/google/go-github/v45/github"
|
||||||
"github.com/gorilla/mux"
|
"github.com/gorilla/mux"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|||||||
@@ -14,7 +14,7 @@ import (
|
|||||||
"github.com/actions-runner-controller/actions-runner-controller/logging"
|
"github.com/actions-runner-controller/actions-runner-controller/logging"
|
||||||
"github.com/bradleyfalzon/ghinstallation/v2"
|
"github.com/bradleyfalzon/ghinstallation/v2"
|
||||||
"github.com/go-logr/logr"
|
"github.com/go-logr/logr"
|
||||||
"github.com/google/go-github/v39/github"
|
"github.com/google/go-github/v45/github"
|
||||||
"github.com/gregjones/httpcache"
|
"github.com/gregjones/httpcache"
|
||||||
"golang.org/x/oauth2"
|
"golang.org/x/oauth2"
|
||||||
)
|
)
|
||||||
@@ -248,7 +248,8 @@ func (c *Client) ListRunners(ctx context.Context, enterprise, org, repo string)
|
|||||||
func (c *Client) ListOrganizationRunnerGroups(ctx context.Context, org string) ([]*github.RunnerGroup, error) {
|
func (c *Client) ListOrganizationRunnerGroups(ctx context.Context, org string) ([]*github.RunnerGroup, error) {
|
||||||
var runnerGroups []*github.RunnerGroup
|
var runnerGroups []*github.RunnerGroup
|
||||||
|
|
||||||
opts := github.ListOptions{PerPage: 100}
|
opts := github.ListOrgRunnerGroupOptions{}
|
||||||
|
opts.PerPage = 100
|
||||||
for {
|
for {
|
||||||
list, res, err := c.Client.Actions.ListOrganizationRunnerGroups(ctx, org, &opts)
|
list, res, err := c.Client.Actions.ListOrganizationRunnerGroups(ctx, org, &opts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -271,9 +272,21 @@ func (c *Client) ListOrganizationRunnerGroups(ctx context.Context, org string) (
|
|||||||
func (c *Client) ListOrganizationRunnerGroupsForRepository(ctx context.Context, org, repo string) ([]*github.RunnerGroup, error) {
|
func (c *Client) ListOrganizationRunnerGroupsForRepository(ctx context.Context, org, repo string) ([]*github.RunnerGroup, error) {
|
||||||
var runnerGroups []*github.RunnerGroup
|
var runnerGroups []*github.RunnerGroup
|
||||||
|
|
||||||
opts := github.ListOptions{PerPage: 100}
|
var opts github.ListOrgRunnerGroupOptions
|
||||||
|
|
||||||
|
opts.PerPage = 100
|
||||||
|
|
||||||
|
repoName := repo
|
||||||
|
parts := strings.Split(repo, "/")
|
||||||
|
if len(parts) == 2 {
|
||||||
|
repoName = parts[1]
|
||||||
|
}
|
||||||
|
// This must be the repo name without the owner part, so in case the repo is "myorg/myrepo" the repo name
|
||||||
|
// passed to visible_to_repository must be "myrepo".
|
||||||
|
opts.VisibleToRepository = repoName
|
||||||
|
|
||||||
for {
|
for {
|
||||||
list, res, err := c.listOrganizationRunnerGroupsVisibleToRepo(ctx, org, repo, &opts)
|
list, res, err := c.Actions.ListOrganizationRunnerGroups(ctx, org, &opts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return runnerGroups, fmt.Errorf("failed to list organization runner groups: %w", err)
|
return runnerGroups, fmt.Errorf("failed to list organization runner groups: %w", err)
|
||||||
}
|
}
|
||||||
@@ -309,42 +322,6 @@ func (c *Client) ListRunnerGroupRepositoryAccesses(ctx context.Context, org stri
|
|||||||
return repos, nil
|
return repos, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// listOrganizationRunnerGroupsVisibleToRepo lists all self-hosted runner groups configured in an organization which can be used by the repository.
|
|
||||||
//
|
|
||||||
// GitHub API docs: https://docs.github.com/en/rest/reference/actions#list-self-hosted-runner-groups-for-an-organization
|
|
||||||
func (c *Client) listOrganizationRunnerGroupsVisibleToRepo(ctx context.Context, org, repo string, opts *github.ListOptions) (*github.RunnerGroups, *github.Response, error) {
|
|
||||||
repoName := repo
|
|
||||||
parts := strings.Split(repo, "/")
|
|
||||||
if len(parts) == 2 {
|
|
||||||
repoName = parts[1]
|
|
||||||
}
|
|
||||||
|
|
||||||
u := fmt.Sprintf("orgs/%v/actions/runner-groups?visible_to_repository=%v", org, repoName)
|
|
||||||
|
|
||||||
if opts != nil {
|
|
||||||
if opts.PerPage > 0 {
|
|
||||||
u = fmt.Sprintf("%v&per_page=%v", u, opts.PerPage)
|
|
||||||
}
|
|
||||||
|
|
||||||
if opts.Page > 0 {
|
|
||||||
u = fmt.Sprintf("%v&page=%v", u, opts.Page)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
req, err := c.Client.NewRequest("GET", u, nil)
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
groups := &github.RunnerGroups{}
|
|
||||||
resp, err := c.Client.Do(ctx, req, &groups)
|
|
||||||
if err != nil {
|
|
||||||
return nil, resp, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return groups, resp, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// cleanup removes expired registration tokens.
|
// cleanup removes expired registration tokens.
|
||||||
func (c *Client) cleanup() {
|
func (c *Client) cleanup() {
|
||||||
c.mu.Lock()
|
c.mu.Lock()
|
||||||
|
|||||||
@@ -8,7 +8,7 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/actions-runner-controller/actions-runner-controller/github/fake"
|
"github.com/actions-runner-controller/actions-runner-controller/github/fake"
|
||||||
"github.com/google/go-github/v39/github"
|
"github.com/google/go-github/v45/github"
|
||||||
)
|
)
|
||||||
|
|
||||||
var server *httptest.Server
|
var server *httptest.Server
|
||||||
|
|||||||
52
go.mod
52
go.mod
@@ -3,50 +3,60 @@ module github.com/actions-runner-controller/actions-runner-controller
|
|||||||
go 1.18
|
go 1.18
|
||||||
|
|
||||||
require (
|
require (
|
||||||
github.com/bradleyfalzon/ghinstallation/v2 v2.0.4
|
github.com/bradleyfalzon/ghinstallation/v2 v2.1.0
|
||||||
github.com/davecgh/go-spew v1.1.1
|
github.com/davecgh/go-spew v1.1.1
|
||||||
github.com/go-logr/logr v1.2.3
|
github.com/go-logr/logr v1.2.3
|
||||||
github.com/google/go-cmp v0.5.8
|
github.com/google/go-cmp v0.5.8
|
||||||
github.com/google/go-github/v39 v39.2.0
|
github.com/google/go-github/v45 v45.2.0
|
||||||
github.com/gorilla/mux v1.8.0
|
github.com/gorilla/mux v1.8.0
|
||||||
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79
|
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79
|
||||||
github.com/kelseyhightower/envconfig v1.4.0
|
github.com/kelseyhightower/envconfig v1.4.0
|
||||||
github.com/onsi/ginkgo v1.16.5
|
github.com/onsi/ginkgo v1.16.5
|
||||||
github.com/onsi/gomega v1.19.0
|
github.com/onsi/gomega v1.19.0
|
||||||
github.com/prometheus/client_golang v1.12.2
|
github.com/prometheus/client_golang v1.12.2
|
||||||
github.com/stretchr/testify v1.7.1
|
github.com/stretchr/testify v1.8.0
|
||||||
github.com/teambition/rrule-go v1.8.0
|
github.com/teambition/rrule-go v1.8.0
|
||||||
go.uber.org/zap v1.21.0
|
go.uber.org/zap v1.21.0
|
||||||
golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5
|
golang.org/x/oauth2 v0.0.0-20220630143837-2104d58473e0
|
||||||
gomodules.xyz/jsonpatch/v2 v2.2.0
|
gomodules.xyz/jsonpatch/v2 v2.2.0
|
||||||
k8s.io/api v0.23.5
|
k8s.io/api v0.24.2
|
||||||
k8s.io/apimachinery v0.23.5
|
k8s.io/apimachinery v0.24.2
|
||||||
k8s.io/client-go v0.23.5
|
k8s.io/client-go v0.24.2
|
||||||
sigs.k8s.io/controller-runtime v0.11.2
|
sigs.k8s.io/controller-runtime v0.12.3
|
||||||
sigs.k8s.io/yaml v1.3.0
|
sigs.k8s.io/yaml v1.3.0
|
||||||
)
|
)
|
||||||
|
|
||||||
require (
|
require (
|
||||||
cloud.google.com/go v0.81.0 // indirect
|
cloud.google.com/go v0.81.0 // indirect
|
||||||
|
github.com/PuerkitoBio/purell v1.1.1 // indirect
|
||||||
|
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect
|
||||||
github.com/beorn7/perks v1.0.1 // indirect
|
github.com/beorn7/perks v1.0.1 // indirect
|
||||||
github.com/cespare/xxhash/v2 v2.1.2 // indirect
|
github.com/cespare/xxhash/v2 v2.1.2 // indirect
|
||||||
|
github.com/emicklei/go-restful v2.9.5+incompatible // indirect
|
||||||
github.com/evanphx/json-patch v4.12.0+incompatible // indirect
|
github.com/evanphx/json-patch v4.12.0+incompatible // indirect
|
||||||
github.com/fsnotify/fsnotify v1.5.1 // indirect
|
github.com/fsnotify/fsnotify v1.5.1 // indirect
|
||||||
github.com/go-logr/zapr v1.2.0 // indirect
|
github.com/go-logr/zapr v1.2.0 // indirect
|
||||||
|
github.com/go-openapi/jsonpointer v0.19.5 // indirect
|
||||||
|
github.com/go-openapi/jsonreference v0.19.5 // indirect
|
||||||
|
github.com/go-openapi/swag v0.19.14 // indirect
|
||||||
github.com/gogo/protobuf v1.3.2 // indirect
|
github.com/gogo/protobuf v1.3.2 // indirect
|
||||||
github.com/golang-jwt/jwt/v4 v4.0.0 // indirect
|
github.com/golang-jwt/jwt/v4 v4.4.1 // indirect
|
||||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
|
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
|
||||||
github.com/golang/protobuf v1.5.2 // indirect
|
github.com/golang/protobuf v1.5.2 // indirect
|
||||||
|
github.com/google/gnostic v0.5.7-v3refs // indirect
|
||||||
github.com/google/go-github/v41 v41.0.0 // indirect
|
github.com/google/go-github/v41 v41.0.0 // indirect
|
||||||
github.com/google/go-querystring v1.1.0 // indirect
|
github.com/google/go-querystring v1.1.0 // indirect
|
||||||
github.com/google/gofuzz v1.1.0 // indirect
|
github.com/google/gofuzz v1.1.0 // indirect
|
||||||
github.com/google/uuid v1.1.2 // indirect
|
github.com/google/uuid v1.1.2 // indirect
|
||||||
github.com/googleapis/gnostic v0.5.5 // indirect
|
github.com/googleapis/gnostic v0.5.5 // indirect
|
||||||
github.com/imdario/mergo v0.3.12 // indirect
|
github.com/imdario/mergo v0.3.12 // indirect
|
||||||
|
github.com/josharian/intern v1.0.0 // indirect
|
||||||
github.com/json-iterator/go v1.1.12 // indirect
|
github.com/json-iterator/go v1.1.12 // indirect
|
||||||
|
github.com/mailru/easyjson v0.7.6 // indirect
|
||||||
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
|
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
|
||||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||||
|
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||||
github.com/nxadm/tail v1.4.8 // indirect
|
github.com/nxadm/tail v1.4.8 // indirect
|
||||||
github.com/pkg/errors v0.9.1 // indirect
|
github.com/pkg/errors v0.9.1 // indirect
|
||||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||||
@@ -56,24 +66,24 @@ require (
|
|||||||
github.com/spf13/pflag v1.0.5 // indirect
|
github.com/spf13/pflag v1.0.5 // indirect
|
||||||
go.uber.org/atomic v1.7.0 // indirect
|
go.uber.org/atomic v1.7.0 // indirect
|
||||||
go.uber.org/multierr v1.6.0 // indirect
|
go.uber.org/multierr v1.6.0 // indirect
|
||||||
golang.org/x/crypto v0.0.0-20210817164053-32db794688a5 // indirect
|
golang.org/x/crypto v0.0.0-20220214200702-86341886e292 // indirect
|
||||||
golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect
|
golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e // indirect
|
||||||
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect
|
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a // indirect
|
||||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect
|
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect
|
||||||
golang.org/x/text v0.3.7 // indirect
|
golang.org/x/text v0.3.7 // indirect
|
||||||
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect
|
golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect
|
||||||
google.golang.org/appengine v1.6.7 // indirect
|
google.golang.org/appengine v1.6.7 // indirect
|
||||||
google.golang.org/protobuf v1.27.1 // indirect
|
google.golang.org/protobuf v1.28.0 // indirect
|
||||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
|
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
|
||||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
|
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||||
k8s.io/apiextensions-apiserver v0.23.5 // indirect
|
k8s.io/apiextensions-apiserver v0.24.2 // indirect
|
||||||
k8s.io/component-base v0.23.5 // indirect
|
k8s.io/component-base v0.24.2 // indirect
|
||||||
k8s.io/klog/v2 v2.30.0 // indirect
|
k8s.io/klog/v2 v2.60.1 // indirect
|
||||||
k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 // indirect
|
k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42 // indirect
|
||||||
k8s.io/utils v0.0.0-20211116205334-6203023598ed // indirect
|
k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 // indirect
|
||||||
sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 // indirect
|
sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 // indirect
|
||||||
sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect
|
sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|||||||
87
go.sum
87
go.sum
@@ -52,7 +52,9 @@ github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym
|
|||||||
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
|
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
|
||||||
github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
|
github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
|
||||||
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
|
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
|
||||||
|
github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI=
|
||||||
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
|
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
|
||||||
|
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M=
|
||||||
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
|
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
|
||||||
github.com/actions-runner-controller/httpcache v0.2.0 h1:hCNvYuVPJ2xxYBymqBvH0hSiQpqz4PHF/LbU3XghGNI=
|
github.com/actions-runner-controller/httpcache v0.2.0 h1:hCNvYuVPJ2xxYBymqBvH0hSiQpqz4PHF/LbU3XghGNI=
|
||||||
github.com/actions-runner-controller/httpcache v0.2.0/go.mod h1:JLu9/2M/btPz1Zu/vTZ71XzukQHn2YeISPmJoM5exBI=
|
github.com/actions-runner-controller/httpcache v0.2.0/go.mod h1:JLu9/2M/btPz1Zu/vTZ71XzukQHn2YeISPmJoM5exBI=
|
||||||
@@ -66,6 +68,7 @@ github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20210826220005-b48c857c3a0e/go.m
|
|||||||
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
|
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
|
||||||
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
|
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
|
||||||
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
|
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
|
||||||
|
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
|
||||||
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
|
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
|
||||||
github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
|
github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
|
||||||
github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
|
github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
|
||||||
@@ -78,8 +81,11 @@ github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kB
|
|||||||
github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
|
github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
|
||||||
github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM=
|
github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM=
|
||||||
github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
|
github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
|
||||||
|
github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
|
||||||
github.com/bradleyfalzon/ghinstallation/v2 v2.0.4 h1:tXKVfhE7FcSkhkv0UwkLvPDeZ4kz6OXd0PKPlFqf81M=
|
github.com/bradleyfalzon/ghinstallation/v2 v2.0.4 h1:tXKVfhE7FcSkhkv0UwkLvPDeZ4kz6OXd0PKPlFqf81M=
|
||||||
github.com/bradleyfalzon/ghinstallation/v2 v2.0.4/go.mod h1:B40qPqJxWE0jDZgOR1JmaMy+4AY1eBP+IByOvqyAKp0=
|
github.com/bradleyfalzon/ghinstallation/v2 v2.0.4/go.mod h1:B40qPqJxWE0jDZgOR1JmaMy+4AY1eBP+IByOvqyAKp0=
|
||||||
|
github.com/bradleyfalzon/ghinstallation/v2 v2.1.0 h1:5+NghM1Zred9Z078QEZtm28G/kfDfZN/92gkDlLwGVA=
|
||||||
|
github.com/bradleyfalzon/ghinstallation/v2 v2.1.0/go.mod h1:Xg3xPRN5Mcq6GDqeUVhFbjEWMb4JHCyWEeeBGEYQoTU=
|
||||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||||
github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
|
github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
|
||||||
github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
|
github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
|
||||||
@@ -106,6 +112,7 @@ github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7
|
|||||||
github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
|
github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
|
||||||
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
|
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
|
||||||
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
|
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
|
||||||
|
github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||||
github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
@@ -117,6 +124,7 @@ github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3
|
|||||||
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
||||||
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
|
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
|
||||||
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
|
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
|
||||||
|
github.com/emicklei/go-restful v2.9.5+incompatible h1:spTtZBk5DYEvbxMVutUuTyh1Ao2r4iyvLdACqsl/Ljk=
|
||||||
github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
|
github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
|
||||||
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||||
@@ -157,10 +165,13 @@ github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbV
|
|||||||
github.com/go-logr/zapr v1.2.0 h1:n4JnPI1T3Qq1SFEi/F8rwLrZERp2bso19PJZDB9dayk=
|
github.com/go-logr/zapr v1.2.0 h1:n4JnPI1T3Qq1SFEi/F8rwLrZERp2bso19PJZDB9dayk=
|
||||||
github.com/go-logr/zapr v1.2.0/go.mod h1:Qa4Bsj2Vb+FAVeAKsLD8RLQ+YRJB8YDmOAKxaBQf7Ro=
|
github.com/go-logr/zapr v1.2.0/go.mod h1:Qa4Bsj2Vb+FAVeAKsLD8RLQ+YRJB8YDmOAKxaBQf7Ro=
|
||||||
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
|
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
|
||||||
|
github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY=
|
||||||
github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
|
github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
|
||||||
github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
|
github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
|
||||||
|
github.com/go-openapi/jsonreference v0.19.5 h1:1WJP/wi4OjB4iV8KVbH73rQaoialJrqv8gitZLxGLtM=
|
||||||
github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg=
|
github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg=
|
||||||
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
|
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
|
||||||
|
github.com/go-openapi/swag v0.19.14 h1:gm3vOOXfiuw5i9p5N9xJvfjvuofpyvLA9Wr6QfK5Fng=
|
||||||
github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
|
github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
|
||||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||||
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
|
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
|
||||||
@@ -172,6 +183,8 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
|||||||
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
||||||
github.com/golang-jwt/jwt/v4 v4.0.0 h1:RAqyYixv1p7uEnocuy8P1nru5wprCh/MH2BIlW5z5/o=
|
github.com/golang-jwt/jwt/v4 v4.0.0 h1:RAqyYixv1p7uEnocuy8P1nru5wprCh/MH2BIlW5z5/o=
|
||||||
github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
|
github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
|
||||||
|
github.com/golang-jwt/jwt/v4 v4.4.1 h1:pC5DB52sCeK48Wlb9oPcdhnjkz1TKt1D/P7WKJ0kUcQ=
|
||||||
|
github.com/golang-jwt/jwt/v4 v4.4.1/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
|
||||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||||
github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4=
|
github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4=
|
||||||
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||||
@@ -210,7 +223,10 @@ github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Z
|
|||||||
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||||
github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
|
github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
|
||||||
github.com/google/cel-go v0.9.0/go.mod h1:U7ayypeSkw23szu4GaQTPJGx66c20mx8JklMSxrmI1w=
|
github.com/google/cel-go v0.9.0/go.mod h1:U7ayypeSkw23szu4GaQTPJGx66c20mx8JklMSxrmI1w=
|
||||||
|
github.com/google/cel-go v0.10.1/go.mod h1:U7ayypeSkw23szu4GaQTPJGx66c20mx8JklMSxrmI1w=
|
||||||
github.com/google/cel-spec v0.6.0/go.mod h1:Nwjgxy5CbjlPrtCWjeDjUyKMl8w41YBYGjsyDdqk0xA=
|
github.com/google/cel-spec v0.6.0/go.mod h1:Nwjgxy5CbjlPrtCWjeDjUyKMl8w41YBYGjsyDdqk0xA=
|
||||||
|
github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54=
|
||||||
|
github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ=
|
||||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||||
@@ -225,10 +241,10 @@ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
|
|||||||
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||||
github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
|
github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
|
||||||
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||||
github.com/google/go-github/v39 v39.2.0 h1:rNNM311XtPOz5rDdsJXAp2o8F67X9FnROXTvto3aSnQ=
|
|
||||||
github.com/google/go-github/v39 v39.2.0/go.mod h1:C1s8C5aCC9L+JXIYpJM5GYytdX52vC1bLvHEF1IhBrE=
|
|
||||||
github.com/google/go-github/v41 v41.0.0 h1:HseJrM2JFf2vfiZJ8anY2hqBjdfY1Vlj/K27ueww4gg=
|
github.com/google/go-github/v41 v41.0.0 h1:HseJrM2JFf2vfiZJ8anY2hqBjdfY1Vlj/K27ueww4gg=
|
||||||
github.com/google/go-github/v41 v41.0.0/go.mod h1:XgmCA5H323A9rtgExdTcnDkcqp6S30AVACCBDOonIxg=
|
github.com/google/go-github/v41 v41.0.0/go.mod h1:XgmCA5H323A9rtgExdTcnDkcqp6S30AVACCBDOonIxg=
|
||||||
|
github.com/google/go-github/v45 v45.2.0 h1:5oRLszbrkvxDDqBCNj2hjDZMKmvexaZ1xw/FCD+K3FI=
|
||||||
|
github.com/google/go-github/v45 v45.2.0/go.mod h1:FObaZJEDSTa/WGCzZ2Z3eoCDXWJKMenWWTrd8jrta28=
|
||||||
github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8=
|
github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8=
|
||||||
github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
|
github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
|
||||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||||
@@ -295,6 +311,7 @@ github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANyt
|
|||||||
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
|
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
|
||||||
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
|
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
|
||||||
github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
|
github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
|
||||||
|
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
|
||||||
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
|
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
|
||||||
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
|
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
|
||||||
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
||||||
@@ -327,6 +344,7 @@ github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czP
|
|||||||
github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
|
github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
|
||||||
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||||
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||||
|
github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA=
|
||||||
github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
|
github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
|
||||||
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
|
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
|
||||||
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
|
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
|
||||||
@@ -345,6 +363,7 @@ github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh
|
|||||||
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||||
github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
|
github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
|
||||||
github.com/moby/term v0.0.0-20210610120745-9d4ed1856297/go.mod h1:vgPCkQMyxTZ7IDy8SXRufE172gr8+K/JE/7hHFxHW3A=
|
github.com/moby/term v0.0.0-20210610120745-9d4ed1856297/go.mod h1:vgPCkQMyxTZ7IDy8SXRufE172gr8+K/JE/7hHFxHW3A=
|
||||||
|
github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw=
|
||||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
|
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
|
||||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||||
@@ -353,6 +372,7 @@ github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3Rllmb
|
|||||||
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
|
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
|
||||||
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
|
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
|
||||||
github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
||||||
|
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
|
||||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
||||||
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||||
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||||
@@ -394,6 +414,7 @@ github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDf
|
|||||||
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
|
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
|
||||||
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
|
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
|
||||||
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
|
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
|
||||||
|
github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
|
||||||
github.com/prometheus/client_golang v1.12.2 h1:51L9cDoUHVrXx4zWYlcLQIZ+d+VXHgqnYKkIuq4g/34=
|
github.com/prometheus/client_golang v1.12.2 h1:51L9cDoUHVrXx4zWYlcLQIZ+d+VXHgqnYKkIuq4g/34=
|
||||||
github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
|
github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
|
||||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||||
@@ -421,6 +442,7 @@ github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6So
|
|||||||
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
|
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
|
||||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||||
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||||
|
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||||
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
|
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
|
||||||
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
|
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
|
||||||
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
|
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
|
||||||
@@ -441,6 +463,7 @@ github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkU
|
|||||||
github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
|
github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
|
||||||
github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo=
|
github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo=
|
||||||
github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk=
|
github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk=
|
||||||
|
github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g=
|
||||||
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
|
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
|
||||||
github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
|
github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
|
||||||
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||||
@@ -451,6 +474,7 @@ github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH
|
|||||||
github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
|
github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
|
||||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||||
|
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||||
@@ -459,6 +483,10 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
|
|||||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||||
github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY=
|
github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY=
|
||||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||||
|
github.com/stretchr/testify v1.7.5 h1:s5PTfem8p8EbKQOctVV53k6jCJt3UX4IEJzwh+C324Q=
|
||||||
|
github.com/stretchr/testify v1.7.5/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||||
|
github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk=
|
||||||
|
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||||
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
|
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
|
||||||
github.com/teambition/rrule-go v1.8.0 h1:a/IX5s56hGkFF+nRlJUooZU/45OTeeldBGL29nDKIHw=
|
github.com/teambition/rrule-go v1.8.0 h1:a/IX5s56hGkFF+nRlJUooZU/45OTeeldBGL29nDKIHw=
|
||||||
github.com/teambition/rrule-go v1.8.0/go.mod h1:Ieq5AbrKGciP1V//Wq8ktsTXwSwJHDD5mD/wLBGl3p4=
|
github.com/teambition/rrule-go v1.8.0/go.mod h1:Ieq5AbrKGciP1V//Wq8ktsTXwSwJHDD5mD/wLBGl3p4=
|
||||||
@@ -471,12 +499,16 @@ github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
|
|||||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||||
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
|
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
|
||||||
github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
|
github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
|
||||||
|
github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
|
||||||
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
|
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
|
||||||
go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4=
|
go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4=
|
||||||
go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
|
go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
|
||||||
|
go.etcd.io/etcd/api/v3 v3.5.1/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
|
||||||
go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
|
go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
|
||||||
|
go.etcd.io/etcd/client/pkg/v3 v3.5.1/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
|
||||||
go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ=
|
go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ=
|
||||||
go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0=
|
go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0=
|
||||||
|
go.etcd.io/etcd/client/v3 v3.5.1/go.mod h1:OnjH4M8OnAotwaB2l9bVgZzRFKru7/ZMoS46OtKyd3Q=
|
||||||
go.etcd.io/etcd/pkg/v3 v3.5.0/go.mod h1:UzJGatBQ1lXChBkQF0AuAtkRQMYnHubxAEYIrC3MSsE=
|
go.etcd.io/etcd/pkg/v3 v3.5.0/go.mod h1:UzJGatBQ1lXChBkQF0AuAtkRQMYnHubxAEYIrC3MSsE=
|
||||||
go.etcd.io/etcd/raft/v3 v3.5.0/go.mod h1:UFOHSIvO/nKwd4lhkwabrTD3cqW5yVyYYf/KlD00Szc=
|
go.etcd.io/etcd/raft/v3 v3.5.0/go.mod h1:UFOHSIvO/nKwd4lhkwabrTD3cqW5yVyYYf/KlD00Szc=
|
||||||
go.etcd.io/etcd/server/v3 v3.5.0/go.mod h1:3Ah5ruV+M+7RZr0+Y/5mNLwC+eQlni+mQmOVdCRJoS4=
|
go.etcd.io/etcd/server/v3 v3.5.0/go.mod h1:3Ah5ruV+M+7RZr0+Y/5mNLwC+eQlni+mQmOVdCRJoS4=
|
||||||
@@ -524,6 +556,9 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh
|
|||||||
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||||
golang.org/x/crypto v0.0.0-20210817164053-32db794688a5 h1:HWj/xjIHfjYU5nVXpTM0s39J9CbLn7Cc5a7IC5rwsMQ=
|
golang.org/x/crypto v0.0.0-20210817164053-32db794688a5 h1:HWj/xjIHfjYU5nVXpTM0s39J9CbLn7Cc5a7IC5rwsMQ=
|
||||||
golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||||
|
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||||
|
golang.org/x/crypto v0.0.0-20220214200702-86341886e292 h1:f+lwQ+GtmgoY+A2YaQxlSOnDjXcQ7ZRLWOHbC6HtRqE=
|
||||||
|
golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||||
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||||
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
|
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
|
||||||
@@ -559,6 +594,7 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
|||||||
golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||||
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||||
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||||
|
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
|
||||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
@@ -605,10 +641,14 @@ golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96b
|
|||||||
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||||
golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||||
golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||||
|
golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||||
|
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||||
golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||||
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||||
golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc=
|
golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc=
|
||||||
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||||
|
golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e h1:TsQ7F31D3bUCLeqPT0u+yjp1guoArKaNKmCr22PYgTQ=
|
||||||
|
golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||||
@@ -623,8 +663,13 @@ golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ
|
|||||||
golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||||
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||||
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||||
|
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||||
golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5 h1:OSnWWcOd/CtWQC2cYSBgbTSJv3ciqd8r54ySIW2y3RE=
|
golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5 h1:OSnWWcOd/CtWQC2cYSBgbTSJv3ciqd8r54ySIW2y3RE=
|
||||||
golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
|
golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
|
||||||
|
golang.org/x/oauth2 v0.0.0-20220628200809-02e64fa58f26 h1:uBgVQYJLi/m8M0wzp+aGwBWt90gMRoOVf+aWTW10QHI=
|
||||||
|
golang.org/x/oauth2 v0.0.0-20220628200809-02e64fa58f26/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE=
|
||||||
|
golang.org/x/oauth2 v0.0.0-20220630143837-2104d58473e0 h1:VnGaRqoLmqZH/3TMLJwYCEWkR4j1nuIU1U9TvbqsDUw=
|
||||||
|
golang.org/x/oauth2 v0.0.0-20220630143837-2104d58473e0/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
|
||||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
@@ -701,9 +746,14 @@ golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBc
|
|||||||
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
|
golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0=
|
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0=
|
||||||
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
|
golang.org/x/sys v0.0.0-20220209214540-3681064d5158 h1:rm+CHSpPEEW2IsXUib1ThaHIjuBVZjxNgSKmBLFfD4c=
|
||||||
|
golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
|
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a h1:dGzPydgVsqGcTRVwiLJ1jVbufYwmzD3LfVPLKsKg+0k=
|
||||||
|
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||||
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY=
|
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY=
|
||||||
@@ -724,6 +774,8 @@ golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxb
|
|||||||
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||||
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac h1:7zkz7BUtwNFFqcowJ+RIgu2MaV/MapERkDIy+mwPyjs=
|
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac h1:7zkz7BUtwNFFqcowJ+RIgu2MaV/MapERkDIy+mwPyjs=
|
||||||
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||||
|
golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 h1:vVKdlvoWBphwdxWKrFZEuM0kGgGLxUOYcY4U/2Vjg44=
|
||||||
|
golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||||
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
@@ -783,6 +835,7 @@ golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
|
|||||||
golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||||
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||||
golang.org/x/tools v0.1.6-0.20210820212750-d4cc65f0b2ff/go.mod h1:YD9qOF0M9xpSpdWTBbzEl5e/RnCefISl8E5Noe10jFM=
|
golang.org/x/tools v0.1.6-0.20210820212750-d4cc65f0b2ff/go.mod h1:YD9qOF0M9xpSpdWTBbzEl5e/RnCefISl8E5Noe10jFM=
|
||||||
|
golang.org/x/tools v0.1.10-0.20220218145154-897bd77cd717/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
|
||||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
@@ -864,6 +917,7 @@ google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6D
|
|||||||
google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
|
google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
|
||||||
google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
|
google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
|
||||||
google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
|
google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
|
||||||
|
google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
||||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
||||||
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
||||||
@@ -900,6 +954,8 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0
|
|||||||
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||||
google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ=
|
google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ=
|
||||||
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||||
|
google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw=
|
||||||
|
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
||||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
@@ -931,6 +987,8 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C
|
|||||||
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
|
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
|
||||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||||
|
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||||
|
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||||
gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
|
gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
|
||||||
gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8=
|
gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8=
|
||||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||||
@@ -942,34 +1000,59 @@ honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9
|
|||||||
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
||||||
k8s.io/api v0.23.5 h1:zno3LUiMubxD/V1Zw3ijyKO3wxrhbUF1Ck+VjBvfaoA=
|
k8s.io/api v0.23.5 h1:zno3LUiMubxD/V1Zw3ijyKO3wxrhbUF1Ck+VjBvfaoA=
|
||||||
k8s.io/api v0.23.5/go.mod h1:Na4XuKng8PXJ2JsploYYrivXrINeTaycCGcYgF91Xm8=
|
k8s.io/api v0.23.5/go.mod h1:Na4XuKng8PXJ2JsploYYrivXrINeTaycCGcYgF91Xm8=
|
||||||
|
k8s.io/api v0.24.2 h1:g518dPU/L7VRLxWfcadQn2OnsiGWVOadTLpdnqgY2OI=
|
||||||
|
k8s.io/api v0.24.2/go.mod h1:AHqbSkTm6YrQ0ObxjO3Pmp/ubFF/KuM7jU+3khoBsOg=
|
||||||
k8s.io/apiextensions-apiserver v0.23.5 h1:5SKzdXyvIJKu+zbfPc3kCbWpbxi+O+zdmAJBm26UJqI=
|
k8s.io/apiextensions-apiserver v0.23.5 h1:5SKzdXyvIJKu+zbfPc3kCbWpbxi+O+zdmAJBm26UJqI=
|
||||||
k8s.io/apiextensions-apiserver v0.23.5/go.mod h1:ntcPWNXS8ZPKN+zTXuzYMeg731CP0heCTl6gYBxLcuQ=
|
k8s.io/apiextensions-apiserver v0.23.5/go.mod h1:ntcPWNXS8ZPKN+zTXuzYMeg731CP0heCTl6gYBxLcuQ=
|
||||||
|
k8s.io/apiextensions-apiserver v0.24.2 h1:/4NEQHKlEz1MlaK/wHT5KMKC9UKYz6NZz6JE6ov4G6k=
|
||||||
|
k8s.io/apiextensions-apiserver v0.24.2/go.mod h1:e5t2GMFVngUEHUd0wuCJzw8YDwZoqZfJiGOW6mm2hLQ=
|
||||||
k8s.io/apimachinery v0.23.5 h1:Va7dwhp8wgkUPWsEXk6XglXWU4IKYLKNlv8VkX7SDM0=
|
k8s.io/apimachinery v0.23.5 h1:Va7dwhp8wgkUPWsEXk6XglXWU4IKYLKNlv8VkX7SDM0=
|
||||||
k8s.io/apimachinery v0.23.5/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM=
|
k8s.io/apimachinery v0.23.5/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM=
|
||||||
|
k8s.io/apimachinery v0.24.2 h1:5QlH9SL2C8KMcrNJPor+LbXVTaZRReml7svPEh4OKDM=
|
||||||
|
k8s.io/apimachinery v0.24.2/go.mod h1:82Bi4sCzVBdpYjyI4jY6aHX+YCUchUIrZrXKedjd2UM=
|
||||||
k8s.io/apiserver v0.23.5/go.mod h1:7wvMtGJ42VRxzgVI7jkbKvMbuCbVbgsWFT7RyXiRNTw=
|
k8s.io/apiserver v0.23.5/go.mod h1:7wvMtGJ42VRxzgVI7jkbKvMbuCbVbgsWFT7RyXiRNTw=
|
||||||
|
k8s.io/apiserver v0.24.2/go.mod h1:pSuKzr3zV+L+MWqsEo0kHHYwCo77AT5qXbFXP2jbvFI=
|
||||||
k8s.io/client-go v0.23.5 h1:zUXHmEuqx0RY4+CsnkOn5l0GU+skkRXKGJrhmE2SLd8=
|
k8s.io/client-go v0.23.5 h1:zUXHmEuqx0RY4+CsnkOn5l0GU+skkRXKGJrhmE2SLd8=
|
||||||
k8s.io/client-go v0.23.5/go.mod h1:flkeinTO1CirYgzMPRWxUCnV0G4Fbu2vLhYCObnt/r4=
|
k8s.io/client-go v0.23.5/go.mod h1:flkeinTO1CirYgzMPRWxUCnV0G4Fbu2vLhYCObnt/r4=
|
||||||
|
k8s.io/client-go v0.24.2 h1:CoXFSf8if+bLEbinDqN9ePIDGzcLtqhfd6jpfnwGOFA=
|
||||||
|
k8s.io/client-go v0.24.2/go.mod h1:zg4Xaoo+umDsfCWr4fCnmLEtQXyCNXCvJuSsglNcV30=
|
||||||
k8s.io/code-generator v0.23.5/go.mod h1:S0Q1JVA+kSzTI1oUvbKAxZY/DYbA/ZUb4Uknog12ETk=
|
k8s.io/code-generator v0.23.5/go.mod h1:S0Q1JVA+kSzTI1oUvbKAxZY/DYbA/ZUb4Uknog12ETk=
|
||||||
|
k8s.io/code-generator v0.24.2/go.mod h1:dpVhs00hTuTdTY6jvVxvTFCk6gSMrtfRydbhZwHI15w=
|
||||||
k8s.io/component-base v0.23.5 h1:8qgP5R6jG1BBSXmRYW+dsmitIrpk8F/fPEvgDenMCCE=
|
k8s.io/component-base v0.23.5 h1:8qgP5R6jG1BBSXmRYW+dsmitIrpk8F/fPEvgDenMCCE=
|
||||||
k8s.io/component-base v0.23.5/go.mod h1:c5Nq44KZyt1aLl0IpHX82fhsn84Sb0jjzwjpcA42bY0=
|
k8s.io/component-base v0.23.5/go.mod h1:c5Nq44KZyt1aLl0IpHX82fhsn84Sb0jjzwjpcA42bY0=
|
||||||
|
k8s.io/component-base v0.24.2 h1:kwpQdoSfbcH+8MPN4tALtajLDfSfYxBDYlXobNWI6OU=
|
||||||
|
k8s.io/component-base v0.24.2/go.mod h1:ucHwW76dajvQ9B7+zecZAP3BVqvrHoOxm8olHEg0nmM=
|
||||||
k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
|
k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
|
||||||
|
k8s.io/gengo v0.0.0-20211129171323-c02415ce4185/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
|
||||||
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
|
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
|
||||||
k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
|
k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
|
||||||
k8s.io/klog/v2 v2.30.0 h1:bUO6drIvCIsvZ/XFgfxoGFQU/a4Qkh0iAlvUR7vlHJw=
|
k8s.io/klog/v2 v2.30.0 h1:bUO6drIvCIsvZ/XFgfxoGFQU/a4Qkh0iAlvUR7vlHJw=
|
||||||
k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
|
k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
|
||||||
|
k8s.io/klog/v2 v2.60.1 h1:VW25q3bZx9uE3vvdL6M8ezOX79vA2Aq1nEWLqNQclHc=
|
||||||
|
k8s.io/klog/v2 v2.60.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
|
||||||
k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 h1:E3J9oCLlaobFUqsjG9DfKbP2BmgwBL2p7pn0A3dG9W4=
|
k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 h1:E3J9oCLlaobFUqsjG9DfKbP2BmgwBL2p7pn0A3dG9W4=
|
||||||
k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk=
|
k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk=
|
||||||
|
k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42 h1:Gii5eqf+GmIEwGNKQYQClCayuJCe2/4fZUvF7VG99sU=
|
||||||
|
k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42/go.mod h1:Z/45zLw8lUo4wdiUkI+v/ImEGAvu3WatcZl3lPMR4Rk=
|
||||||
k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
|
k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
|
||||||
k8s.io/utils v0.0.0-20211116205334-6203023598ed h1:ck1fRPWPJWsMd8ZRFsWc6mh/zHp5fZ/shhbrgPUxDAE=
|
k8s.io/utils v0.0.0-20211116205334-6203023598ed h1:ck1fRPWPJWsMd8ZRFsWc6mh/zHp5fZ/shhbrgPUxDAE=
|
||||||
k8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
|
k8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
|
||||||
|
k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 h1:HNSDgDCrr/6Ly3WEGKZftiE7IY19Vz2GdbOCyI4qqhc=
|
||||||
|
k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
|
||||||
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
|
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
|
||||||
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
|
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
|
||||||
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
|
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
|
||||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.30/go.mod h1:fEO7lRTdivWO2qYVCVG7dEADOMo/MLDCVr8So2g88Uw=
|
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.30/go.mod h1:fEO7lRTdivWO2qYVCVG7dEADOMo/MLDCVr8So2g88Uw=
|
||||||
sigs.k8s.io/controller-runtime v0.11.2 h1:H5GTxQl0Mc9UjRJhORusqfJCIjBO8UtUxGggCwL1rLA=
|
sigs.k8s.io/controller-runtime v0.11.2 h1:H5GTxQl0Mc9UjRJhORusqfJCIjBO8UtUxGggCwL1rLA=
|
||||||
sigs.k8s.io/controller-runtime v0.11.2/go.mod h1:P6QCzrEjLaZGqHsfd+os7JQ+WFZhvB8MRFsn4dWF7O4=
|
sigs.k8s.io/controller-runtime v0.11.2/go.mod h1:P6QCzrEjLaZGqHsfd+os7JQ+WFZhvB8MRFsn4dWF7O4=
|
||||||
|
sigs.k8s.io/controller-runtime v0.12.2 h1:nqV02cvhbAj7tbt21bpPpTByrXGn2INHRsi39lXy9sE=
|
||||||
|
sigs.k8s.io/controller-runtime v0.12.2/go.mod h1:qKsk4WE6zW2Hfj0G4v10EnNB2jMG1C+NTb8h+DwCoU0=
|
||||||
|
sigs.k8s.io/controller-runtime v0.12.3 h1:FCM8xeY/FI8hoAfh/V4XbbYMY20gElh9yh+A98usMio=
|
||||||
|
sigs.k8s.io/controller-runtime v0.12.3/go.mod h1:qKsk4WE6zW2Hfj0G4v10EnNB2jMG1C+NTb8h+DwCoU0=
|
||||||
sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 h1:fD1pz4yfdADVNfFmcP2aBEtudwUQ1AlLnRBALr33v3s=
|
sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 h1:fD1pz4yfdADVNfFmcP2aBEtudwUQ1AlLnRBALr33v3s=
|
||||||
sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs=
|
sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs=
|
||||||
|
sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 h1:kDi4JBNAsJWfz1aEXhO8Jg87JJaPNLh5tIzYHgStQ9Y=
|
||||||
|
sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2/go.mod h1:B+TnT182UBxE84DiCz4CVE26eOSDAeYCpfDnC2kdKMY=
|
||||||
sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
|
sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
|
||||||
sigs.k8s.io/structured-merge-diff/v4 v4.2.1 h1:bKCqE9GvQ5tiVHn5rfn1r+yao3aLQEaLzkkmAkf+A6Y=
|
sigs.k8s.io/structured-merge-diff/v4 v4.2.1 h1:bKCqE9GvQ5tiVHn5rfn1r+yao3aLQEaLzkkmAkf+A6Y=
|
||||||
sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
|
sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
|
||||||
|
|||||||
63
main.go
63
main.go
@@ -68,14 +68,14 @@ func main() {
|
|||||||
err error
|
err error
|
||||||
ghClient *github.Client
|
ghClient *github.Client
|
||||||
|
|
||||||
metricsAddr string
|
metricsAddr string
|
||||||
enableLeaderElection bool
|
enableLeaderElection bool
|
||||||
leaderElectionId string
|
runnerStatusUpdateHook bool
|
||||||
port int
|
leaderElectionId string
|
||||||
syncPeriod time.Duration
|
port int
|
||||||
|
syncPeriod time.Duration
|
||||||
|
|
||||||
gitHubAPICacheDuration time.Duration
|
defaultScaleDownDelay time.Duration
|
||||||
defaultScaleDownDelay time.Duration
|
|
||||||
|
|
||||||
runnerImage string
|
runnerImage string
|
||||||
runnerImagePullSecrets stringSlice
|
runnerImagePullSecrets stringSlice
|
||||||
@@ -112,7 +112,7 @@ func main() {
|
|||||||
flag.StringVar(&c.BasicauthUsername, "github-basicauth-username", c.BasicauthUsername, "Username for GitHub basic auth to use instead of PAT or GitHub APP in case it's running behind a proxy API")
|
flag.StringVar(&c.BasicauthUsername, "github-basicauth-username", c.BasicauthUsername, "Username for GitHub basic auth to use instead of PAT or GitHub APP in case it's running behind a proxy API")
|
||||||
flag.StringVar(&c.BasicauthPassword, "github-basicauth-password", c.BasicauthPassword, "Password for GitHub basic auth to use instead of PAT or GitHub APP in case it's running behind a proxy API")
|
flag.StringVar(&c.BasicauthPassword, "github-basicauth-password", c.BasicauthPassword, "Password for GitHub basic auth to use instead of PAT or GitHub APP in case it's running behind a proxy API")
|
||||||
flag.StringVar(&c.RunnerGitHubURL, "runner-github-url", c.RunnerGitHubURL, "GitHub URL to be used by runners during registration")
|
flag.StringVar(&c.RunnerGitHubURL, "runner-github-url", c.RunnerGitHubURL, "GitHub URL to be used by runners during registration")
|
||||||
flag.DurationVar(&gitHubAPICacheDuration, "github-api-cache-duration", 0, "DEPRECATED: The duration until the GitHub API cache expires. Setting this to e.g. 10m results in the controller tries its best not to make the same API call within 10m to reduce the chance of being rate-limited. Defaults to mostly the same value as sync-period. If you're tweaking this in order to make autoscaling more responsive, you'll probably want to tweak sync-period, too")
|
flag.BoolVar(&runnerStatusUpdateHook, "runner-status-update-hook", false, "Use custom RBAC for runners (role, role binding and service account).")
|
||||||
flag.DurationVar(&defaultScaleDownDelay, "default-scale-down-delay", controllers.DefaultScaleDownDelay, "The approximate delay for a scale down followed by a scale up, used to prevent flapping (down->up->down->... loop)")
|
flag.DurationVar(&defaultScaleDownDelay, "default-scale-down-delay", controllers.DefaultScaleDownDelay, "The approximate delay for a scale down followed by a scale up, used to prevent flapping (down->up->down->... loop)")
|
||||||
flag.IntVar(&port, "port", 9443, "The port to which the admission webhook endpoint should bind")
|
flag.IntVar(&port, "port", 9443, "The port to which the admission webhook endpoint should bind")
|
||||||
flag.DurationVar(&syncPeriod, "sync-period", 1*time.Minute, "Determines the minimum frequency at which K8s resources managed by this controller are reconciled.")
|
flag.DurationVar(&syncPeriod, "sync-period", 1*time.Minute, "Determines the minimum frequency at which K8s resources managed by this controller are reconciled.")
|
||||||
@@ -147,13 +147,19 @@ func main() {
|
|||||||
os.Exit(1)
|
os.Exit(1)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
multiClient := controllers.NewMultiGitHubClient(
|
||||||
|
mgr.GetClient(),
|
||||||
|
ghClient,
|
||||||
|
)
|
||||||
|
|
||||||
runnerReconciler := &controllers.RunnerReconciler{
|
runnerReconciler := &controllers.RunnerReconciler{
|
||||||
Client: mgr.GetClient(),
|
Client: mgr.GetClient(),
|
||||||
Log: log.WithName("runner"),
|
Log: log.WithName("runner"),
|
||||||
Scheme: mgr.GetScheme(),
|
Scheme: mgr.GetScheme(),
|
||||||
GitHubClient: ghClient,
|
GitHubClient: multiClient,
|
||||||
DockerImage: dockerImage,
|
DockerImage: dockerImage,
|
||||||
DockerRegistryMirror: dockerRegistryMirror,
|
DockerRegistryMirror: dockerRegistryMirror,
|
||||||
|
UseRunnerStatusUpdateHook: runnerStatusUpdateHook,
|
||||||
// Defaults for self-hosted runner containers
|
// Defaults for self-hosted runner containers
|
||||||
RunnerImage: runnerImage,
|
RunnerImage: runnerImage,
|
||||||
RunnerImagePullSecrets: runnerImagePullSecrets,
|
RunnerImagePullSecrets: runnerImagePullSecrets,
|
||||||
@@ -165,10 +171,9 @@ func main() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
runnerReplicaSetReconciler := &controllers.RunnerReplicaSetReconciler{
|
runnerReplicaSetReconciler := &controllers.RunnerReplicaSetReconciler{
|
||||||
Client: mgr.GetClient(),
|
Client: mgr.GetClient(),
|
||||||
Log: log.WithName("runnerreplicaset"),
|
Log: log.WithName("runnerreplicaset"),
|
||||||
Scheme: mgr.GetScheme(),
|
Scheme: mgr.GetScheme(),
|
||||||
GitHubClient: ghClient,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if err = runnerReplicaSetReconciler.SetupWithManager(mgr); err != nil {
|
if err = runnerReplicaSetReconciler.SetupWithManager(mgr); err != nil {
|
||||||
@@ -195,27 +200,20 @@ func main() {
|
|||||||
CommonRunnerLabels: commonRunnerLabels,
|
CommonRunnerLabels: commonRunnerLabels,
|
||||||
DockerImage: dockerImage,
|
DockerImage: dockerImage,
|
||||||
DockerRegistryMirror: dockerRegistryMirror,
|
DockerRegistryMirror: dockerRegistryMirror,
|
||||||
GitHubBaseURL: ghClient.GithubBaseURL,
|
GitHubClient: multiClient,
|
||||||
// Defaults for self-hosted runner containers
|
// Defaults for self-hosted runner containers
|
||||||
RunnerImage: runnerImage,
|
RunnerImage: runnerImage,
|
||||||
RunnerImagePullSecrets: runnerImagePullSecrets,
|
RunnerImagePullSecrets: runnerImagePullSecrets,
|
||||||
|
UseRunnerStatusUpdateHook: runnerStatusUpdateHook,
|
||||||
}
|
}
|
||||||
|
|
||||||
if err = runnerSetReconciler.SetupWithManager(mgr); err != nil {
|
if err = runnerSetReconciler.SetupWithManager(mgr); err != nil {
|
||||||
log.Error(err, "unable to create controller", "controller", "RunnerSet")
|
log.Error(err, "unable to create controller", "controller", "RunnerSet")
|
||||||
os.Exit(1)
|
os.Exit(1)
|
||||||
}
|
}
|
||||||
if gitHubAPICacheDuration == 0 {
|
|
||||||
gitHubAPICacheDuration = syncPeriod - 10*time.Second
|
|
||||||
}
|
|
||||||
|
|
||||||
if gitHubAPICacheDuration < 0 {
|
|
||||||
gitHubAPICacheDuration = 0
|
|
||||||
}
|
|
||||||
|
|
||||||
log.Info(
|
log.Info(
|
||||||
"Initializing actions-runner-controller",
|
"Initializing actions-runner-controller",
|
||||||
"github-api-cache-duration", gitHubAPICacheDuration,
|
|
||||||
"default-scale-down-delay", defaultScaleDownDelay,
|
"default-scale-down-delay", defaultScaleDownDelay,
|
||||||
"sync-period", syncPeriod,
|
"sync-period", syncPeriod,
|
||||||
"default-runner-image", runnerImage,
|
"default-runner-image", runnerImage,
|
||||||
@@ -230,8 +228,7 @@ func main() {
|
|||||||
Client: mgr.GetClient(),
|
Client: mgr.GetClient(),
|
||||||
Log: log.WithName("horizontalrunnerautoscaler"),
|
Log: log.WithName("horizontalrunnerautoscaler"),
|
||||||
Scheme: mgr.GetScheme(),
|
Scheme: mgr.GetScheme(),
|
||||||
GitHubClient: ghClient,
|
GitHubClient: multiClient,
|
||||||
CacheDuration: gitHubAPICacheDuration,
|
|
||||||
DefaultScaleDownDelay: defaultScaleDownDelay,
|
DefaultScaleDownDelay: defaultScaleDownDelay,
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -239,7 +236,7 @@ func main() {
|
|||||||
Client: mgr.GetClient(),
|
Client: mgr.GetClient(),
|
||||||
Log: log.WithName("runnerpod"),
|
Log: log.WithName("runnerpod"),
|
||||||
Scheme: mgr.GetScheme(),
|
Scheme: mgr.GetScheme(),
|
||||||
GitHubClient: ghClient,
|
GitHubClient: multiClient,
|
||||||
}
|
}
|
||||||
|
|
||||||
runnerPersistentVolumeReconciler := &controllers.RunnerPersistentVolumeReconciler{
|
runnerPersistentVolumeReconciler := &controllers.RunnerPersistentVolumeReconciler{
|
||||||
@@ -290,7 +287,7 @@ func main() {
|
|||||||
|
|
||||||
injector := &controllers.PodRunnerTokenInjector{
|
injector := &controllers.PodRunnerTokenInjector{
|
||||||
Client: mgr.GetClient(),
|
Client: mgr.GetClient(),
|
||||||
GitHubClient: ghClient,
|
GitHubClient: multiClient,
|
||||||
Log: ctrl.Log.WithName("webhook").WithName("PodRunnerTokenInjector"),
|
Log: ctrl.Log.WithName("webhook").WithName("PodRunnerTokenInjector"),
|
||||||
}
|
}
|
||||||
if err = injector.SetupWithManager(mgr); err != nil {
|
if err = injector.SetupWithManager(mgr); err != nil {
|
||||||
|
|||||||
@@ -11,7 +11,7 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/actions-runner-controller/actions-runner-controller/github"
|
"github.com/actions-runner-controller/actions-runner-controller/github"
|
||||||
gogithub "github.com/google/go-github/v39/github"
|
gogithub "github.com/google/go-github/v45/github"
|
||||||
)
|
)
|
||||||
|
|
||||||
type server struct {
|
type server struct {
|
||||||
|
|||||||
@@ -12,7 +12,7 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/actions-runner-controller/actions-runner-controller/github"
|
"github.com/actions-runner-controller/actions-runner-controller/github"
|
||||||
gogithub "github.com/google/go-github/v39/github"
|
gogithub "github.com/google/go-github/v45/github"
|
||||||
)
|
)
|
||||||
|
|
||||||
type Forwarder struct {
|
type Forwarder struct {
|
||||||
|
|||||||
@@ -3,7 +3,7 @@ package hookdeliveryforwarder
|
|||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
|
||||||
gogithub "github.com/google/go-github/v39/github"
|
gogithub "github.com/google/go-github/v45/github"
|
||||||
)
|
)
|
||||||
|
|
||||||
type hooksAPI struct {
|
type hooksAPI struct {
|
||||||
|
|||||||
@@ -3,7 +3,7 @@ package hookdeliveryforwarder
|
|||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
|
||||||
gogithub "github.com/google/go-github/v39/github"
|
gogithub "github.com/google/go-github/v45/github"
|
||||||
)
|
)
|
||||||
|
|
||||||
type hookDeliveriesAPI struct {
|
type hookDeliveriesAPI struct {
|
||||||
|
|||||||
@@ -8,7 +8,7 @@ import (
|
|||||||
"sync"
|
"sync"
|
||||||
|
|
||||||
"github.com/actions-runner-controller/actions-runner-controller/github"
|
"github.com/actions-runner-controller/actions-runner-controller/github"
|
||||||
gogithub "github.com/google/go-github/v39/github"
|
gogithub "github.com/google/go-github/v45/github"
|
||||||
)
|
)
|
||||||
|
|
||||||
type MultiForwarder struct {
|
type MultiForwarder struct {
|
||||||
|
|||||||
@@ -4,7 +4,8 @@ DIND_RUNNER_NAME ?= ${DOCKER_USER}/actions-runner-dind
|
|||||||
TAG ?= latest
|
TAG ?= latest
|
||||||
TARGETPLATFORM ?= $(shell arch)
|
TARGETPLATFORM ?= $(shell arch)
|
||||||
|
|
||||||
RUNNER_VERSION ?= 2.293.0
|
RUNNER_VERSION ?= 2.294.0
|
||||||
|
RUNNER_CONTAINER_HOOKS_VERSION ?= 0.1.2
|
||||||
DOCKER_VERSION ?= 20.10.12
|
DOCKER_VERSION ?= 20.10.12
|
||||||
|
|
||||||
# default list of platforms for which multiarch image is built
|
# default list of platforms for which multiarch image is built
|
||||||
@@ -28,6 +29,7 @@ docker-build-ubuntu:
|
|||||||
docker build \
|
docker build \
|
||||||
--build-arg TARGETPLATFORM=${TARGETPLATFORM} \
|
--build-arg TARGETPLATFORM=${TARGETPLATFORM} \
|
||||||
--build-arg RUNNER_VERSION=${RUNNER_VERSION} \
|
--build-arg RUNNER_VERSION=${RUNNER_VERSION} \
|
||||||
|
--build-arg RUNNER_CONTAINER_HOOKS_VERSION=${RUNNER_CONTAINER_HOOKS_VERSION} \
|
||||||
--build-arg DOCKER_VERSION=${DOCKER_VERSION} \
|
--build-arg DOCKER_VERSION=${DOCKER_VERSION} \
|
||||||
-f actions-runner.dockerfile \
|
-f actions-runner.dockerfile \
|
||||||
-t ${NAME}:${TAG} .
|
-t ${NAME}:${TAG} .
|
||||||
@@ -50,12 +52,14 @@ docker-buildx-ubuntu:
|
|||||||
fi
|
fi
|
||||||
docker buildx build --platform ${PLATFORMS} \
|
docker buildx build --platform ${PLATFORMS} \
|
||||||
--build-arg RUNNER_VERSION=${RUNNER_VERSION} \
|
--build-arg RUNNER_VERSION=${RUNNER_VERSION} \
|
||||||
|
--build-arg RUNNER_CONTAINER_HOOKS_VERSION=${RUNNER_CONTAINER_HOOKS_VERSION} \
|
||||||
--build-arg DOCKER_VERSION=${DOCKER_VERSION} \
|
--build-arg DOCKER_VERSION=${DOCKER_VERSION} \
|
||||||
-f actions-runner.dockerfile \
|
-f actions-runner.dockerfile \
|
||||||
-t "${NAME}:${TAG}" \
|
-t "${NAME}:${TAG}" \
|
||||||
. ${PUSH_ARG}
|
. ${PUSH_ARG}
|
||||||
docker buildx build --platform ${PLATFORMS} \
|
docker buildx build --platform ${PLATFORMS} \
|
||||||
--build-arg RUNNER_VERSION=${RUNNER_VERSION} \
|
--build-arg RUNNER_VERSION=${RUNNER_VERSION} \
|
||||||
|
--build-arg RUNNER_CONTAINER_HOOKS_VERSION=${RUNNER_CONTAINER_HOOKS_VERSION} \
|
||||||
--build-arg DOCKER_VERSION=${DOCKER_VERSION} \
|
--build-arg DOCKER_VERSION=${DOCKER_VERSION} \
|
||||||
-f actions-runner-dind.dockerfile \
|
-f actions-runner-dind.dockerfile \
|
||||||
-t "${DIND_RUNNER_NAME}:${TAG}" \
|
-t "${DIND_RUNNER_NAME}:${TAG}" \
|
||||||
|
|||||||
@@ -1,7 +1,7 @@
|
|||||||
FROM ubuntu:20.04
|
FROM ubuntu:20.04
|
||||||
|
|
||||||
ARG TARGETPLATFORM
|
ARG TARGETPLATFORM
|
||||||
ARG RUNNER_VERSION=2.293.0
|
ARG RUNNER_VERSION=2.294.0
|
||||||
ARG DOCKER_CHANNEL=stable
|
ARG DOCKER_CHANNEL=stable
|
||||||
ARG DOCKER_VERSION=20.10.12
|
ARG DOCKER_VERSION=20.10.12
|
||||||
ARG DUMB_INIT_VERSION=1.2.5
|
ARG DUMB_INIT_VERSION=1.2.5
|
||||||
@@ -74,8 +74,6 @@ RUN export ARCH=$(echo ${TARGETPLATFORM} | cut -d / -f2) \
|
|||||||
dockerd --version; \
|
dockerd --version; \
|
||||||
docker --version
|
docker --version
|
||||||
|
|
||||||
ENV HOME=/home/runner
|
|
||||||
|
|
||||||
# Runner download supports amd64 as x64
|
# Runner download supports amd64 as x64
|
||||||
#
|
#
|
||||||
# libyaml-dev is required for ruby/setup-ruby action.
|
# libyaml-dev is required for ruby/setup-ruby action.
|
||||||
@@ -100,10 +98,13 @@ RUN mkdir /opt/hostedtoolcache \
|
|||||||
|
|
||||||
# We place the scripts in `/usr/bin` so that users who extend this image can
|
# We place the scripts in `/usr/bin` so that users who extend this image can
|
||||||
# override them with scripts of the same name placed in `/usr/local/bin`.
|
# override them with scripts of the same name placed in `/usr/local/bin`.
|
||||||
COPY entrypoint.sh logger.bash startup.sh /usr/bin/
|
COPY entrypoint.sh logger.bash startup.sh update-status /usr/bin/
|
||||||
COPY supervisor/ /etc/supervisor/conf.d/
|
COPY supervisor/ /etc/supervisor/conf.d/
|
||||||
RUN chmod +x /usr/bin/startup.sh /usr/bin/entrypoint.sh
|
RUN chmod +x /usr/bin/startup.sh /usr/bin/entrypoint.sh
|
||||||
|
|
||||||
|
# Configure hooks folder structure.
|
||||||
|
COPY hooks /etc/arc/hooks/
|
||||||
|
|
||||||
# arch command on OS X reports "i386" for Intel CPUs regardless of bitness
|
# arch command on OS X reports "i386" for Intel CPUs regardless of bitness
|
||||||
RUN export ARCH=$(echo ${TARGETPLATFORM} | cut -d / -f2) \
|
RUN export ARCH=$(echo ${TARGETPLATFORM} | cut -d / -f2) \
|
||||||
&& if [ "$ARCH" = "arm64" ]; then export ARCH=aarch64 ; fi \
|
&& if [ "$ARCH" = "arm64" ]; then export ARCH=aarch64 ; fi \
|
||||||
@@ -113,6 +114,7 @@ RUN export ARCH=$(echo ${TARGETPLATFORM} | cut -d / -f2) \
|
|||||||
|
|
||||||
VOLUME /var/lib/docker
|
VOLUME /var/lib/docker
|
||||||
|
|
||||||
|
ENV HOME=/home/runner
|
||||||
# Add the Python "User Script Directory" to the PATH
|
# Add the Python "User Script Directory" to the PATH
|
||||||
ENV PATH="${PATH}:${HOME}/.local/bin"
|
ENV PATH="${PATH}:${HOME}/.local/bin"
|
||||||
ENV ImageOS=ubuntu20
|
ENV ImageOS=ubuntu20
|
||||||
|
|||||||
@@ -1,7 +1,8 @@
|
|||||||
FROM ubuntu:20.04
|
FROM ubuntu:20.04
|
||||||
|
|
||||||
ARG TARGETPLATFORM
|
ARG TARGETPLATFORM
|
||||||
ARG RUNNER_VERSION=2.293.0
|
ARG RUNNER_VERSION=2.294.0
|
||||||
|
ARG RUNNER_CONTAINER_HOOKS_VERSION=0.1.2
|
||||||
ARG DOCKER_CHANNEL=stable
|
ARG DOCKER_CHANNEL=stable
|
||||||
ARG DOCKER_VERSION=20.10.12
|
ARG DOCKER_VERSION=20.10.12
|
||||||
ARG DUMB_INIT_VERSION=1.2.5
|
ARG DUMB_INIT_VERSION=1.2.5
|
||||||
@@ -66,8 +67,6 @@ RUN set -vx; \
|
|||||||
&& usermod -aG docker runner \
|
&& usermod -aG docker runner \
|
||||||
&& echo "%sudo ALL=(ALL:ALL) NOPASSWD:ALL" > /etc/sudoers
|
&& echo "%sudo ALL=(ALL:ALL) NOPASSWD:ALL" > /etc/sudoers
|
||||||
|
|
||||||
ENV HOME=/home/runner
|
|
||||||
|
|
||||||
# Uncomment the below COPY to use your own custom build of actions-runner.
|
# Uncomment the below COPY to use your own custom build of actions-runner.
|
||||||
#
|
#
|
||||||
# To build a custom runner:
|
# To build a custom runner:
|
||||||
@@ -105,6 +104,11 @@ RUN export ARCH=$(echo ${TARGETPLATFORM} | cut -d / -f2) \
|
|||||||
&& apt-get install -y libyaml-dev \
|
&& apt-get install -y libyaml-dev \
|
||||||
&& rm -rf /var/lib/apt/lists/*
|
&& rm -rf /var/lib/apt/lists/*
|
||||||
|
|
||||||
|
RUN cd "$RUNNER_ASSETS_DIR" \
|
||||||
|
&& curl -f -L -o runner-container-hooks.zip https://github.com/actions/runner-container-hooks/releases/download/v${RUNNER_CONTAINER_HOOKS_VERSION}/actions-runner-hooks-k8s-${RUNNER_CONTAINER_HOOKS_VERSION}.zip \
|
||||||
|
&& unzip ./runner-container-hooks.zip -d ./k8s \
|
||||||
|
&& rm runner-container-hooks.zip
|
||||||
|
|
||||||
ENV RUNNER_TOOL_CACHE=/opt/hostedtoolcache
|
ENV RUNNER_TOOL_CACHE=/opt/hostedtoolcache
|
||||||
RUN mkdir /opt/hostedtoolcache \
|
RUN mkdir /opt/hostedtoolcache \
|
||||||
&& chgrp docker /opt/hostedtoolcache \
|
&& chgrp docker /opt/hostedtoolcache \
|
||||||
@@ -112,8 +116,12 @@ RUN mkdir /opt/hostedtoolcache \
|
|||||||
|
|
||||||
# We place the scripts in `/usr/bin` so that users who extend this image can
|
# We place the scripts in `/usr/bin` so that users who extend this image can
|
||||||
# override them with scripts of the same name placed in `/usr/local/bin`.
|
# override them with scripts of the same name placed in `/usr/local/bin`.
|
||||||
COPY entrypoint.sh logger.bash /usr/bin/
|
COPY entrypoint.sh logger.bash update-status /usr/bin/
|
||||||
|
|
||||||
|
# Configure hooks folder structure.
|
||||||
|
COPY hooks /etc/arc/hooks/
|
||||||
|
|
||||||
|
ENV HOME=/home/runner
|
||||||
# Add the Python "User Script Directory" to the PATH
|
# Add the Python "User Script Directory" to the PATH
|
||||||
ENV PATH="${PATH}:${HOME}/.local/bin"
|
ENV PATH="${PATH}:${HOME}/.local/bin"
|
||||||
ENV ImageOS=ubuntu20
|
ENV ImageOS=ubuntu20
|
||||||
|
|||||||
@@ -4,19 +4,18 @@ source logger.bash
|
|||||||
RUNNER_ASSETS_DIR=${RUNNER_ASSETS_DIR:-/runnertmp}
|
RUNNER_ASSETS_DIR=${RUNNER_ASSETS_DIR:-/runnertmp}
|
||||||
RUNNER_HOME=${RUNNER_HOME:-/runner}
|
RUNNER_HOME=${RUNNER_HOME:-/runner}
|
||||||
|
|
||||||
|
# Let GitHub runner execute these hooks. These environment variables are used by GitHub's Runner as described here
|
||||||
|
# https://github.com/actions/runner/blob/main/docs/adrs/1751-runner-job-hooks.md
|
||||||
|
# Scripts referenced in the ACTIONS_RUNNER_HOOK_ environment variables must end in .sh or .ps1
|
||||||
|
# for it to become a valid hook script, otherwise GitHub will fail to run the hook
|
||||||
|
export ACTIONS_RUNNER_HOOK_JOB_STARTED=/etc/arc/hooks/job-started.sh
|
||||||
|
export ACTIONS_RUNNER_HOOK_JOB_COMPLETED=/etc/arc/hooks/job-completed.sh
|
||||||
|
|
||||||
if [ ! -z "${STARTUP_DELAY_IN_SECONDS}" ]; then
|
if [ ! -z "${STARTUP_DELAY_IN_SECONDS}" ]; then
|
||||||
log.notice "Delaying startup by ${STARTUP_DELAY_IN_SECONDS} seconds"
|
log.notice "Delaying startup by ${STARTUP_DELAY_IN_SECONDS} seconds"
|
||||||
sleep ${STARTUP_DELAY_IN_SECONDS}
|
sleep ${STARTUP_DELAY_IN_SECONDS}
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if [[ "${DISABLE_WAIT_FOR_DOCKER}" != "true" ]] && [[ "${DOCKER_ENABLED}" == "true" ]]; then
|
|
||||||
log.debug 'Docker enabled runner detected and Docker daemon wait is enabled'
|
|
||||||
log.debug 'Waiting until Docker is available or the timeout is reached'
|
|
||||||
timeout 120s bash -c 'until docker ps ;do sleep 1; done'
|
|
||||||
else
|
|
||||||
log.notice 'Docker wait check skipped. Either Docker is disabled or the wait is disabled, continuing with entrypoint'
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [ -z "${GITHUB_URL}" ]; then
|
if [ -z "${GITHUB_URL}" ]; then
|
||||||
log.debug 'Working with public GitHub'
|
log.debug 'Working with public GitHub'
|
||||||
GITHUB_URL="https://github.com/"
|
GITHUB_URL="https://github.com/"
|
||||||
@@ -85,6 +84,8 @@ if [ "${DISABLE_RUNNER_UPDATE:-}" == "true" ]; then
|
|||||||
log.debug 'Passing --disableupdate to config.sh to disable automatic runner updates.'
|
log.debug 'Passing --disableupdate to config.sh to disable automatic runner updates.'
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
update-status "Registering"
|
||||||
|
|
||||||
retries_left=10
|
retries_left=10
|
||||||
while [[ ${retries_left} -gt 0 ]]; do
|
while [[ ${retries_left} -gt 0 ]]; do
|
||||||
log.debug 'Configuring the runner.'
|
log.debug 'Configuring the runner.'
|
||||||
@@ -140,13 +141,12 @@ if [ -z "${UNITTEST:-}" ] && [ -e ./externalstmp ]; then
|
|||||||
mv ./externalstmp/* ./externals/
|
mv ./externalstmp/* ./externals/
|
||||||
fi
|
fi
|
||||||
|
|
||||||
args=()
|
if [[ "${DISABLE_WAIT_FOR_DOCKER}" != "true" ]] && [[ "${DOCKER_ENABLED}" == "true" ]]; then
|
||||||
if [ "${RUNNER_FEATURE_FLAG_ONCE:-}" == "true" -a "${RUNNER_EPHEMERAL}" == "true" ]; then
|
log.debug 'Docker enabled runner detected and Docker daemon wait is enabled'
|
||||||
args+=(--once)
|
log.debug 'Waiting until Docker is available or the timeout is reached'
|
||||||
log.warning 'Passing --once is deprecated and will be removed as an option' \
|
timeout 120s bash -c 'until docker ps ;do sleep 1; done'
|
||||||
'from the image and actions-runner-controller at the release of 0.25.0.' \
|
else
|
||||||
'Upgrade to GHES => 3.3 to continue using actions-runner-controller. If' \
|
log.notice 'Docker wait check skipped. Either Docker is disabled or the wait is disabled, continuing with entrypoint'
|
||||||
'you are using github.com ignore this warning.'
|
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Unset entrypoint environment variables so they don't leak into the runner environment
|
# Unset entrypoint environment variables so they don't leak into the runner environment
|
||||||
@@ -164,4 +164,5 @@ unset RUNNER_NAME RUNNER_REPO RUNNER_TOKEN STARTUP_DELAY_IN_SECONDS DISABLE_WAIT
|
|||||||
if [ -z "${UNITTEST:-}" ]; then
|
if [ -z "${UNITTEST:-}" ]; then
|
||||||
mapfile -t env </etc/environment
|
mapfile -t env </etc/environment
|
||||||
fi
|
fi
|
||||||
exec env -- "${env[@]}" ./run.sh "${args[@]}"
|
update-status "Idle"
|
||||||
|
exec env -- "${env[@]}" ./run.sh
|
||||||
|
|||||||
4
runner/hooks/job-completed.d/update-status
Executable file
4
runner/hooks/job-completed.d/update-status
Executable file
@@ -0,0 +1,4 @@
|
|||||||
|
#!/usr/bin/env bash
|
||||||
|
set -u
|
||||||
|
|
||||||
|
exec update-status Idle
|
||||||
12
runner/hooks/job-completed.sh
Executable file
12
runner/hooks/job-completed.sh
Executable file
@@ -0,0 +1,12 @@
|
|||||||
|
#!/usr/bin/env bash
|
||||||
|
set -Eeuo pipefail
|
||||||
|
|
||||||
|
# shellcheck source=runner/logger.bash
|
||||||
|
source logger.bash
|
||||||
|
|
||||||
|
log.debug "Running ARC Job Completed Hooks"
|
||||||
|
|
||||||
|
for hook in /etc/arc/hooks/job-completed.d/*; do
|
||||||
|
log.debug "Running hook: $hook"
|
||||||
|
"$hook" "$@"
|
||||||
|
done
|
||||||
4
runner/hooks/job-started.d/update-status
Executable file
4
runner/hooks/job-started.d/update-status
Executable file
@@ -0,0 +1,4 @@
|
|||||||
|
#!/usr/bin/env bash
|
||||||
|
set -u
|
||||||
|
|
||||||
|
exec update-status Running "Run $GITHUB_RUN_ID from $GITHUB_REPOSITORY"
|
||||||
12
runner/hooks/job-started.sh
Normal file
12
runner/hooks/job-started.sh
Normal file
@@ -0,0 +1,12 @@
|
|||||||
|
#!/usr/bin/env bash
|
||||||
|
set -Eeuo pipefail
|
||||||
|
|
||||||
|
# shellcheck source=runner/logger.bash
|
||||||
|
source logger.bash
|
||||||
|
|
||||||
|
log.debug "Running ARC Job Started Hooks"
|
||||||
|
|
||||||
|
for hook in /etc/arc/hooks/job-started.d/*; do
|
||||||
|
log.debug "Running hook: $hook"
|
||||||
|
"$hook" "$@"
|
||||||
|
done
|
||||||
@@ -20,7 +20,9 @@ function wait_for_process () {
|
|||||||
sudo /bin/bash <<SCRIPT
|
sudo /bin/bash <<SCRIPT
|
||||||
mkdir -p /etc/docker
|
mkdir -p /etc/docker
|
||||||
|
|
||||||
echo "{}" > /etc/docker/daemon.json
|
if [ ! -f /etc/docker/daemon.json ]; then
|
||||||
|
echo "{}" > /etc/docker/daemon.json
|
||||||
|
fi
|
||||||
|
|
||||||
if [ -n "${MTU}" ]; then
|
if [ -n "${MTU}" ]; then
|
||||||
jq ".\"mtu\" = ${MTU}" /etc/docker/daemon.json > /tmp/.daemon.json && mv /tmp/.daemon.json /etc/docker/daemon.json
|
jq ".\"mtu\" = ${MTU}" /etc/docker/daemon.json > /tmp/.daemon.json && mv /tmp/.daemon.json /etc/docker/daemon.json
|
||||||
|
|||||||
31
runner/update-status
Executable file
31
runner/update-status
Executable file
@@ -0,0 +1,31 @@
|
|||||||
|
#!/usr/bin/env bash
|
||||||
|
set -Eeuo pipefail
|
||||||
|
|
||||||
|
if [[ ${1:-} == '' ]]; then
|
||||||
|
# shellcheck source=runner/logger.bash
|
||||||
|
source logger.bash
|
||||||
|
log.error "Missing required argument -- '<phase>'"
|
||||||
|
exit 64
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [[ ${RUNNER_STATUS_UPDATE_HOOK:-false} == true ]]; then
|
||||||
|
|
||||||
|
apiserver=https://${KUBERNETES_SERVICE_HOST}:${KUBERNETES_SERVICE_PORT_HTTPS}
|
||||||
|
serviceaccount=/var/run/secrets/kubernetes.io/serviceaccount
|
||||||
|
namespace=$(cat ${serviceaccount}/namespace)
|
||||||
|
token=$(cat ${serviceaccount}/token)
|
||||||
|
phase=$1
|
||||||
|
shift
|
||||||
|
|
||||||
|
jq -n --arg phase "$phase" --arg message "${*:-}" '.status.phase = $phase | .status.message = $message' | curl \
|
||||||
|
--cacert ${serviceaccount}/ca.crt \
|
||||||
|
--data @- \
|
||||||
|
--noproxy '*' \
|
||||||
|
--header "Content-Type: application/merge-patch+json" \
|
||||||
|
--header "Authorization: Bearer ${token}" \
|
||||||
|
--show-error \
|
||||||
|
--silent \
|
||||||
|
--request PATCH \
|
||||||
|
"${apiserver}/apis/actions.summerwind.dev/v1alpha1/namespaces/${namespace}/runners/${HOSTNAME}/status"
|
||||||
|
1>&-
|
||||||
|
fi
|
||||||
@@ -5,10 +5,12 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
|
|
||||||
"github.com/actions-runner-controller/actions-runner-controller/github"
|
"github.com/actions-runner-controller/actions-runner-controller/github"
|
||||||
|
"github.com/go-logr/logr"
|
||||||
)
|
)
|
||||||
|
|
||||||
type Simulator struct {
|
type Simulator struct {
|
||||||
Client *github.Client
|
Client *github.Client
|
||||||
|
Log logr.Logger
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *Simulator) GetRunnerGroupsVisibleToRepository(ctx context.Context, org, repo string, managed *VisibleRunnerGroups) (*VisibleRunnerGroups, error) {
|
func (c *Simulator) GetRunnerGroupsVisibleToRepository(ctx context.Context, org, repo string, managed *VisibleRunnerGroups) (*VisibleRunnerGroups, error) {
|
||||||
@@ -24,6 +26,10 @@ func (c *Simulator) GetRunnerGroupsVisibleToRepository(ctx context.Context, org,
|
|||||||
return visible, err
|
return visible, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if c.Log.V(3).Enabled() {
|
||||||
|
c.Log.V(3).Info("ListOrganizationRunnerGroupsForRepository succeeded", "runerGroups", runnerGroups)
|
||||||
|
}
|
||||||
|
|
||||||
for _, runnerGroup := range runnerGroups {
|
for _, runnerGroup := range runnerGroups {
|
||||||
ref := NewRunnerGroupFromGitHub(runnerGroup)
|
ref := NewRunnerGroupFromGitHub(runnerGroup)
|
||||||
|
|
||||||
|
|||||||
@@ -5,7 +5,7 @@ import (
|
|||||||
"sort"
|
"sort"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/google/go-github/v39/github"
|
"github.com/google/go-github/v45/github"
|
||||||
)
|
)
|
||||||
|
|
||||||
type RunnerGroupScope int
|
type RunnerGroupScope int
|
||||||
|
|||||||
@@ -13,54 +13,15 @@ import (
|
|||||||
"sigs.k8s.io/yaml"
|
"sigs.k8s.io/yaml"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
type DeployKind int
|
||||||
|
|
||||||
|
const (
|
||||||
|
RunnerSets DeployKind = iota
|
||||||
|
RunnerDeployments
|
||||||
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
controllerImageRepo = "actionsrunnercontrollere2e/actions-runner-controller"
|
certManagerVersion = "v1.8.2"
|
||||||
controllerImageTag = "e2e"
|
|
||||||
controllerImage = testing.Img(controllerImageRepo, controllerImageTag)
|
|
||||||
runnerImageRepo = "actionsrunnercontrollere2e/actions-runner"
|
|
||||||
runnerDindImageRepo = "actionsrunnercontrollere2e/actions-runner-dind"
|
|
||||||
runnerImageTag = "e2e"
|
|
||||||
runnerImage = testing.Img(runnerImageRepo, runnerImageTag)
|
|
||||||
runnerDindImage = testing.Img(runnerDindImageRepo, runnerImageTag)
|
|
||||||
|
|
||||||
prebuildImages = []testing.ContainerImage{
|
|
||||||
controllerImage,
|
|
||||||
runnerImage,
|
|
||||||
runnerDindImage,
|
|
||||||
}
|
|
||||||
|
|
||||||
builds = []testing.DockerBuild{
|
|
||||||
{
|
|
||||||
Dockerfile: "../../Dockerfile",
|
|
||||||
Args: []testing.BuildArg{},
|
|
||||||
Image: controllerImage,
|
|
||||||
EnableBuildX: true,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Dockerfile: "../../runner/actions-runner.dockerfile",
|
|
||||||
Args: []testing.BuildArg{
|
|
||||||
{
|
|
||||||
Name: "RUNNER_VERSION",
|
|
||||||
Value: "2.291.1",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
Image: runnerImage,
|
|
||||||
EnableBuildX: true,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Dockerfile: "../../runner/actions-runner-dind.dockerfile",
|
|
||||||
Args: []testing.BuildArg{
|
|
||||||
{
|
|
||||||
Name: "RUNNER_VERSION",
|
|
||||||
Value: "2.291.1",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
Image: runnerDindImage,
|
|
||||||
EnableBuildX: true,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
certManagerVersion = "v1.1.1"
|
|
||||||
|
|
||||||
images = []testing.ContainerImage{
|
images = []testing.ContainerImage{
|
||||||
testing.Img("docker", "dind"),
|
testing.Img("docker", "dind"),
|
||||||
@@ -70,13 +31,6 @@ var (
|
|||||||
testing.Img("quay.io/jetstack/cert-manager-webhook", certManagerVersion),
|
testing.Img("quay.io/jetstack/cert-manager-webhook", certManagerVersion),
|
||||||
}
|
}
|
||||||
|
|
||||||
commonScriptEnv = []string{
|
|
||||||
"SYNC_PERIOD=" + "30m",
|
|
||||||
"NAME=" + controllerImageRepo,
|
|
||||||
"VERSION=" + controllerImageTag,
|
|
||||||
"RUNNER_TAG=" + runnerImageTag,
|
|
||||||
}
|
|
||||||
|
|
||||||
testResultCMNamePrefix = "test-result-"
|
testResultCMNamePrefix = "test-result-"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -101,18 +55,47 @@ var (
|
|||||||
// whenever the whole test failed, so that you can immediately start fixing issues and rerun inidividual tests.
|
// whenever the whole test failed, so that you can immediately start fixing issues and rerun inidividual tests.
|
||||||
// See the below link for how terratest handles this:
|
// See the below link for how terratest handles this:
|
||||||
// https://terratest.gruntwork.io/docs/testing-best-practices/iterating-locally-using-test-stages/
|
// https://terratest.gruntwork.io/docs/testing-best-practices/iterating-locally-using-test-stages/
|
||||||
|
//
|
||||||
|
// This functions leaves PVs undeleted. To delete PVs, run:
|
||||||
|
// kubectl get pv -ojson | jq -rMc '.items[] | select(.status.phase == "Available") | {name:.metadata.name, status:.status.phase} | .name' | xargs kubectl delete pv
|
||||||
|
//
|
||||||
|
// If you disk full after dozens of test runs, try:
|
||||||
|
// docker system prune
|
||||||
|
// and
|
||||||
|
// kind delete cluster --name teste2e
|
||||||
|
//
|
||||||
|
// The former tend to release 200MB-3GB and the latter can result in releasing like 100GB due to kind node contains loaded container images and
|
||||||
|
// (in case you use it) local provisioners disk image(which is implemented as a directory within the kind node).
|
||||||
func TestE2E(t *testing.T) {
|
func TestE2E(t *testing.T) {
|
||||||
if testing.Short() {
|
if testing.Short() {
|
||||||
t.Skip("Skipped as -short is set")
|
t.Skip("Skipped as -short is set")
|
||||||
}
|
}
|
||||||
|
|
||||||
env := initTestEnv(t)
|
k8sMinorVer := os.Getenv("ARC_E2E_KUBE_VERSION")
|
||||||
env.useRunnerSet = true
|
skipRunnerCleanUp := os.Getenv("ARC_E2E_SKIP_RUNNER_CLEANUP") != ""
|
||||||
|
retainCluster := os.Getenv("ARC_E2E_RETAIN_CLUSTER") != ""
|
||||||
|
skipTestIDCleanUp := os.Getenv("ARC_E2E_SKIP_TEST_ID_CLEANUP") != ""
|
||||||
|
skipArgoTunnelCleanUp := os.Getenv("ARC_E2E_SKIP_ARGO_TUNNEL_CLEAN_UP") != ""
|
||||||
|
|
||||||
|
vars := buildVars(os.Getenv("ARC_E2E_IMAGE_REPO"))
|
||||||
|
|
||||||
|
env := initTestEnv(t, k8sMinorVer, vars)
|
||||||
|
if vt := os.Getenv("ARC_E2E_VERIFY_TIMEOUT"); vt != "" {
|
||||||
|
var err error
|
||||||
|
env.VerifyTimeout, err = time.ParseDuration(vt)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Failed to parse duration %q: %v", vt, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
t.Run("build and load images", func(t *testing.T) {
|
t.Run("build and load images", func(t *testing.T) {
|
||||||
env.buildAndLoadImages(t)
|
env.buildAndLoadImages(t)
|
||||||
})
|
})
|
||||||
|
|
||||||
|
if t.Failed() {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
t.Run("install cert-manager", func(t *testing.T) {
|
t.Run("install cert-manager", func(t *testing.T) {
|
||||||
env.installCertManager(t)
|
env.installCertManager(t)
|
||||||
})
|
})
|
||||||
@@ -121,72 +104,131 @@ func TestE2E(t *testing.T) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
t.Run("install actions-runner-controller and runners", func(t *testing.T) {
|
t.Run("RunnerSets", func(t *testing.T) {
|
||||||
env.installActionsRunnerController(t)
|
if os.Getenv("ARC_E2E_SKIP_RUNNERSETS") != "" {
|
||||||
|
t.Skip("RunnerSets test has been skipped due to ARC_E2E_SKIP_RUNNERSETS")
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
testID string
|
||||||
|
)
|
||||||
|
|
||||||
|
t.Run("get or generate test ID", func(t *testing.T) {
|
||||||
|
testID = env.GetOrGenerateTestID(t)
|
||||||
|
})
|
||||||
|
|
||||||
|
if !skipTestIDCleanUp {
|
||||||
|
t.Cleanup(func() {
|
||||||
|
env.DeleteTestID(t)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Run("install actions-runner-controller v0.24.1", func(t *testing.T) {
|
||||||
|
env.installActionsRunnerController(t, "summerwind/actions-runner-controller", "v0.24.1", testID)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("install argo-tunnel", func(t *testing.T) {
|
||||||
|
env.installArgoTunnel(t)
|
||||||
|
})
|
||||||
|
|
||||||
|
if !skipArgoTunnelCleanUp {
|
||||||
|
t.Cleanup(func() {
|
||||||
|
env.uninstallArgoTunnel(t)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Run("deploy runners", func(t *testing.T) {
|
||||||
|
env.deploy(t, RunnerSets, testID)
|
||||||
|
})
|
||||||
|
|
||||||
|
if !skipRunnerCleanUp {
|
||||||
|
t.Cleanup(func() {
|
||||||
|
env.undeploy(t, RunnerSets, testID)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Run("install edge actions-runner-controller", func(t *testing.T) {
|
||||||
|
env.installActionsRunnerController(t, vars.controllerImageRepo, vars.controllerImageTag, testID)
|
||||||
|
})
|
||||||
|
|
||||||
|
if t.Failed() {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Run("Install workflow", func(t *testing.T) {
|
||||||
|
env.installActionsWorkflow(t, RunnerSets, testID)
|
||||||
|
})
|
||||||
|
|
||||||
|
if t.Failed() {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Run("Verify workflow run result", func(t *testing.T) {
|
||||||
|
env.verifyActionsWorkflowRun(t, testID)
|
||||||
|
})
|
||||||
})
|
})
|
||||||
|
|
||||||
if t.Failed() {
|
t.Run("RunnerDeployments", func(t *testing.T) {
|
||||||
return
|
var (
|
||||||
}
|
testID string
|
||||||
|
)
|
||||||
|
|
||||||
t.Run("Install workflow", func(t *testing.T) {
|
t.Run("get or generate test ID", func(t *testing.T) {
|
||||||
env.installActionsWorkflow(t)
|
testID = env.GetOrGenerateTestID(t)
|
||||||
|
})
|
||||||
|
|
||||||
|
if !skipTestIDCleanUp {
|
||||||
|
t.Cleanup(func() {
|
||||||
|
env.DeleteTestID(t)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Run("install actions-runner-controller v0.24.1", func(t *testing.T) {
|
||||||
|
env.installActionsRunnerController(t, "summerwind/actions-runner-controller", "v0.24.1", testID)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("install argo-tunnel", func(t *testing.T) {
|
||||||
|
env.installArgoTunnel(t)
|
||||||
|
})
|
||||||
|
|
||||||
|
if !skipArgoTunnelCleanUp {
|
||||||
|
t.Cleanup(func() {
|
||||||
|
env.uninstallArgoTunnel(t)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Run("deploy runners", func(t *testing.T) {
|
||||||
|
env.deploy(t, RunnerDeployments, testID)
|
||||||
|
})
|
||||||
|
|
||||||
|
if !skipRunnerCleanUp {
|
||||||
|
t.Cleanup(func() {
|
||||||
|
env.undeploy(t, RunnerDeployments, testID)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Run("install edge actions-runner-controller", func(t *testing.T) {
|
||||||
|
env.installActionsRunnerController(t, vars.controllerImageRepo, vars.controllerImageTag, testID)
|
||||||
|
})
|
||||||
|
|
||||||
|
if t.Failed() {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Run("Install workflow", func(t *testing.T) {
|
||||||
|
env.installActionsWorkflow(t, RunnerDeployments, testID)
|
||||||
|
})
|
||||||
|
|
||||||
|
if t.Failed() {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Run("Verify workflow run result", func(t *testing.T) {
|
||||||
|
env.verifyActionsWorkflowRun(t, testID)
|
||||||
|
})
|
||||||
})
|
})
|
||||||
|
|
||||||
if t.Failed() {
|
if retainCluster {
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
t.Run("Verify workflow run result", func(t *testing.T) {
|
|
||||||
env.verifyActionsWorkflowRun(t)
|
|
||||||
})
|
|
||||||
|
|
||||||
if os.Getenv("ARC_E2E_NO_CLEANUP") != "" {
|
|
||||||
t.FailNow()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestE2ERunnerDeploy(t *testing.T) {
|
|
||||||
if testing.Short() {
|
|
||||||
t.Skip("Skipped as -short is set")
|
|
||||||
}
|
|
||||||
|
|
||||||
env := initTestEnv(t)
|
|
||||||
env.useApp = true
|
|
||||||
|
|
||||||
t.Run("build and load images", func(t *testing.T) {
|
|
||||||
env.buildAndLoadImages(t)
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("install cert-manager", func(t *testing.T) {
|
|
||||||
env.installCertManager(t)
|
|
||||||
})
|
|
||||||
|
|
||||||
if t.Failed() {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
t.Run("install actions-runner-controller and runners", func(t *testing.T) {
|
|
||||||
env.installActionsRunnerController(t)
|
|
||||||
})
|
|
||||||
|
|
||||||
if t.Failed() {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
t.Run("Install workflow", func(t *testing.T) {
|
|
||||||
env.installActionsWorkflow(t)
|
|
||||||
})
|
|
||||||
|
|
||||||
if t.Failed() {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
t.Run("Verify workflow run result", func(t *testing.T) {
|
|
||||||
env.verifyActionsWorkflowRun(t)
|
|
||||||
})
|
|
||||||
|
|
||||||
if os.Getenv("ARC_E2E_NO_CLEANUP") != "" {
|
|
||||||
t.FailNow()
|
t.FailNow()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -194,41 +236,123 @@ func TestE2ERunnerDeploy(t *testing.T) {
|
|||||||
type env struct {
|
type env struct {
|
||||||
*testing.Env
|
*testing.Env
|
||||||
|
|
||||||
useRunnerSet bool
|
Kind *testing.Kind
|
||||||
|
|
||||||
// Uses GITHUB_APP_ID, GITHUB_APP_INSTALLATION_ID, and GITHUB_APP_PRIVATE_KEY
|
// Uses GITHUB_APP_ID, GITHUB_APP_INSTALLATION_ID, and GITHUB_APP_PRIVATE_KEY
|
||||||
// to let ARC authenticate as a GitHub App
|
// to let ARC authenticate as a GitHub App
|
||||||
useApp bool
|
useApp bool
|
||||||
|
|
||||||
testID string
|
testName string
|
||||||
testName string
|
repoToCommit string
|
||||||
repoToCommit string
|
appID, appInstallationID, appPrivateKeyFile string
|
||||||
appID, appInstallationID, appPrivateKeyFile string
|
githubToken, testRepo, testOrg, testOrgRepo string
|
||||||
runnerLabel, githubToken, testRepo, testOrg, testOrgRepo string
|
githubTokenWebhook string
|
||||||
githubTokenWebhook string
|
testEnterprise string
|
||||||
testEnterprise string
|
testEphemeral string
|
||||||
testEphemeral string
|
scaleDownDelaySecondsAfterScaleOut int64
|
||||||
scaleDownDelaySecondsAfterScaleOut int64
|
minReplicas int64
|
||||||
minReplicas int64
|
dockerdWithinRunnerContainer bool
|
||||||
dockerdWithinRunnerContainer bool
|
remoteKubeconfig string
|
||||||
testJobs []job
|
imagePullSecretName string
|
||||||
|
imagePullPolicy string
|
||||||
|
|
||||||
|
vars vars
|
||||||
|
VerifyTimeout time.Duration
|
||||||
}
|
}
|
||||||
|
|
||||||
func initTestEnv(t *testing.T) *env {
|
type vars struct {
|
||||||
|
controllerImageRepo, controllerImageTag string
|
||||||
|
|
||||||
|
runnerImageRepo string
|
||||||
|
runnerDindImageRepo string
|
||||||
|
|
||||||
|
prebuildImages []testing.ContainerImage
|
||||||
|
builds []testing.DockerBuild
|
||||||
|
|
||||||
|
commonScriptEnv []string
|
||||||
|
}
|
||||||
|
|
||||||
|
func buildVars(repo string) vars {
|
||||||
|
if repo == "" {
|
||||||
|
repo = "actionsrunnercontrollere2e"
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
controllerImageRepo = repo + "/actions-runner-controller"
|
||||||
|
controllerImageTag = "e2e"
|
||||||
|
controllerImage = testing.Img(controllerImageRepo, controllerImageTag)
|
||||||
|
runnerImageRepo = repo + "/actions-runner"
|
||||||
|
runnerDindImageRepo = repo + "/actions-runner-dind"
|
||||||
|
runnerImageTag = "e2e"
|
||||||
|
runnerImage = testing.Img(runnerImageRepo, runnerImageTag)
|
||||||
|
runnerDindImage = testing.Img(runnerDindImageRepo, runnerImageTag)
|
||||||
|
)
|
||||||
|
|
||||||
|
var vs vars
|
||||||
|
|
||||||
|
vs.controllerImageRepo, vs.controllerImageTag = controllerImageRepo, controllerImageTag
|
||||||
|
vs.runnerDindImageRepo = runnerDindImageRepo
|
||||||
|
vs.runnerImageRepo = runnerImageRepo
|
||||||
|
|
||||||
|
// vs.controllerImage, vs.controllerImageTag
|
||||||
|
|
||||||
|
vs.prebuildImages = []testing.ContainerImage{
|
||||||
|
controllerImage,
|
||||||
|
runnerImage,
|
||||||
|
runnerDindImage,
|
||||||
|
}
|
||||||
|
|
||||||
|
vs.builds = []testing.DockerBuild{
|
||||||
|
{
|
||||||
|
Dockerfile: "../../Dockerfile",
|
||||||
|
Args: []testing.BuildArg{},
|
||||||
|
Image: controllerImage,
|
||||||
|
EnableBuildX: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Dockerfile: "../../runner/actions-runner.dockerfile",
|
||||||
|
Args: []testing.BuildArg{
|
||||||
|
{
|
||||||
|
Name: "RUNNER_VERSION",
|
||||||
|
Value: "2.294.0",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Image: runnerImage,
|
||||||
|
EnableBuildX: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Dockerfile: "../../runner/actions-runner-dind.dockerfile",
|
||||||
|
Args: []testing.BuildArg{
|
||||||
|
{
|
||||||
|
Name: "RUNNER_VERSION",
|
||||||
|
Value: "2.294.0",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Image: runnerDindImage,
|
||||||
|
EnableBuildX: true,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
vs.commonScriptEnv = []string{
|
||||||
|
"SYNC_PERIOD=" + "30s",
|
||||||
|
"RUNNER_TAG=" + runnerImageTag,
|
||||||
|
}
|
||||||
|
|
||||||
|
return vs
|
||||||
|
}
|
||||||
|
|
||||||
|
func initTestEnv(t *testing.T, k8sMinorVer string, vars vars) *env {
|
||||||
t.Helper()
|
t.Helper()
|
||||||
|
|
||||||
testingEnv := testing.Start(t, testing.Preload(images...))
|
testingEnv := testing.Start(t, k8sMinorVer)
|
||||||
|
|
||||||
e := &env{Env: testingEnv}
|
e := &env{Env: testingEnv}
|
||||||
|
|
||||||
id := e.ID()
|
testName := t.Name()
|
||||||
|
|
||||||
testName := t.Name() + " " + id
|
|
||||||
|
|
||||||
t.Logf("Initializing test with name %s", testName)
|
t.Logf("Initializing test with name %s", testName)
|
||||||
|
|
||||||
e.testID = id
|
|
||||||
e.testName = testName
|
e.testName = testName
|
||||||
e.runnerLabel = "test-" + id
|
|
||||||
e.githubToken = testing.Getenv(t, "GITHUB_TOKEN")
|
e.githubToken = testing.Getenv(t, "GITHUB_TOKEN")
|
||||||
e.appID = testing.Getenv(t, "GITHUB_APP_ID")
|
e.appID = testing.Getenv(t, "GITHUB_APP_ID")
|
||||||
e.appInstallationID = testing.Getenv(t, "GITHUB_APP_INSTALLATION_ID")
|
e.appInstallationID = testing.Getenv(t, "GITHUB_APP_INSTALLATION_ID")
|
||||||
@@ -240,7 +364,29 @@ func initTestEnv(t *testing.T) *env {
|
|||||||
e.testOrgRepo = testing.Getenv(t, "TEST_ORG_REPO", "")
|
e.testOrgRepo = testing.Getenv(t, "TEST_ORG_REPO", "")
|
||||||
e.testEnterprise = testing.Getenv(t, "TEST_ENTERPRISE", "")
|
e.testEnterprise = testing.Getenv(t, "TEST_ENTERPRISE", "")
|
||||||
e.testEphemeral = testing.Getenv(t, "TEST_EPHEMERAL", "")
|
e.testEphemeral = testing.Getenv(t, "TEST_EPHEMERAL", "")
|
||||||
e.testJobs = createTestJobs(id, testResultCMNamePrefix, 6)
|
e.remoteKubeconfig = testing.Getenv(t, "ARC_E2E_REMOTE_KUBECONFIG", "")
|
||||||
|
e.imagePullSecretName = testing.Getenv(t, "ARC_E2E_IMAGE_PULL_SECRET_NAME", "")
|
||||||
|
e.vars = vars
|
||||||
|
|
||||||
|
if e.remoteKubeconfig != "" {
|
||||||
|
e.imagePullPolicy = "Always"
|
||||||
|
} else {
|
||||||
|
e.imagePullPolicy = "IfNotPresent"
|
||||||
|
}
|
||||||
|
|
||||||
|
if e.remoteKubeconfig == "" {
|
||||||
|
e.Kind = testing.StartKind(t, k8sMinorVer, testing.Preload(images...))
|
||||||
|
e.Env.Kubeconfig = e.Kind.Kubeconfig()
|
||||||
|
} else {
|
||||||
|
e.Env.Kubeconfig = e.remoteKubeconfig
|
||||||
|
|
||||||
|
// Kind automatically installs https://github.com/rancher/local-path-provisioner for PVs.
|
||||||
|
// But assuming the remote cluster isn't a kind Kubernetes cluster,
|
||||||
|
// we need to install any provisioner manually.
|
||||||
|
// Here, we install the local-path-provisioner on the remote cluster too,
|
||||||
|
// so that we won't suffer from E2E failures due to the provisioner difference.
|
||||||
|
e.KubectlApply(t, "https://raw.githubusercontent.com/rancher/local-path-provisioner/v0.0.22/deploy/local-path-storage.yaml", testing.KubectlConfig{})
|
||||||
|
}
|
||||||
|
|
||||||
e.scaleDownDelaySecondsAfterScaleOut, _ = strconv.ParseInt(testing.Getenv(t, "TEST_RUNNER_SCALE_DOWN_DELAY_SECONDS_AFTER_SCALE_OUT", "10"), 10, 32)
|
e.scaleDownDelaySecondsAfterScaleOut, _ = strconv.ParseInt(testing.Getenv(t, "TEST_RUNNER_SCALE_DOWN_DELAY_SECONDS_AFTER_SCALE_OUT", "10"), 10, 32)
|
||||||
e.minReplicas, _ = strconv.ParseInt(testing.Getenv(t, "TEST_RUNNER_MIN_REPLICAS", "1"), 10, 32)
|
e.minReplicas, _ = strconv.ParseInt(testing.Getenv(t, "TEST_RUNNER_MIN_REPLICAS", "1"), 10, 32)
|
||||||
@@ -260,8 +406,29 @@ func (e *env) f() {
|
|||||||
func (e *env) buildAndLoadImages(t *testing.T) {
|
func (e *env) buildAndLoadImages(t *testing.T) {
|
||||||
t.Helper()
|
t.Helper()
|
||||||
|
|
||||||
e.DockerBuild(t, builds)
|
e.DockerBuild(t, e.vars.builds)
|
||||||
e.KindLoadImages(t, prebuildImages)
|
|
||||||
|
if e.remoteKubeconfig == "" {
|
||||||
|
e.KindLoadImages(t, e.vars.prebuildImages)
|
||||||
|
} else {
|
||||||
|
// If it fails with `no basic auth credentials` here, you might have missed logging into the container registry beforehand.
|
||||||
|
// For ECR, run something like:
|
||||||
|
// aws ecr get-login-password | docker login --username AWS --password-stdin ${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_DEFAULT_REGION}.amazonaws.com
|
||||||
|
// Also note that the authenticated session can be expired in a day or so(probably depends on your AWS config),
|
||||||
|
// so you might better write a script to do docker login before running the E2E test.
|
||||||
|
e.DockerPush(t, e.vars.prebuildImages)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *env) KindLoadImages(t *testing.T, prebuildImages []testing.ContainerImage) {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 300*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
if err := e.Kind.LoadImages(ctx, prebuildImages); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (e *env) installCertManager(t *testing.T) {
|
func (e *env) installCertManager(t *testing.T) {
|
||||||
@@ -281,35 +448,23 @@ func (e *env) installCertManager(t *testing.T) {
|
|||||||
e.KubectlWaitUntilDeployAvailable(t, "cert-manager", waitCfg.WithTimeout(60*time.Second))
|
e.KubectlWaitUntilDeployAvailable(t, "cert-manager", waitCfg.WithTimeout(60*time.Second))
|
||||||
}
|
}
|
||||||
|
|
||||||
func (e *env) installActionsRunnerController(t *testing.T) {
|
func (e *env) installActionsRunnerController(t *testing.T, repo, tag, testID string) {
|
||||||
t.Helper()
|
t.Helper()
|
||||||
|
|
||||||
e.createControllerNamespaceAndServiceAccount(t)
|
e.createControllerNamespaceAndServiceAccount(t)
|
||||||
|
|
||||||
scriptEnv := []string{
|
scriptEnv := []string{
|
||||||
"KUBECONFIG=" + e.Kubeconfig(),
|
"KUBECONFIG=" + e.Kubeconfig,
|
||||||
"ACCEPTANCE_TEST_DEPLOYMENT_TOOL=" + "helm",
|
"ACCEPTANCE_TEST_DEPLOYMENT_TOOL=" + "helm",
|
||||||
}
|
}
|
||||||
|
|
||||||
if e.useRunnerSet {
|
|
||||||
scriptEnv = append(scriptEnv, "USE_RUNNERSET=1")
|
|
||||||
} else {
|
|
||||||
scriptEnv = append(scriptEnv, "USE_RUNNERSET=false")
|
|
||||||
}
|
|
||||||
|
|
||||||
varEnv := []string{
|
varEnv := []string{
|
||||||
"TEST_ENTERPRISE=" + e.testEnterprise,
|
|
||||||
"TEST_REPO=" + e.testRepo,
|
|
||||||
"TEST_ORG=" + e.testOrg,
|
|
||||||
"TEST_ORG_REPO=" + e.testOrgRepo,
|
|
||||||
"WEBHOOK_GITHUB_TOKEN=" + e.githubTokenWebhook,
|
"WEBHOOK_GITHUB_TOKEN=" + e.githubTokenWebhook,
|
||||||
"RUNNER_LABEL=" + e.runnerLabel,
|
"TEST_ID=" + testID,
|
||||||
"TEST_ID=" + e.testID,
|
"NAME=" + repo,
|
||||||
"TEST_EPHEMERAL=" + e.testEphemeral,
|
"VERSION=" + tag,
|
||||||
fmt.Sprintf("RUNNER_SCALE_DOWN_DELAY_SECONDS_AFTER_SCALE_OUT=%d", e.scaleDownDelaySecondsAfterScaleOut),
|
"IMAGE_PULL_SECRET=" + e.imagePullSecretName,
|
||||||
fmt.Sprintf("REPO_RUNNER_MIN_REPLICAS=%d", e.minReplicas),
|
"IMAGE_PULL_POLICY=" + e.imagePullPolicy,
|
||||||
fmt.Sprintf("ORG_RUNNER_MIN_REPLICAS=%d", e.minReplicas),
|
|
||||||
fmt.Sprintf("ENTERPRISE_RUNNER_MIN_REPLICAS=%d", e.minReplicas),
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if e.useApp {
|
if e.useApp {
|
||||||
@@ -326,22 +481,96 @@ func (e *env) installActionsRunnerController(t *testing.T) {
|
|||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
scriptEnv = append(scriptEnv, varEnv...)
|
||||||
|
scriptEnv = append(scriptEnv, e.vars.commonScriptEnv...)
|
||||||
|
|
||||||
|
e.RunScript(t, "../../acceptance/deploy.sh", testing.ScriptConfig{Dir: "../..", Env: scriptEnv})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *env) deploy(t *testing.T, kind DeployKind, testID string) {
|
||||||
|
t.Helper()
|
||||||
|
e.do(t, "apply", kind, testID)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *env) undeploy(t *testing.T, kind DeployKind, testID string) {
|
||||||
|
t.Helper()
|
||||||
|
e.do(t, "delete", kind, testID)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *env) do(t *testing.T, op string, kind DeployKind, testID string) {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
|
e.createControllerNamespaceAndServiceAccount(t)
|
||||||
|
|
||||||
|
scriptEnv := []string{
|
||||||
|
"KUBECONFIG=" + e.Kubeconfig,
|
||||||
|
"OP=" + op,
|
||||||
|
}
|
||||||
|
|
||||||
|
switch kind {
|
||||||
|
case RunnerSets:
|
||||||
|
scriptEnv = append(scriptEnv, "USE_RUNNERSET=1")
|
||||||
|
case RunnerDeployments:
|
||||||
|
scriptEnv = append(scriptEnv, "USE_RUNNERSET=false")
|
||||||
|
default:
|
||||||
|
t.Fatalf("Invalid deploy kind %v", kind)
|
||||||
|
}
|
||||||
|
|
||||||
|
varEnv := []string{
|
||||||
|
"TEST_ENTERPRISE=" + e.testEnterprise,
|
||||||
|
"TEST_REPO=" + e.testRepo,
|
||||||
|
"TEST_ORG=" + e.testOrg,
|
||||||
|
"TEST_ORG_REPO=" + e.testOrgRepo,
|
||||||
|
"RUNNER_LABEL=" + e.runnerLabel(testID),
|
||||||
|
"TEST_EPHEMERAL=" + e.testEphemeral,
|
||||||
|
fmt.Sprintf("RUNNER_SCALE_DOWN_DELAY_SECONDS_AFTER_SCALE_OUT=%d", e.scaleDownDelaySecondsAfterScaleOut),
|
||||||
|
fmt.Sprintf("REPO_RUNNER_MIN_REPLICAS=%d", e.minReplicas),
|
||||||
|
fmt.Sprintf("ORG_RUNNER_MIN_REPLICAS=%d", e.minReplicas),
|
||||||
|
fmt.Sprintf("ENTERPRISE_RUNNER_MIN_REPLICAS=%d", e.minReplicas),
|
||||||
|
}
|
||||||
|
|
||||||
if e.dockerdWithinRunnerContainer {
|
if e.dockerdWithinRunnerContainer {
|
||||||
varEnv = append(varEnv,
|
varEnv = append(varEnv,
|
||||||
"RUNNER_DOCKERD_WITHIN_RUNNER_CONTAINER=true",
|
"RUNNER_DOCKERD_WITHIN_RUNNER_CONTAINER=true",
|
||||||
"RUNNER_NAME="+runnerDindImageRepo,
|
"RUNNER_NAME="+e.vars.runnerDindImageRepo,
|
||||||
)
|
)
|
||||||
} else {
|
} else {
|
||||||
varEnv = append(varEnv,
|
varEnv = append(varEnv,
|
||||||
"RUNNER_DOCKERD_WITHIN_RUNNER_CONTAINER=false",
|
"RUNNER_DOCKERD_WITHIN_RUNNER_CONTAINER=false",
|
||||||
"RUNNER_NAME="+runnerImageRepo,
|
"RUNNER_NAME="+e.vars.runnerImageRepo,
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
scriptEnv = append(scriptEnv, varEnv...)
|
scriptEnv = append(scriptEnv, varEnv...)
|
||||||
scriptEnv = append(scriptEnv, commonScriptEnv...)
|
scriptEnv = append(scriptEnv, e.vars.commonScriptEnv...)
|
||||||
|
|
||||||
e.RunScript(t, "../../acceptance/deploy.sh", testing.ScriptConfig{Dir: "../..", Env: scriptEnv})
|
e.RunScript(t, "../../acceptance/deploy_runners.sh", testing.ScriptConfig{Dir: "../..", Env: scriptEnv})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *env) installArgoTunnel(t *testing.T) {
|
||||||
|
e.doArgoTunnel(t, "apply")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *env) uninstallArgoTunnel(t *testing.T) {
|
||||||
|
e.doArgoTunnel(t, "delete")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *env) doArgoTunnel(t *testing.T, op string) {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
|
scriptEnv := []string{
|
||||||
|
"KUBECONFIG=" + e.Kubeconfig,
|
||||||
|
"OP=" + op,
|
||||||
|
"TUNNEL_ID=" + os.Getenv("TUNNEL_ID"),
|
||||||
|
"TUNNE_NAME=" + os.Getenv("TUNNEL_NAME"),
|
||||||
|
"TUNNEL_HOSTNAME=" + os.Getenv("TUNNEL_HOSTNAME"),
|
||||||
|
}
|
||||||
|
|
||||||
|
e.RunScript(t, "../../acceptance/argotunnel.sh", testing.ScriptConfig{Dir: "../..", Env: scriptEnv})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *env) runnerLabel(testID string) string {
|
||||||
|
return "test-" + testID
|
||||||
}
|
}
|
||||||
|
|
||||||
func (e *env) createControllerNamespaceAndServiceAccount(t *testing.T) {
|
func (e *env) createControllerNamespaceAndServiceAccount(t *testing.T) {
|
||||||
@@ -351,16 +580,28 @@ func (e *env) createControllerNamespaceAndServiceAccount(t *testing.T) {
|
|||||||
e.KubectlEnsureClusterRoleBindingServiceAccount(t, "default-admin", "cluster-admin", "default:default", testing.KubectlConfig{})
|
e.KubectlEnsureClusterRoleBindingServiceAccount(t, "default-admin", "cluster-admin", "default:default", testing.KubectlConfig{})
|
||||||
}
|
}
|
||||||
|
|
||||||
func (e *env) installActionsWorkflow(t *testing.T) {
|
func (e *env) installActionsWorkflow(t *testing.T, kind DeployKind, testID string) {
|
||||||
t.Helper()
|
t.Helper()
|
||||||
|
|
||||||
installActionsWorkflow(t, e.testName, e.runnerLabel, testResultCMNamePrefix, e.repoToCommit, e.testJobs)
|
installActionsWorkflow(t, e.testName+" "+testID, e.runnerLabel(testID), testResultCMNamePrefix, e.repoToCommit, kind, e.testJobs(testID))
|
||||||
}
|
}
|
||||||
|
|
||||||
func (e *env) verifyActionsWorkflowRun(t *testing.T) {
|
func (e *env) testJobs(testID string) []job {
|
||||||
|
return createTestJobs(testID, testResultCMNamePrefix, 6)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *env) verifyActionsWorkflowRun(t *testing.T, testID string) {
|
||||||
t.Helper()
|
t.Helper()
|
||||||
|
|
||||||
verifyActionsWorkflowRun(t, e.Env, e.testJobs)
|
verifyActionsWorkflowRun(t, e.Env, e.testJobs(testID), e.verifyTimeout())
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *env) verifyTimeout() time.Duration {
|
||||||
|
if e.VerifyTimeout > 0 {
|
||||||
|
return e.VerifyTimeout
|
||||||
|
}
|
||||||
|
|
||||||
|
return 8 * 60 * time.Second
|
||||||
}
|
}
|
||||||
|
|
||||||
type job struct {
|
type job struct {
|
||||||
@@ -383,7 +624,7 @@ func createTestJobs(id, testResultCMNamePrefix string, numJobs int) []job {
|
|||||||
|
|
||||||
const Branch = "main"
|
const Branch = "main"
|
||||||
|
|
||||||
func installActionsWorkflow(t *testing.T, testName, runnerLabel, testResultCMNamePrefix, testRepo string, testJobs []job) {
|
func installActionsWorkflow(t *testing.T, testName, runnerLabel, testResultCMNamePrefix, testRepo string, kind DeployKind, testJobs []job) {
|
||||||
t.Helper()
|
t.Helper()
|
||||||
|
|
||||||
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||||
@@ -400,45 +641,70 @@ func installActionsWorkflow(t *testing.T, testName, runnerLabel, testResultCMNam
|
|||||||
Jobs: map[string]testing.Job{},
|
Jobs: map[string]testing.Job{},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
kubernetesContainerMode := os.Getenv("TEST_CONTAINER_MODE") == "kubernetes"
|
||||||
|
|
||||||
|
var container string
|
||||||
|
if kubernetesContainerMode {
|
||||||
|
container = "golang:1.18"
|
||||||
|
}
|
||||||
|
|
||||||
for _, j := range testJobs {
|
for _, j := range testJobs {
|
||||||
wf.Jobs[j.name] = testing.Job{
|
steps := []testing.Step{
|
||||||
RunsOn: runnerLabel,
|
{
|
||||||
Steps: []testing.Step{
|
Uses: testing.ActionsCheckout,
|
||||||
{
|
},
|
||||||
Uses: testing.ActionsCheckoutV2,
|
}
|
||||||
},
|
|
||||||
{
|
if !kubernetesContainerMode {
|
||||||
|
if kind == RunnerDeployments {
|
||||||
|
steps = append(steps,
|
||||||
|
testing.Step{
|
||||||
|
Run: "sudo mkdir -p \"${RUNNER_TOOL_CACHE}\" \"${HOME}/.cache\" \"/var/lib/docker\"",
|
||||||
|
},
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
steps = append(steps,
|
||||||
|
testing.Step{
|
||||||
// This might be the easiest way to handle permissions without use of securityContext
|
// This might be the easiest way to handle permissions without use of securityContext
|
||||||
// https://stackoverflow.com/questions/50156124/kubernetes-nfs-persistent-volumes-permission-denied#comment107483717_53186320
|
// https://stackoverflow.com/questions/50156124/kubernetes-nfs-persistent-volumes-permission-denied#comment107483717_53186320
|
||||||
Run: "sudo chmod 777 -R \"${RUNNER_TOOL_CACHE}\" \"${HOME}/.cache\" \"/var/lib/docker\"",
|
Run: "sudo chmod 777 -R \"${RUNNER_TOOL_CACHE}\" \"${HOME}/.cache\" \"/var/lib/docker\"",
|
||||||
},
|
},
|
||||||
{
|
testing.Step{
|
||||||
// This might be the easiest way to handle permissions without use of securityContext
|
// This might be the easiest way to handle permissions without use of securityContext
|
||||||
// https://stackoverflow.com/questions/50156124/kubernetes-nfs-persistent-volumes-permission-denied#comment107483717_53186320
|
// https://stackoverflow.com/questions/50156124/kubernetes-nfs-persistent-volumes-permission-denied#comment107483717_53186320
|
||||||
Run: "ls -lah \"${RUNNER_TOOL_CACHE}\" \"${HOME}/.cache\" \"/var/lib/docker\"",
|
Run: "ls -lah \"${RUNNER_TOOL_CACHE}\" \"${HOME}/.cache\" \"/var/lib/docker\"",
|
||||||
},
|
},
|
||||||
{
|
testing.Step{
|
||||||
Uses: "actions/setup-go@v3",
|
Uses: "actions/setup-go@v3",
|
||||||
With: &testing.With{
|
With: &testing.With{
|
||||||
GoVersion: "1.18.2",
|
GoVersion: "1.18.2",
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
)
|
||||||
Run: "go version",
|
}
|
||||||
},
|
|
||||||
{
|
steps = append(steps,
|
||||||
Run: "go build .",
|
testing.Step{
|
||||||
},
|
Run: "go version",
|
||||||
{
|
},
|
||||||
|
testing.Step{
|
||||||
|
Run: "go build .",
|
||||||
|
},
|
||||||
|
)
|
||||||
|
|
||||||
|
if !kubernetesContainerMode {
|
||||||
|
steps = append(steps,
|
||||||
|
testing.Step{
|
||||||
// https://github.com/docker/buildx/issues/413#issuecomment-710660155
|
// https://github.com/docker/buildx/issues/413#issuecomment-710660155
|
||||||
// To prevent setup-buildx-action from failing with:
|
// To prevent setup-buildx-action from failing with:
|
||||||
// error: could not create a builder instance with TLS data loaded from environment. Please use `docker context create <context-name>` to create a context for current environment and then create a builder instance with `docker buildx create <context-name>`
|
// error: could not create a builder instance with TLS data loaded from environment. Please use `docker context create <context-name>` to create a context for current environment and then create a builder instance with `docker buildx create <context-name>`
|
||||||
Run: "docker context create mycontext",
|
Run: "docker context create mycontext",
|
||||||
},
|
},
|
||||||
{
|
testing.Step{
|
||||||
Run: "docker context use mycontext",
|
Run: "docker context use mycontext",
|
||||||
},
|
},
|
||||||
{
|
testing.Step{
|
||||||
Name: "Set up Docker Buildx",
|
Name: "Set up Docker Buildx",
|
||||||
Uses: "docker/setup-buildx-action@v1",
|
Uses: "docker/setup-buildx-action@v1",
|
||||||
With: &testing.With{
|
With: &testing.With{
|
||||||
@@ -449,30 +715,36 @@ func installActionsWorkflow(t *testing.T, testName, runnerLabel, testResultCMNam
|
|||||||
Install: false,
|
Install: false,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
testing.Step{
|
||||||
Run: "docker buildx build --platform=linux/amd64 " +
|
Run: "docker buildx build --platform=linux/amd64 " +
|
||||||
"--cache-from=type=local,src=/home/runner/.cache/buildx " +
|
"--cache-from=type=local,src=/home/runner/.cache/buildx " +
|
||||||
"--cache-to=type=local,dest=/home/runner/.cache/buildx-new,mode=max " +
|
"--cache-to=type=local,dest=/home/runner/.cache/buildx-new,mode=max " +
|
||||||
".",
|
".",
|
||||||
},
|
},
|
||||||
{
|
testing.Step{
|
||||||
// https://github.com/docker/build-push-action/blob/master/docs/advanced/cache.md#local-cache
|
// https://github.com/docker/build-push-action/blob/master/docs/advanced/cache.md#local-cache
|
||||||
// See https://github.com/moby/buildkit/issues/1896 for why this is needed
|
// See https://github.com/moby/buildkit/issues/1896 for why this is needed
|
||||||
Run: "rm -rf /home/runner/.cache/buildx && mv /home/runner/.cache/buildx-new /home/runner/.cache/buildx",
|
Run: "rm -rf /home/runner/.cache/buildx && mv /home/runner/.cache/buildx-new /home/runner/.cache/buildx",
|
||||||
},
|
},
|
||||||
{
|
testing.Step{
|
||||||
Run: "ls -lah /home/runner/.cache/*",
|
Run: "ls -lah /home/runner/.cache/*",
|
||||||
},
|
},
|
||||||
{
|
testing.Step{
|
||||||
Uses: "azure/setup-kubectl@v1",
|
Uses: "azure/setup-kubectl@v1",
|
||||||
With: &testing.With{
|
With: &testing.With{
|
||||||
Version: "v1.20.2",
|
Version: "v1.20.2",
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
testing.Step{
|
||||||
Run: fmt.Sprintf("./test.sh %s %s", t.Name(), j.testArg),
|
Run: fmt.Sprintf("./test.sh %s %s", t.Name(), j.testArg),
|
||||||
},
|
},
|
||||||
},
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
wf.Jobs[j.name] = testing.Job{
|
||||||
|
RunsOn: runnerLabel,
|
||||||
|
Container: container,
|
||||||
|
Steps: steps,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -506,7 +778,7 @@ kubectl create cm %s$id --from-literal=status=ok
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func verifyActionsWorkflowRun(t *testing.T, env *testing.Env, testJobs []job) {
|
func verifyActionsWorkflowRun(t *testing.T, env *testing.Env, testJobs []job, timeout time.Duration) {
|
||||||
t.Helper()
|
t.Helper()
|
||||||
|
|
||||||
var expected []string
|
var expected []string
|
||||||
@@ -524,7 +796,7 @@ func verifyActionsWorkflowRun(t *testing.T, env *testing.Env, testJobs []job) {
|
|||||||
testResultCMName := testJobs[i].configMapName
|
testResultCMName := testJobs[i].configMapName
|
||||||
|
|
||||||
kubectlEnv := []string{
|
kubectlEnv := []string{
|
||||||
"KUBECONFIG=" + env.Kubeconfig(),
|
"KUBECONFIG=" + env.Kubeconfig,
|
||||||
}
|
}
|
||||||
|
|
||||||
cmCfg := testing.KubectlConfig{
|
cmCfg := testing.KubectlConfig{
|
||||||
@@ -556,5 +828,5 @@ func verifyActionsWorkflowRun(t *testing.T, env *testing.Env, testJobs []job) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
return results, err
|
return results, err
|
||||||
}, 3*60*time.Second, 10*time.Second).Should(gomega.Equal(expected))
|
}, timeout, 30*time.Second).Should(gomega.Equal(expected))
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -71,3 +71,18 @@ func (k *Docker) dockerBuildCombinedOutput(ctx context.Context, build DockerBuil
|
|||||||
|
|
||||||
return k.CombinedOutput(cmd)
|
return k.CombinedOutput(cmd)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (k *Docker) Push(ctx context.Context, images []ContainerImage) error {
|
||||||
|
for _, img := range images {
|
||||||
|
_, err := k.CombinedOutput(dockerPushCmd(ctx, img.Repo, img.Tag))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func dockerPushCmd(ctx context.Context, repo, tag string) *exec.Cmd {
|
||||||
|
return exec.CommandContext(ctx, "docker", "push", repo+":"+tag)
|
||||||
|
}
|
||||||
|
|||||||
@@ -86,6 +86,16 @@ func (k *Kubectl) CreateCMLiterals(ctx context.Context, name string, literals ma
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (k *Kubectl) DeleteCM(ctx context.Context, name string, cfg KubectlConfig) error {
|
||||||
|
args := []string{"cm", name}
|
||||||
|
|
||||||
|
if _, err := k.CombinedOutput(k.kubectlCmd(ctx, "delete", args, cfg)); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
func (k *Kubectl) Apply(ctx context.Context, path string, cfg KubectlConfig) error {
|
func (k *Kubectl) Apply(ctx context.Context, path string, cfg KubectlConfig) error {
|
||||||
if _, err := k.CombinedOutput(k.kubectlCmd(ctx, "apply", []string{"-f", path}, cfg)); err != nil {
|
if _, err := k.CombinedOutput(k.kubectlCmd(ctx, "apply", []string{"-f", path}, cfg)); err != nil {
|
||||||
return err
|
return err
|
||||||
|
|||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user