Mirror of https://github.com/actions/actions-runner-controller.git (synced 2025-12-10 11:41:27 +00:00)

Compare commits (55 commits)
| SHA1 |
|---|
| 3b2d2c052e |
| 37c2a62fa8 |
| 2eeb56d1c8 |
| a612b38f9b |
| 1c67ea65d9 |
| c26fb5ad5f |
| 325c2cc385 |
| 2e551c9d0a |
| 7b44454d01 |
| f2680b2f2d |
| b42b8406a2 |
| 3c125e2191 |
| 9ed245c85e |
| 5b7807d54b |
| 156e2c1987 |
| da4dfb3fdf |
| 0783ffe989 |
| 374105c1f3 |
| bc6e499e4f |
| 07f822bb08 |
| 3a0332dfdc |
| f6ab66c55b |
| d874a5cfda |
| c424215044 |
| c5fdfd63db |
| 23a45eaf87 |
| dee997b44e |
| 2929a739e3 |
| 3cccca8d09 |
| 7a7086e7aa |
| 565b14a148 |
| ecc441de3f |
| 25335bb3c3 |
| 9b871567b1 |
| 264cf494e3 |
| 3f23501b8e |
| 5530030c67 |
| 8d3a83b07a |
| a6270b44d5 |
| 2273b198a1 |
| 3d62e73f8c |
| f5c639ae28 |
| 81016154c0 |
| 728829be7b |
| c0b8f9d483 |
| ced1c2321a |
| 1b8a656051 |
| 1753fa3530 |
| 8c0f3dfc79 |
| dbda292f54 |
| 550a864198 |
| 4fa5315311 |
| 11e58fcc41 |
| f220fefe92 |
| 56b4598d1d |
.github/workflows/build-and-release-runners.yml (vendored, 64 lines changed)

@@ -13,21 +13,27 @@ on:
   paths:
     - runner/patched/*
     - runner/Dockerfile
-    - runner/dindrunner.Dockerfile
+    - runner/Dockerfile.ubuntu.1804
+    - runner/Dockerfile.dindrunner
     - runner/entrypoint.sh
     - .github/workflows/build-and-release-runners.yml

 jobs:
   build:
     runs-on: ubuntu-latest
-    name: Build ${{ matrix.name }}
+    name: Build ${{ matrix.name }}-ubuntu-${{ matrix.os-version }}
     strategy:
       matrix:
         include:
           - name: actions-runner
+            os-version: 20.04
             dockerfile: Dockerfile
+          - name: actions-runner
+            os-version: 18.04
+            dockerfile: Dockerfile.ubuntu.1804
           - name: actions-runner-dind
-            dockerfile: dindrunner.Dockerfile
+            os-version: 20.04
+            dockerfile: Dockerfile.dindrunner
     env:
       RUNNER_VERSION: 2.277.1
       DOCKER_VERSION: 19.03.12

@@ -55,7 +61,55 @@ jobs:
           username: ${{ github.repository_owner }}
           password: ${{ secrets.DOCKER_ACCESS_TOKEN }}

-      - name: Build and Push
+      - name: Build and Push Versioned Tags
         uses: docker/build-push-action@v2
         with:
           context: ./runner
           file: ./runner/${{ matrix.dockerfile }}
           platforms: linux/amd64,linux/arm64
           push: ${{ github.event_name != 'pull_request' }}
           build-args: |
             RUNNER_VERSION=${{ env.RUNNER_VERSION }}
             DOCKER_VERSION=${{ env.DOCKER_VERSION }}
           tags: |
             ${{ env.DOCKERHUB_USERNAME }}/${{ matrix.name }}:v${{ env.RUNNER_VERSION }}-ubuntu-${{ matrix.os-version }}
             ${{ env.DOCKERHUB_USERNAME }}/${{ matrix.name }}:v${{ env.RUNNER_VERSION }}-ubuntu-${{ matrix.os-version }}-${{ steps.vars.outputs.sha_short }}

   latest-tags:
     runs-on: ubuntu-latest
     name: Build ${{ matrix.name }}-latest
     strategy:
       matrix:
         include:
           - name: actions-runner
             dockerfile: Dockerfile
           - name: actions-runner-dind
             dockerfile: Dockerfile.dindrunner
     env:
       RUNNER_VERSION: 2.277.1
       DOCKER_VERSION: 19.03.12
       DOCKERHUB_USERNAME: ${{ github.repository_owner }}
     steps:

       - name: Checkout
         uses: actions/checkout@v2

       - name: Set up QEMU
         uses: docker/setup-qemu-action@v1

       - name: Set up Docker Buildx
         uses: docker/setup-buildx-action@v1
         with:
           version: latest

       - name: Login to DockerHub
         uses: docker/login-action@v1
         if: ${{ github.event_name == 'push' || github.event_name == 'release' }}
         with:
           username: ${{ github.repository_owner }}
           password: ${{ secrets.DOCKER_ACCESS_TOKEN }}

       - name: Build and Push Latest Tag
         uses: docker/build-push-action@v2
         with:
           context: ./runner

@@ -66,6 +120,4 @@ jobs:
           build-args: |
             RUNNER_VERSION=${{ env.RUNNER_VERSION }}
             DOCKER_VERSION=${{ env.DOCKER_VERSION }}
           tags: |
             ${{ env.DOCKERHUB_USERNAME }}/${{ matrix.name }}:v${{ env.RUNNER_VERSION }}
             ${{ env.DOCKERHUB_USERNAME }}/${{ matrix.name }}:v${{ env.RUNNER_VERSION }}-${{ steps.vars.outputs.sha_short }}
             ${{ env.DOCKERHUB_USERNAME }}/${{ matrix.name }}:latest
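Reading the matrix together with the tags expression: for the 20.04 `actions-runner` entry with `RUNNER_VERSION: 2.277.1`, the versioned step would push tags shaped like the following, assuming `DOCKERHUB_USERNAME` (the repository owner) resolves to `summerwind`; the `sha_short` suffix below is purely illustrative:

```yaml
# Illustrative rendering of the "Build and Push Versioned Tags" tags list
tags: |
  summerwind/actions-runner:v2.277.1-ubuntu-20.04
  summerwind/actions-runner:v2.277.1-ubuntu-20.04-1a2b3c4   # hypothetical sha_short
```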
CONTRIBUTING.md (new file, 8 lines)

@@ -0,0 +1,8 @@
# Contributing

### Helm Version Bumps

**Chart Version :** When bumping the chart version, follow semantic versioning https://semver.org/<br />
**App Version :** When bumping the app version you will also need to bump the chart version too. Again, follow semantic versioning when bumping the chart.

To determine if you need to bump the MAJOR, MINOR or PATCH versions you will need to review the changes between the previous app version and the new app version and / or ask for a maintainer to advise.
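The Chart.yaml diff later in this comparison is a concrete instance of the rule above: the app moved to 0.18.2, so the chart version was bumped alongside it. A minimal sketch of such a coupled bump (the old chart version 0.6.1 comes from that diff):

```yaml
# charts/actions-runner-controller/Chart.yaml
version: 0.11.0    # chart version, bumped from 0.6.1 together with the app
appVersion: 0.18.2 # app version, used as the default manager image tag
```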
Makefile (90 lines changed)

@@ -14,6 +14,8 @@ else
 GOBIN=$(shell go env GOBIN)
 endif

+TEST_ASSETS=$(PWD)/test-assets
+
 # default list of platforms for which multiarch image is built
 ifeq (${PLATFORMS}, )
 export PLATFORMS="linux/amd64,linux/arm64"

@@ -37,6 +39,13 @@ all: manager
 test: generate fmt vet manifests
 	go test ./... -coverprofile cover.out

+test-with-deps: kube-apiserver etcd kubectl
+	# See https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/envtest#pkg-constants
+	TEST_ASSET_KUBE_APISERVER=$(KUBE_APISERVER_BIN) \
+	TEST_ASSET_ETCD=$(ETCD_BIN) \
+	TEST_ASSET_KUBECTL=$(KUBECTL_BIN) \
+	make test
+
 # Build manager binary
 manager: generate fmt vet
 	go build -o bin/manager main.go

@@ -126,7 +135,8 @@ release/clean:
 	rm -rf release

 .PHONY: acceptance
-acceptance: release/clean docker-build docker-push release
+acceptance: release/clean docker-build release
+	make acceptance/pull
 	ACCEPTANCE_TEST_SECRET_TYPE=token make acceptance/kind acceptance/setup acceptance/tests acceptance/teardown
 	ACCEPTANCE_TEST_SECRET_TYPE=app make acceptance/kind acceptance/setup acceptance/tests acceptance/teardown
 	ACCEPTANCE_TEST_DEPLOYMENT_TOOL=helm ACCEPTANCE_TEST_SECRET_TYPE=token make acceptance/kind acceptance/setup acceptance/tests acceptance/teardown

@@ -134,8 +144,23 @@ acceptance: release/clean docker-build docker-push release

 acceptance/kind:
 	kind create cluster --name acceptance
 	kind load docker-image ${NAME}:${VERSION} --name acceptance
+	kind load docker-image quay.io/brancz/kube-rbac-proxy:v0.8.0 --name acceptance
+	kind load docker-image summerwind/actions-runner:latest --name acceptance
+	kind load docker-image docker:dind --name acceptance
+	kind load docker-image quay.io/jetstack/cert-manager-controller:v1.0.4 --name acceptance
+	kind load docker-image quay.io/jetstack/cert-manager-cainjector:v1.0.4 --name acceptance
+	kind load docker-image quay.io/jetstack/cert-manager-webhook:v1.0.4 --name acceptance
 	kubectl cluster-info --context kind-acceptance

+acceptance/pull:
+	docker pull quay.io/brancz/kube-rbac-proxy:v0.8.0
+	docker pull summerwind/actions-runner:latest
+	docker pull docker:dind
+	docker pull quay.io/jetstack/cert-manager-controller:v1.0.4
+	docker pull quay.io/jetstack/cert-manager-cainjector:v1.0.4
+	docker pull quay.io/jetstack/cert-manager-webhook:v1.0.4
+
 acceptance/setup:
 	kubectl apply --validate=false -f https://github.com/jetstack/cert-manager/releases/download/v1.0.4/cert-manager.yaml #kubectl create namespace actions-runner-system
 	kubectl -n cert-manager wait deploy/cert-manager-cainjector --for condition=available --timeout 60s

@@ -191,3 +216,66 @@ ifeq (, $(wildcard $(GOBIN)/yq))
 }
 endif
 YQ=$(GOBIN)/yq
+
+OS_NAME := $(shell uname -s | tr A-Z a-z)
+
+# find or download etcd
+etcd:
+ifeq (, $(wildcard $(TEST_ASSETS)/etcd))
+	@{ \
+	set -xe ;\
+	INSTALL_TMP_DIR=$$(mktemp -d) ;\
+	cd $$INSTALL_TMP_DIR ;\
+	wget https://github.com/kubernetes-sigs/kubebuilder/releases/download/v2.3.2/kubebuilder_2.3.2_$(OS_NAME)_amd64.tar.gz ;\
+	mkdir -p $(TEST_ASSETS) ;\
+	tar zxvf kubebuilder_2.3.2_$(OS_NAME)_amd64.tar.gz ;\
+	mv kubebuilder_2.3.2_$(OS_NAME)_amd64/bin/etcd $(TEST_ASSETS)/etcd ;\
+	mv kubebuilder_2.3.2_$(OS_NAME)_amd64/bin/kube-apiserver $(TEST_ASSETS)/kube-apiserver ;\
+	mv kubebuilder_2.3.2_$(OS_NAME)_amd64/bin/kubectl $(TEST_ASSETS)/kubectl ;\
+	rm -rf $$INSTALL_TMP_DIR ;\
+	}
+ETCD_BIN=$(TEST_ASSETS)/etcd
+else
+ETCD_BIN=$(TEST_ASSETS)/etcd
+endif
+
+# find or download kube-apiserver
+kube-apiserver:
+ifeq (, $(wildcard $(TEST_ASSETS)/kube-apiserver))
+	@{ \
+	set -xe ;\
+	INSTALL_TMP_DIR=$$(mktemp -d) ;\
+	cd $$INSTALL_TMP_DIR ;\
+	wget https://github.com/kubernetes-sigs/kubebuilder/releases/download/v2.3.2/kubebuilder_2.3.2_$(OS_NAME)_amd64.tar.gz ;\
+	mkdir -p $(TEST_ASSETS) ;\
+	tar zxvf kubebuilder_2.3.2_$(OS_NAME)_amd64.tar.gz ;\
+	mv kubebuilder_2.3.2_$(OS_NAME)_amd64/bin/etcd $(TEST_ASSETS)/etcd ;\
+	mv kubebuilder_2.3.2_$(OS_NAME)_amd64/bin/kube-apiserver $(TEST_ASSETS)/kube-apiserver ;\
+	mv kubebuilder_2.3.2_$(OS_NAME)_amd64/bin/kubectl $(TEST_ASSETS)/kubectl ;\
+	rm -rf $$INSTALL_TMP_DIR ;\
+	}
+KUBE_APISERVER_BIN=$(TEST_ASSETS)/kube-apiserver
+else
+KUBE_APISERVER_BIN=$(TEST_ASSETS)/kube-apiserver
+endif
+
+# find or download kubectl
+kubectl:
+ifeq (, $(wildcard $(TEST_ASSETS)/kubectl))
+	@{ \
+	set -xe ;\
+	INSTALL_TMP_DIR=$$(mktemp -d) ;\
+	cd $$INSTALL_TMP_DIR ;\
+	wget https://github.com/kubernetes-sigs/kubebuilder/releases/download/v2.3.2/kubebuilder_2.3.2_$(OS_NAME)_amd64.tar.gz ;\
+	mkdir -p $(TEST_ASSETS) ;\
+	tar zxvf kubebuilder_2.3.2_$(OS_NAME)_amd64.tar.gz ;\
+	mv kubebuilder_2.3.2_$(OS_NAME)_amd64/bin/etcd $(TEST_ASSETS)/etcd ;\
+	mv kubebuilder_2.3.2_$(OS_NAME)_amd64/bin/kube-apiserver $(TEST_ASSETS)/kube-apiserver ;\
+	mv kubebuilder_2.3.2_$(OS_NAME)_amd64/bin/kubectl $(TEST_ASSETS)/kubectl ;\
+	rm -rf $$INSTALL_TMP_DIR ;\
+	}
+KUBECTL_BIN=$(TEST_ASSETS)/kubectl
+else
+KUBECTL_BIN=$(TEST_ASSETS)/kubectl
+endif
README.md (86 lines changed)

@@ -21,12 +21,11 @@ ToC:
 - [Runner with DinD](#runner-with-dind)
 - [Additional tweaks](#additional-tweaks)
 - [Runner labels](#runner-labels)
-- [Runer groups](#runner-groups)
+- [Runner groups](#runner-groups)
 - [Using EKS IAM role for service accounts](#using-eks-iam-role-for-service-accounts)
 - [Software installed in the runner image](#software-installed-in-the-runner-image)
 - [Common errors](#common-errors)
-- [Developing](#developing)
 - [Alternatives](#alternatives)
+- [Contributing](#contributing)

 ## Motivation

@@ -45,8 +44,8 @@ Install the custom resource and actions-runner-controller with `kubectl` or `hel
 `kubectl`:

 ```shell
-# REPLACE "v0.17.0" with the version you wish to deploy
-kubectl apply -f https://github.com/summerwind/actions-runner-controller/releases/download/v0.17.0/actions-runner-controller.yaml
+# REPLACE "v0.18.2" with the version you wish to deploy
+kubectl apply -f https://github.com/summerwind/actions-runner-controller/releases/download/v0.18.2/actions-runner-controller.yaml
 ```

 `helm`:

@@ -61,7 +60,7 @@ helm upgrade --install -n actions-runner-system actions-runner-controller/action
 If you use either Github Enterprise Cloud or Server, you can use **actions-runner-controller** with those, too.
 Authentication works same way as with public Github (repo and organization level).
 The minimum version of Github Enterprise Server is 3.0.0 (or rc1/rc2).
-__**NOTE : The maintainers do not have an Enterprise environment to be able to test changes and so are reliant on the community for testing, support is a best endeavors basis only and is community driven**__
+__**NOTE : The maintainers do not have an Enterprise environment to be able to test changes and so this feature is community driven. Support is on a best endeavors basis.**__

 ```shell
 kubectl set env deploy controller-manager -c manager GITHUB_ENTERPRISE_URL=<GHEC/S URL> --namespace actions-runner-system

@@ -88,7 +87,6 @@ spec:
   template:
     spec:
       enterprise: your-enterprise-name
-      dockerdWithinRunnerContainer: true
       resources:
         limits:
           cpu: "4000m"
@@ -96,12 +94,6 @@ spec:
         requests:
           cpu: "200m"
           memory: "200Mi"
-      volumeMounts:
-        - mountPath: /runner
-          name: runner
-      volumes:
-        - name: runner
-          emptyDir: {}

 ```

@@ -163,7 +155,7 @@ Log-in to a GitHub account that has `admin` privileges for the repository, and [
 * repo (Full control)

-**Scopes for a Organisation Runner**
+**Scopes for a Organization Runner**

 * repo (Full control)
 * admin:org (Full control)

@@ -292,7 +284,7 @@ A `RunnerDeployment` can scale the number of runners between `minReplicas` and `

 **TotalNumberOfQueuedAndInProgressWorkflowRuns**

-In the below example, `actions-runner` will pole GitHub for all pending workflows with the pole period defined by the sync period configuration. It will then scale to e.g. 3 if there're 3 pending jobs at sync time.
+In the below example, `actions-runner` will poll GitHub for all pending workflows with the poll period defined by the sync period configuration. It will then scale to e.g. 3 if there're 3 pending jobs at sync time.
 With this scaling metric we are required to define a list of repositories within our metric.

 The scale out performance is controlled via the manager containers startup `--sync-period` argument. The default value is set to 10 minutes to prevent default deployments rate limiting themselves from the GitHub API.

@@ -302,12 +294,12 @@ The scale out performance is controlled via the manager containers startup `--sy

 **Benefits of this metric**
 1. Supports named repositories allowing you to restrict the runner to a specified set of repositories server side.
-2. Scales quickly (within the bounds of the syncPeriod) as it will spin up the number of runners based on the depth of the workflow queue
+2. Scales the runner count based on the actual queue depth of the jobs meaning a more 1:1 scaling of runners to queued jobs.
 3. Like all scaling metrics, you can manage workflow allocation to the RunnerDeployment through the use of [Github labels](#runner-labels).

 **Drawbacks of this metric**
 1. Repositories must be named within the scaling metric, maintaining a list of repositories may not be viable in larger environments or self-serve environments.
-2. May not scale quick enough for some users needs
+2. May not scale quick enough for some users needs. This metric is pull based and so the queue depth is polled as configured by the sync period, as a result scaling performance is bound by this sync period meaning there is a lag to scaling activity.
 3. Relatively large amounts of API requests required to maintain this metric, you may run in API rate limiting issues depending on the size of your environment and how aggressive your sync period configuration is
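A minimal `HorizontalRunnerAutoscaler` using this metric, sketched with illustrative names (the explicit `repositoryNames` list is exactly the requirement called out above):

```yaml
apiVersion: actions.summerwind.dev/v1alpha1
kind: HorizontalRunnerAutoscaler
metadata:
  name: example-autoscaler        # illustrative
spec:
  scaleTargetRef:
    name: example-runnerdeploy    # illustrative RunnerDeployment name
  minReplicas: 1
  maxReplicas: 5
  metrics:
  - type: TotalNumberOfQueuedAndInProgressWorkflowRuns
    repositoryNames:
    - example-org/example-repo    # every watched repository must be listed
```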
@@ -349,20 +341,20 @@ spec:

 **PercentageRunnersBusy**

-The `HorizontalRunnerAutoscaler` will pole GitHub based on the configuration sync period for the number of busy runners which live in the RunnerDeployment's namespace and scale based on the settings
+The `HorizontalRunnerAutoscaler` will poll GitHub based on the configuration sync period for the number of busy runners which live in the RunnerDeployment's namespace and scale based on the settings

 **Kustomize Config :** The period can be customised in the `config/default/manager_auth_proxy_patch.yaml` patch<br />
 **Helm Config :** `syncPeriod`

 **Benefits of this metric**
-1. Allows for multiple controllers to be deployed as each controller deployed is responsible for scaling their own runner pods on a per namespace basis.
-2. Supports named repositories server side the same as the `TotalNumberOfQueuedAndInProgressWorkflowRuns` metric [#313](https://github.com/summerwind/actions-runner-controller/pull/313)
-3. Supports github organisation wide scaling without maintaining an explicit list of repositories, this is especially useful for those that are working at a larger scale. [#223](https://github.com/summerwind/actions-runner-controller/pull/223)
-4. Like all scaling metrics, you can manage workflow allocation to the RunnerDeployment through the use of [Github labels](#runner-labels)
-5. Supports scaling runner count on both a percentage increase / descrease basis as well as on a fixed runner count basis [#223](https://github.com/summerwind/actions-runner-controller/pull/223) [#315](https://github.com/summerwind/actions-runner-controller/pull/315)
+1. Supports named repositories server side the same as the `TotalNumberOfQueuedAndInProgressWorkflowRuns` metric [#313](https://github.com/summerwind/actions-runner-controller/pull/313)
+2. Supports GitHub organisation wide scaling without maintaining an explicit list of repositories, this is especially useful for those that are working at a larger scale. [#223](https://github.com/summerwind/actions-runner-controller/pull/223)
+3. Like all scaling metrics, you can manage workflow allocation to the RunnerDeployment through the use of [Github labels](#runner-labels)
+4. Supports scaling desired runner count on both a percentage increase / decrease basis as well as on a fixed increase / decrease count basis [#223](https://github.com/summerwind/actions-runner-controller/pull/223) [#315](https://github.com/summerwind/actions-runner-controller/pull/315)

 **Drawbacks of this metric**
-1. May not scale quick enough for some users needs as we are scaling up and down based on indicative information rather than a direct count of the workflow queue depth
+1. May not scale quick enough for some users needs. This metric is pull based and so the number of busy runners are polled as configured by the sync period, as a result scaling performance is bound by this sync period meaning there is a lag to scaling activity.
+2. We are scaling up and down based on indicative information rather than a count of the actual number of queued jobs and so the desired runner count is likely to under provision new runners or overprovision them relative to actual job queue depth, this may or may not be a problem for you.

 Examples of each scaling type implemented with a `RunnerDeployment` backed by a `HorizontalRunnerAutoscaler`:
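For the `PercentageRunnersBusy` side, a hedged sketch (threshold and factor values are illustrative; per benefit 4 above, scaling can be driven by factors or by fixed counts):

```yaml
apiVersion: actions.summerwind.dev/v1alpha1
kind: HorizontalRunnerAutoscaler
metadata:
  name: example-autoscaler        # illustrative
spec:
  scaleTargetRef:
    name: example-runnerdeploy    # illustrative
  minReplicas: 1
  maxReplicas: 10
  metrics:
  - type: PercentageRunnersBusy
    scaleUpThreshold: '0.75'      # scale up once 75% of runners are busy
    scaleDownThreshold: '0.25'
    scaleUpFactor: '2'            # multiply desired replicas on scale-up
    scaleDownFactor: '0.5'
```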
@@ -419,11 +411,11 @@ spec:
 > Please get prepared to put some time and effort to learn and leverage this feature!

 `actions-runner-controller` has an optional Webhook server that receives GitHub Webhook events and scale
-[`RunnerDeployment`s](#runnerdeployments) by updating corresponding [`HorizontalRunnerAutoscaler`s](#autoscaling).
+[`RunnerDeployments`](#runnerdeployments) by updating corresponding [`HorizontalRunnerAutoscalers`](#autoscaling).

 Today, the Webhook server can be configured to respond GitHub `check_run`, `pull_request`, and `push` events
 by scaling up the matching `HorizontalRunnerAutoscaler` by N replica(s), where `N` is configurable within
-`HorizontalRunerAutoscaler`'s `Spec`.
+`HorizontalRunerAutoscaler's` `Spec`.

 More concretely, you can configure the targeted GitHub event types and the `N` in
 `scaleUpTriggers`:

@@ -433,7 +425,7 @@ kind: HorizontalRunnerAutoscaler
 spec:
   scaleTargetRef:
     name: myrunners
-  scaleUpTrigggers:
+  scaleUpTriggers:
   - githubEvent:
       checkRun:
         types: ["created"]

@@ -492,7 +484,7 @@ kind: HorizontalRunnerAutoscaler
 spec:
   scaleTargetRef:
     name: myrunners
-  scaleUpTrigggers:
+  scaleUpTriggers:
   - githubEvent:
       checkRun:
         types: ["created"]

@@ -514,7 +506,7 @@ kind: HorizontalRunnerAutoscaler
 spec:
   scaleTargetRef:
     name: myrunners
-  scaleUpTrigggers:
+  scaleUpTriggers:
   - githubEvent:
       pullRequest:
         types: ["synchronize"]

@@ -613,6 +605,17 @@ spec:
   # You can customise this setting allowing you to change the default working directory location
   # for example, the below setting is the same as on the ubuntu-18.04 image
   workDir: /home/runner/work
+  # You can mount some of the shared volumes to the dind container using dockerVolumeMounts, like any other volume mounting.
+  # NOTE: in case you want to use an hostPath like the following example, make sure that Kubernetes doesn't schedule more than one runner
+  # per physical host. You can achieve that by setting pod anti-affinity rules and/or resource requests/limits.
+  volumes:
+    - name: docker-extra
+      hostPath:
+        path: /mnt/docker-extra
+        type: DirectoryOrCreate
+  dockerVolumeMounts:
+    - mountPath: /var/lib/docker
+      name: docker-extra
 ```

 ### Runner labels

@@ -745,8 +748,11 @@ Your base64'ed PAT token has a new line at the end, it needs to be created witho
 * `echo -n $TOKEN | base64`
 * Create the secret as described in the docs using the shell and documeneted flags

-# Developing
+# Contributing
+
+For more details about any requirements or process, please check out [Getting Started with Contributing](CONTRIBUTING.md).

 **The Controller**<br />
 If you'd like to modify the controller to fork or contribute, I'd suggest using the following snippet for running
 the acceptance test:

@@ -759,7 +765,7 @@ NAME=$DOCKER_USER/actions-runner-controller \
 APP_ID=*** \
 PRIVATE_KEY_FILE_PATH=path/to/pem/file \
 INSTALLATION_ID=*** \
-  make docker-build docker-push acceptance
+  make docker-build acceptance
 ```

 Please follow the instructions explained in [Using Personal Access Token](#using-personal-access-token) to obtain

@@ -780,19 +786,9 @@ NAME=$DOCKER_USER/actions-runner-controller \
 PRIVATE_KEY_FILE_PATH=path/to/pem/file \
 INSTALLATION_ID=*** \
 ACCEPTANCE_TEST_SECRET_TYPE=token \
-  make docker-build docker-push \
-  acceptance/setup acceptance/tests
+  make docker-build acceptance/setup \
+  acceptance/tests
 ```
-# Alternatives
-
-The following is a list of alternative solutions that may better fit you depending on your use-case:
-
-- <https://github.com/evryfs/github-actions-runner-operator/>
-- <https://github.com/philips-labs/terraform-aws-github-runner/>
-
-Although the situation can change over time, as of writing this sentence, the benefits of using `actions-runner-controller` over the alternatives are:
-
-- `actions-runner-controller` has the ability to autoscale runners based on number of pending/progressing jobs (#99)
-- `actions-runner-controller` is able to gracefully stop runners (#103)
-- `actions-runner-controller` has ARM support
-- `actions-runner-controller` has GitHub Enterprise support (see [GitHub Enterprise support](#github-enterprise-support) section for caveats)
+**Runner Tests**<br />
+A set of example pipelines (./acceptance/pipelines) are provided in this repository which you can use to validate your runners are working as expected. When raising a PR please run the relevant suites to prove your change hasn't broken anything.
@@ -12,6 +12,9 @@ done

 echo Found runner ${runner_name}.

+# Wait a bit to make sure the runner pod is created before looking for it.
+sleep 2
+
 pod_name=

 while [ -z "${pod_name}" ]; do

@@ -24,6 +27,6 @@ echo Found pod ${pod_name}.

 echo Waiting for pod ${runner_name} to become ready... 1>&2

-kubectl wait pod/${runner_name} --for condition=ready --timeout 180s
+kubectl wait pod/${runner_name} --for condition=ready --timeout 270s

 echo All tests passed. 1>&2
@@ -26,13 +26,14 @@ if [ "${tool}" == "helm" ]; then
     charts/actions-runner-controller \
     -n actions-runner-system \
     --create-namespace \
-    --set syncPeriod=5m
-  kubectl -n actions-runner-system wait deploy/actions-runner-controller --for condition=available
+    --set syncPeriod=5m \
+    --set authSecret.create=false
+  kubectl -n actions-runner-system wait deploy/actions-runner-controller --for condition=available --timeout 60s
 else
   kubectl apply \
     -n actions-runner-system \
     -f release/actions-runner-controller.yaml
-  kubectl -n actions-runner-system wait deploy/controller-manager --for condition=available --timeout 60s
+  kubectl -n actions-runner-system wait deploy/controller-manager --for condition=available --timeout 120s
 fi

 # Adhocly wait for some time until actions-runner-controller's admission webhook gets ready
acceptance/pipelines/eks-integration-tests.yaml (new file, 36 lines)

@@ -0,0 +1,36 @@
name: EKS Integration Tests

on:
  workflow_dispatch:

env:
  IRSA_ROLE_ARN:
  ASSUME_ROLE_ARN:
  AWS_REGION:

jobs:
  assume-role-in-runner-test:
    runs-on: ['self-hosted', 'Linux']
    steps:
      - name: Test aws-actions/configure-aws-credentials Action
        uses: aws-actions/configure-aws-credentials@v1
        with:
          aws-region: ${{ env.AWS_REGION }}
          role-to-assume: ${{ env.ASSUME_ROLE_ARN }}
          role-duration-seconds: 900
  assume-role-in-container-test:
    runs-on: ['self-hosted', 'Linux']
    container:
      image: amazon/aws-cli
      env:
        AWS_WEB_IDENTITY_TOKEN_FILE: /var/run/secrets/eks.amazonaws.com/serviceaccount/token
        AWS_ROLE_ARN: ${{ env.IRSA_ROLE_ARN }}
      volumes:
        - /var/run/secrets/eks.amazonaws.com/serviceaccount/token:/var/run/secrets/eks.amazonaws.com/serviceaccount/token
    steps:
      - name: Test aws-actions/configure-aws-credentials Action in container
        uses: aws-actions/configure-aws-credentials@v1
        with:
          aws-region: ${{ env.AWS_REGION }}
          role-to-assume: ${{ env.ASSUME_ROLE_ARN }}
          role-duration-seconds: 900
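The three `env` values are deliberately left blank in the committed file; before dispatching the workflow you would fill them in for your own cluster, along these lines (ARNs and region are purely hypothetical):

```yaml
env:
  IRSA_ROLE_ARN: arn:aws:iam::123456789012:role/example-irsa-role       # hypothetical
  ASSUME_ROLE_ARN: arn:aws:iam::123456789012:role/example-assume-role   # hypothetical
  AWS_REGION: us-east-1                                                 # hypothetical
```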
acceptance/pipelines/runner-integration-tests.yaml (new file, 83 lines)

@@ -0,0 +1,83 @@
name: Runner Integration Tests

on:
  workflow_dispatch:

env:
  ImageOS: ubuntu18 # Used by ruby/setup-ruby action | Update me for the runner OS version you are testing against

jobs:
  run-step-in-container-test:
    runs-on: ['self-hosted', 'Linux']
    container:
      image: alpine
    steps:
      - name: Test we are working in the container
        run: |
          if [[ $(sed -n '2p' < /etc/os-release | cut -d "=" -f2) != "alpine" ]]; then
            echo "::error ::Failed OS detection test, could not match /etc/os-release with alpine. Are we really running in the container?"
            echo "/etc/os-release below:"
            cat /etc/os-release
            exit 1
          fi
  setup-python-test:
    runs-on: ['self-hosted', 'Linux']
    steps:
      - name: Print native Python environment
        run: |
          which python
          python --version
      - uses: actions/setup-python@v2
        with:
          python-version: 3.9
      - name: Test actions/setup-python works
        run: |
          VERSION=$(python --version 2>&1 | cut -d ' ' -f2 | cut -d '.' -f1-2)
          if [[ $VERSION != '3.9' ]]; then
            echo "Python version detected : $(python --version 2>&1)"
            echo "::error ::Detected python failed setup version test, could not match version with version specified in the setup action"
            exit 1
          else
            echo "Python version detected : $(python --version 2>&1)"
          fi
  setup-node-test:
    runs-on: ['self-hosted', 'Linux']
    steps:
      - uses: actions/setup-node@v2
        with:
          node-version: '12'
      - name: Test actions/setup-node works
        run: |
          VERSION=$(node --version | cut -c 2- | cut -d '.' -f1)
          if [[ $VERSION != '12' ]]; then
            echo "Node version detected : $(node --version 2>&1)"
            echo "::error ::Detected node failed setup version test, could not match version with version specified in the setup action"
            exit 1
          else
            echo "Node version detected : $(node --version 2>&1)"
          fi
  setup-ruby-test:
    runs-on: ['self-hosted', 'Linux']
    steps:
      - uses: ruby/setup-ruby@v1
        with:
          ruby-version: 3.0
          bundler-cache: true
      - name: Test ruby/setup-ruby works
        run: |
          VERSION=$(ruby --version | cut -d ' ' -f2 | cut -d '.' -f1-2)
          if [[ $VERSION != '3.0' ]]; then
            echo "Ruby version detected : $(ruby --version 2>&1)"
            echo "::error ::Detected ruby failed setup version test, could not match version with version specified in the setup action"
            exit 1
          else
            echo "Ruby version detected : $(ruby --version 2>&1)"
          fi
  python-shell-test:
    runs-on: ['self-hosted', 'Linux']
    steps:
      - name: Test Python shell works
        run: |
          import os
          print(os.environ['PATH'])
        shell: python
acceptance/testdata/runnerdeploy.yaml (vendored, 11 lines changed)

@@ -7,3 +7,14 @@ spec:
   template:
     spec:
       repository: mumoshu/actions-runner-controller-ci
+      #
+      # dockerd within runner container
+      #
+      ## Replace `mumoshu/actions-runner-dind:dev` with your dind image
+      #dockerdWithinRunnerContainer: true
+      #image: mumoshu/actions-runner-dind:dev
+
+      #
+      # Set the MTU used by dockerd-managed network interfaces (including docker-build-ubuntu)
+      #
+      #dockerMTU: 1450
@@ -72,6 +72,12 @@ type GitHubEventScaleUpTriggerSpec struct {
 type CheckRunSpec struct {
 	Types  []string `json:"types,omitempty"`
 	Status string   `json:"status,omitempty"`
+
+	// Names is a list of GitHub Actions glob patterns.
+	// Any check_run event whose name matches one of patterns in the list can trigger autoscaling.
+	// Note that check_run name seem to equal to the job name you've defined in your actions workflow yaml file.
+	// So it is very likely that you can utilize this to trigger depending on the job.
+	Names []string `json:"names,omitempty"`
 }

 // https://docs.github.com/en/actions/reference/events-that-trigger-workflows#pull_request

@@ -150,6 +156,7 @@ type HorizontalRunnerAutoscalerStatus struct {
 	DesiredReplicas *int `json:"desiredReplicas,omitempty"`

 	// +optional
+	// +nullable
 	LastSuccessfulScaleOutTime *metav1.Time `json:"lastSuccessfulScaleOutTime,omitempty"`

 	// +optional
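Since `Names` feeds the webhook autoscaler's `checkRun` trigger, a scale-up rule can now be narrowed to particular jobs. A sketch with illustrative job-name patterns; the surrounding `scaleUpTriggers` shape follows the README examples in this comparison, and the `amount`/`duration` fields are assumed from that trigger spec:

```yaml
spec:
  scaleTargetRef:
    name: myrunners
  scaleUpTriggers:
  - githubEvent:
      checkRun:
        types: ["created"]
        status: "queued"
        names: ["build", "test-*"]   # illustrative; matches the workflow job name
    amount: 1
    duration: "5m"
```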
@@ -18,6 +18,7 @@ package v1alpha1

 import (
 	"errors"
+	"k8s.io/apimachinery/pkg/api/resource"

 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

@@ -48,6 +49,8 @@ type RunnerSpec struct {
 	// +optional
 	DockerdContainerResources corev1.ResourceRequirements `json:"dockerdContainerResources,omitempty"`
 	// +optional
+	DockerVolumeMounts []corev1.VolumeMount `json:"dockerVolumeMounts,omitempty"`
+	// +optional
 	Resources corev1.ResourceRequirements `json:"resources,omitempty"`
 	// +optional
 	VolumeMounts []corev1.VolumeMount `json:"volumeMounts,omitempty"`

@@ -92,6 +95,12 @@ type RunnerSpec struct {
 	DockerdWithinRunnerContainer *bool `json:"dockerdWithinRunnerContainer,omitempty"`
 	// +optional
 	DockerEnabled *bool `json:"dockerEnabled,omitempty"`
 	// +optional
+	DockerMTU *int64 `json:"dockerMTU,omitempty"`
+	// +optional
+	HostAliases []corev1.HostAlias `json:"hostAliases,omitempty"`
+	// +optional
+	VolumeSizeLimit *resource.Quantity `json:"volumeSizeLimit,omitempty"`
 }

 // ValidateRepository validates repository field.

@@ -119,10 +128,17 @@ func (rs *RunnerSpec) ValidateRepository() error {

 // RunnerStatus defines the observed state of Runner
 type RunnerStatus struct {
+	// +optional
 	Registration RunnerStatusRegistration `json:"registration"`
-	Phase   string `json:"phase"`
-	Reason  string `json:"reason"`
-	Message string `json:"message"`
+	// +optional
+	Phase string `json:"phase,omitempty"`
+	// +optional
+	Reason string `json:"reason,omitempty"`
+	// +optional
+	Message string `json:"message,omitempty"`
+	// +optional
+	// +nullable
+	LastRegistrationCheckTime *metav1.Time `json:"lastRegistrationCheckTime,omitempty"`
 }

 // RunnerStatusRegistration contains runner registration status
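The three new `RunnerSpec` fields surface directly in the `Runner` custom resource; a hedged YAML sketch exercising them together (all names and values below are illustrative):

```yaml
apiVersion: actions.summerwind.dev/v1alpha1
kind: Runner
metadata:
  name: example-runner                      # illustrative
spec:
  repository: example-org/example-repo      # illustrative
  dockerMTU: 1450                           # MTU for dockerd-managed interfaces
  volumeSizeLimit: 10Gi                     # cap on the runner work volume
  hostAliases:                              # extra entries for the pod's hosts file
  - ip: "10.0.0.10"
    hostnames: ["registry.internal.example"]
```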
@@ -25,13 +25,16 @@ const (
 	AutoscalingMetricTypePercentageRunnersBusy = "PercentageRunnersBusy"
 )

-// RunnerReplicaSetSpec defines the desired state of RunnerDeployment
+// RunnerDeploymentSpec defines the desired state of RunnerDeployment
 type RunnerDeploymentSpec struct {
 	// +optional
 	// +nullable
 	Replicas *int `json:"replicas,omitempty"`

-	Template RunnerTemplate `json:"template"`
+	// +optional
+	// +nullable
+	Selector *metav1.LabelSelector `json:"selector"`
+	Template RunnerTemplate `json:"template"`
 }

 type RunnerDeploymentStatus struct {

@@ -26,7 +26,10 @@ type RunnerReplicaSetSpec struct {
 	// +nullable
 	Replicas *int `json:"replicas,omitempty"`

-	Template RunnerTemplate `json:"template"`
+	// +optional
+	// +nullable
+	Selector *metav1.LabelSelector `json:"selector"`
+	Template RunnerTemplate `json:"template"`
 }

 type RunnerReplicaSetStatus struct {
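With the new optional `Selector`, a `RunnerDeployment` can mirror the core Deployment/ReplicaSet pattern. A sketch with illustrative labels; by the usual Kubernetes convention the selector would match the template's labels:

```yaml
apiVersion: actions.summerwind.dev/v1alpha1
kind: RunnerDeployment
metadata:
  name: example-runnerdeploy       # illustrative
spec:
  replicas: 2
  selector:                        # optional and nullable, per the new field
    matchLabels:
      app: example-runners         # illustrative label
  template:
    metadata:
      labels:
        app: example-runners
    spec:
      repository: example-org/example-repo   # illustrative
```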
@@ -22,6 +22,7 @@ package v1alpha1

 import (
 	"k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 )

@@ -65,6 +66,11 @@ func (in *CheckRunSpec) DeepCopyInto(out *CheckRunSpec) {
 		*out = make([]string, len(*in))
 		copy(*out, *in)
 	}
+	if in.Names != nil {
+		in, out := &in.Names, &out.Names
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
 }

 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CheckRunSpec.

@@ -403,6 +409,11 @@ func (in *RunnerDeploymentSpec) DeepCopyInto(out *RunnerDeploymentSpec) {
 		*out = new(int)
 		**out = **in
 	}
+	if in.Selector != nil {
+		in, out := &in.Selector, &out.Selector
+		*out = new(metav1.LabelSelector)
+		(*in).DeepCopyInto(*out)
+	}
 	in.Template.DeepCopyInto(&out.Template)
 }

@@ -535,6 +546,11 @@ func (in *RunnerReplicaSetSpec) DeepCopyInto(out *RunnerReplicaSetSpec) {
 		*out = new(int)
 		**out = **in
 	}
+	if in.Selector != nil {
+		in, out := &in.Selector, &out.Selector
+		*out = new(metav1.LabelSelector)
+		(*in).DeepCopyInto(*out)
+	}
 	in.Template.DeepCopyInto(&out.Template)
 }

@@ -579,6 +595,13 @@ func (in *RunnerSpec) DeepCopyInto(out *RunnerSpec) {
 		}
 	}
 	in.DockerdContainerResources.DeepCopyInto(&out.DockerdContainerResources)
+	if in.DockerVolumeMounts != nil {
+		in, out := &in.DockerVolumeMounts, &out.DockerVolumeMounts
+		*out = make([]v1.VolumeMount, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
 	in.Resources.DeepCopyInto(&out.Resources)
 	if in.VolumeMounts != nil {
 		in, out := &in.VolumeMounts, &out.VolumeMounts

@@ -678,6 +701,23 @@ func (in *RunnerSpec) DeepCopyInto(out *RunnerSpec) {
 		*out = new(bool)
 		**out = **in
 	}
+	if in.DockerMTU != nil {
+		in, out := &in.DockerMTU, &out.DockerMTU
+		*out = new(int64)
+		**out = **in
+	}
+	if in.HostAliases != nil {
+		in, out := &in.HostAliases, &out.HostAliases
+		*out = make([]v1.HostAlias, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.VolumeSizeLimit != nil {
+		in, out := &in.VolumeSizeLimit, &out.VolumeSizeLimit
+		x := (*in).DeepCopy()
+		*out = &x
+	}
 }

 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunnerSpec.

@@ -694,6 +734,10 @@ func (in *RunnerSpec) DeepCopy() *RunnerSpec {
 func (in *RunnerStatus) DeepCopyInto(out *RunnerStatus) {
 	*out = *in
 	in.Registration.DeepCopyInto(&out.Registration)
+	if in.LastRegistrationCheckTime != nil {
+		in, out := &in.LastRegistrationCheckTime, &out.LastRegistrationCheckTime
+		*out = (*in).DeepCopy()
+	}
 }

 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunnerStatus.
@@ -15,7 +15,10 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.6.1
+version: 0.11.0
+
+# Used as the default manager tag value when no tag property is provided in the values.yaml
+appVersion: 0.18.2

 home: https://github.com/summerwind/actions-runner-controller
@@ -22,6 +22,9 @@ resources:
     cpu: 100m
     memory: 128Mi

 authSecret:
   create: false

+# Set the following to true to create a dummy secret, allowing the manager pod to start
+# This is only useful in CI
+createDummySecret: true
@@ -148,6 +148,17 @@ spec:
             checkRun:
               description: https://docs.github.com/en/actions/reference/events-that-trigger-workflows#check_run
               properties:
+                names:
+                  description: Names is a list of GitHub Actions glob patterns.
+                    Any check_run event whose name matches one of patterns
+                    in the list can trigger autoscaling. Note that check_run
+                    name seem to equal to the job name you've defined in
+                    your actions workflow yaml file. So it is very likely
+                    that you can utilize this to trigger depending on the
+                    job.
+                  items:
+                    type: string
+                  type: array
                 status:
                   type: string
                 types:

@@ -196,6 +207,7 @@ spec:
               type: integer
             lastSuccessfulScaleOutTime:
               format: date-time
+              nullable: true
               type: string
             observedGeneration:
               description: ObservedGeneration is the most recent generation observed
@@ -38,11 +38,42 @@ spec:
       metadata:
         type: object
       spec:
-        description: RunnerReplicaSetSpec defines the desired state of RunnerDeployment
+        description: RunnerDeploymentSpec defines the desired state of RunnerDeployment
        properties:
          replicas:
            nullable: true
            type: integer
+          selector:
+            description: A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects.
+            nullable: true
+            properties:
+              matchExpressions:
+                description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+                items:
+                  description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+                  properties:
+                    key:
+                      description: key is the label key that the selector applies to.
+                      type: string
+                    operator:
+                      description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+                      type: string
+                    values:
+                      description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+                      items:
+                        type: string
+                      type: array
+                  required:
+                  - key
+                  - operator
+                  type: object
+                type: array
+              matchLabels:
+                additionalProperties:
+                  type: string
+                description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+                type: object
+            type: object
          template:
            properties:
              metadata:

@@ -402,6 +433,36 @@ spec:
                type: array
              dockerEnabled:
                type: boolean
+              dockerMTU:
+                format: int64
+                type: integer
+              dockerVolumeMounts:
+                items:
+                  description: VolumeMount describes a mounting of a Volume within a container.
+                  properties:
+                    mountPath:
+                      description: Path within the container at which the volume should be mounted. Must not contain ':'.
+                      type: string
+                    mountPropagation:
+                      description: mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10.
+                      type: string
+                    name:
+                      description: This must match the Name of a Volume.
+                      type: string
+                    readOnly:
+                      description: Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false.
+                      type: boolean
+                    subPath:
+                      description: Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root).
+                      type: string
+                    subPathExpr:
+                      description: Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive. This field is beta in 1.15.
+                      type: string
+                  required:
+                  - mountPath
+                  - name
+                  type: object
+                type: array
              dockerdContainerResources:
                description: ResourceRequirements describes the compute resource requirements.
                properties:

@@ -546,6 +607,20 @@ spec:
                type: array
              group:
                type: string
+              hostAliases:
+                items:
+                  description: HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the pod's hosts file.
+                  properties:
+                    hostnames:
+                      description: Hostnames for the above IP address.
+                      items:
+                        type: string
+                      type: array
+                    ip:
+                      description: IP address of the host file entry.
+                      type: string
+                  type: object
+                type: array
              image:
                type: string
              imagePullPolicy:

@@ -734,6 +809,12 @@ spec:
                - name
                type: object
                type: array
+              volumeSizeLimit:
+                anyOf:
+                - type: integer
+                - type: string
+                pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+                x-kubernetes-int-or-string: true
              volumes:
                items:
                  description: Volume represents a named volume in a pod that may be accessed by any container in the pod.
@@ -43,6 +43,37 @@ spec:
          replicas:
            nullable: true
            type: integer
+          selector:
+            description: A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects.
+            nullable: true
+            properties:
+              matchExpressions:
+                description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+                items:
+                  description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+                  properties:
+                    key:
+                      description: key is the label key that the selector applies to.
+                      type: string
+                    operator:
+                      description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+                      type: string
+                    values:
+                      description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+                      items:
+                        type: string
+                      type: array
+                  required:
+                  - key
+                  - operator
+                  type: object
+                type: array
+              matchLabels:
+                additionalProperties:
+                  type: string
+                description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+                type: object
+            type: object
          template:
            properties:
              metadata:

@@ -402,6 +433,36 @@ spec:
                type: array
              dockerEnabled:
                type: boolean
+              dockerMTU:
+                format: int64
+                type: integer
+              dockerVolumeMounts:
+                items:
+                  description: VolumeMount describes a mounting of a Volume within a container.
+                  properties:
+                    mountPath:
+                      description: Path within the container at which the volume should be mounted. Must not contain ':'.
+                      type: string
+                    mountPropagation:
+                      description: mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10.
+                      type: string
+                    name:
+                      description: This must match the Name of a Volume.
+                      type: string
+                    readOnly:
+                      description: Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false.
+                      type: boolean
+                    subPath:
+                      description: Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root).
+                      type: string
+                    subPathExpr:
+                      description: Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive. This field is beta in 1.15.
+                      type: string
+                  required:
+                  - mountPath
+                  - name
+                  type: object
+                type: array
              dockerdContainerResources:
                description: ResourceRequirements describes the compute resource requirements.
                properties:

@@ -546,6 +607,20 @@ spec:
                type: array
              group:
                type: string
+              hostAliases:
+                items:
+                  description: HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the pod's hosts file.
+                  properties:
+                    hostnames:
+                      description: Hostnames for the above IP address.
+                      items:
+                        type: string
+                      type: array
+                    ip:
+                      description: IP address of the host file entry.
+                      type: string
+                  type: object
+                type: array
              image:
                type: string
              imagePullPolicy:

@@ -734,6 +809,12 @@ spec:
                - name
                type: object
                type: array
+              volumeSizeLimit:
+                anyOf:
+                - type: integer
+                - type: string
+                pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+                x-kubernetes-int-or-string: true
              volumes:
                items:
                  description: Volume represents a named volume in a pod that may be accessed by any container in the pod.
@@ -398,6 +398,36 @@ spec:
                type: array
              dockerEnabled:
                type: boolean
+              dockerMTU:
+                format: int64
+                type: integer
+              dockerVolumeMounts:
+                items:
+                  description: VolumeMount describes a mounting of a Volume within a container.
+                  properties:
+                    mountPath:
+                      description: Path within the container at which the volume should be mounted. Must not contain ':'.
+                      type: string
+                    mountPropagation:
+                      description: mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10.
+                      type: string
+                    name:
+                      description: This must match the Name of a Volume.
+                      type: string
+                    readOnly:
+                      description: Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false.
+                      type: boolean
+                    subPath:
+                      description: Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root).
+                      type: string
+                    subPathExpr:
+                      description: Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive. This field is beta in 1.15.
+                      type: string
+                  required:
+                  - mountPath
+                  - name
+                  type: object
+                type: array
              dockerdContainerResources:
                description: ResourceRequirements describes the compute resource requirements.
                properties:

@@ -542,6 +572,20 @@ spec:
                type: array
              group:
                type: string
+              hostAliases:
+                items:
+                  description: HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the pod's hosts file.
+                  properties:
+                    hostnames:
+                      description: Hostnames for the above IP address.
+                      items:
+                        type: string
+                      type: array
+                    ip:
+                      description: IP address of the host file entry.
+                      type: string
+                  type: object
+                type: array
              image:
                type: string
              imagePullPolicy:

@@ -730,6 +774,12 @@ spec:
                - name
                type: object
                type: array
+              volumeSizeLimit:
+                anyOf:
+                - type: integer
+                - type: string
+                pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+                x-kubernetes-int-or-string: true
              volumes:
                items:
                  description: Volume represents a named volume in a pod that may be accessed by any container in the pod.

@@ -1538,6 +1588,10 @@ spec:
       status:
        description: RunnerStatus defines the observed state of Runner
        properties:
+          lastRegistrationCheckTime:
+            format: date-time
+            nullable: true
+            type: string
          message:
            type: string
          phase:

@@ -1566,11 +1620,6 @@ spec:
            - expiresAt
            - token
            type: object
-        required:
-        - message
-        - phase
-        - reason
-        - registration
        type: object
    type: object
 version: v1alpha1
@@ -5,7 +5,7 @@ apiVersion: cert-manager.io/v1
 kind: Issuer
 metadata:
   name: {{ include "actions-runner-controller.selfsignedIssuerName" . }}
-  namespace: {{ .Namespace }}
+  namespace: {{ .Release.Namespace }}
 spec:
   selfSigned: {}
 ---

@@ -13,7 +13,7 @@ apiVersion: cert-manager.io/v1
 kind: Certificate
 metadata:
   name: {{ include "actions-runner-controller.servingCertName" . }}
-  namespace: {{ .Namespace }}
+  namespace: {{ .Release.Namespace }}
 spec:
   dnsNames:
   - {{ include "actions-runner-controller.webhookServiceName" . }}.{{ .Release.Namespace }}.svc
@@ -35,6 +35,9 @@ spec:
         - "--enable-leader-election"
         - "--sync-period={{ .Values.syncPeriod }}"
         - "--docker-image={{ .Values.image.dindSidecarRepositoryAndTag }}"
+        {{- if .Values.scope.singleNamespace }}
+        - "--watch-namespace={{ default .Release.Namespace .Values.scope.watchNamespace }}"
+        {{- end }}
         command:
         - "/manager"
         env:

@@ -62,7 +65,7 @@ spec:
         - name: {{ $key }}
           value: {{ $val | quote }}
         {{- end }}
-        image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
+        image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default (cat "v" .Chart.AppVersion | replace " " "") }}"
         name: manager
         imagePullPolicy: {{ .Values.image.pullPolicy }}
         ports:

@@ -47,7 +47,7 @@ spec:
         - name: {{ $key }}
           value: {{ $val | quote }}
         {{- end }}
-        image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
+        image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default (cat "v" .Chart.AppVersion | replace " " "") }}"
        name: github-webhook-server
        imagePullPolicy: {{ .Values.image.pullPolicy }}
        ports:
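The `cat "v" .Chart.AppVersion | replace " " ""` pipeline exists because Helm's `cat` joins its arguments with a space; the `replace` then strips it, yielding `v<appVersion>`. With this chart's `appVersion: 0.18.2`, the default `image.repository` of `summerwind/actions-runner-controller` from the values diff below, and no explicit `image.tag`, the line renders as:

```yaml
# Rendered manager container image when image.tag is unset
image: "summerwind/actions-runner-controller:v0.18.2"
```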
@@ -1,6 +1,6 @@
 {{- if .Values.githubWebhookServer.ingress.enabled -}}
 {{- $fullName := include "actions-runner-controller-github-webhook-server.fullname" . -}}
-{{- $svcPort := .Values.githubWebhookServer.service.port -}}
+{{- $svcPort := (index .Values.githubWebhookServer.service.ports 0).port -}}
 {{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
 apiVersion: networking.k8s.io/v1beta1
 {{- else -}}
@@ -22,7 +22,6 @@ authSecret:

 image:
   repository: summerwind/actions-runner-controller
-  tag: "v0.17.0"
   dindSidecarRepositoryAndTag: "docker:dind"
   pullPolicy: IfNotPresent

@@ -100,6 +99,13 @@ env:
   # https_proxy: "proxy.com:8080"
   # no_proxy: ""

+scope:
+  # If true, the controller will only watch custom resources in a single namespace
+  singleNamespace: false
+  # If `scope.singleNamespace=true`, the controller will only watch custom resources in this namespace
+  # The default value is "", which means the namespace of the controller
+  watchNamespace: ""
+
 githubWebhookServer:
   enabled: false
   labels: {}
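Putting the two halves of this change together: enabling the new scope in values drives the extra manager flag in the deployment template. A sketch (namespace name illustrative):

```yaml
# values override
scope:
  singleNamespace: true
  watchNamespace: my-runners   # "" would default to the release namespace
# ...which the deployment template renders as the manager argument:
#   - "--watch-namespace=my-runners"
```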
@@ -110,7 +110,7 @@ func main() {
 		Recorder:       nil,
 		Scheme:         mgr.GetScheme(),
 		SecretKeyBytes: []byte(webhookSecretToken),
-		WatchNamespace: watchNamespace,
+		Namespace:      watchNamespace,
 	}

 	if err = hraGitHubWebhook.SetupWithManager(mgr); err != nil {
@@ -148,6 +148,17 @@ spec:
             checkRun:
               description: https://docs.github.com/en/actions/reference/events-that-trigger-workflows#check_run
               properties:
+                names:
+                  description: Names is a list of GitHub Actions glob patterns.
+                    Any check_run event whose name matches one of patterns
+                    in the list can trigger autoscaling. Note that check_run
+                    name seem to equal to the job name you've defined in
+                    your actions workflow yaml file. So it is very likely
+                    that you can utilize this to trigger depending on the
+                    job.
+                  items:
+                    type: string
+                  type: array
                 status:
                   type: string
                 types:

@@ -196,6 +207,7 @@ spec:
               type: integer
             lastSuccessfulScaleOutTime:
               format: date-time
+              nullable: true
               type: string
             observedGeneration:
               description: ObservedGeneration is the most recent generation observed
@@ -38,11 +38,42 @@ spec:
           metadata:
             type: object
           spec:
-            description: RunnerReplicaSetSpec defines the desired state of RunnerDeployment
+            description: RunnerDeploymentSpec defines the desired state of RunnerDeployment
             properties:
               replicas:
                 nullable: true
                 type: integer
+              selector:
+                description: A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects.
+                nullable: true
+                properties:
+                  matchExpressions:
+                    description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+                    items:
+                      description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+                      properties:
+                        key:
+                          description: key is the label key that the selector applies to.
+                          type: string
+                        operator:
+                          description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+                          type: string
+                        values:
+                          description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+                          items:
+                            type: string
+                          type: array
+                      required:
+                      - key
+                      - operator
+                      type: object
+                    type: array
+                  matchLabels:
+                    additionalProperties:
+                      type: string
+                    description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+                    type: object
+                type: object
               template:
                 properties:
                   metadata:
@@ -402,6 +433,36 @@ spec:
                       type: array
                     dockerEnabled:
                       type: boolean
+                    dockerMTU:
+                      format: int64
+                      type: integer
+                    dockerVolumeMounts:
+                      items:
+                        description: VolumeMount describes a mounting of a Volume within a container.
+                        properties:
+                          mountPath:
+                            description: Path within the container at which the volume should be mounted. Must not contain ':'.
+                            type: string
+                          mountPropagation:
+                            description: mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10.
+                            type: string
+                          name:
+                            description: This must match the Name of a Volume.
+                            type: string
+                          readOnly:
+                            description: Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false.
+                            type: boolean
+                          subPath:
+                            description: Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root).
+                            type: string
+                          subPathExpr:
+                            description: Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive. This field is beta in 1.15.
+                            type: string
+                        required:
+                        - mountPath
+                        - name
+                        type: object
+                      type: array
                     dockerdContainerResources:
                       description: ResourceRequirements describes the compute resource requirements.
                       properties:
@@ -546,6 +607,20 @@ spec:
                       type: array
                     group:
                       type: string
+                    hostAliases:
+                      items:
+                        description: HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the pod's hosts file.
+                        properties:
+                          hostnames:
+                            description: Hostnames for the above IP address.
+                            items:
+                              type: string
+                            type: array
+                          ip:
+                            description: IP address of the host file entry.
+                            type: string
+                        type: object
+                      type: array
                     image:
                       type: string
                     imagePullPolicy:
@@ -734,6 +809,12 @@ spec:
                       - name
                       type: object
                       type: array
+                    volumeSizeLimit:
+                      anyOf:
+                      - type: integer
+                      - type: string
+                      pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+                      x-kubernetes-int-or-string: true
                     volumes:
                       items:
                         description: Volume represents a named volume in a pod that may be accessed by any container in the pod.
@@ -43,6 +43,37 @@ spec:
               replicas:
                 nullable: true
                 type: integer
+              selector:
+                description: A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects.
+                nullable: true
+                properties:
+                  matchExpressions:
+                    description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+                    items:
+                      description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+                      properties:
+                        key:
+                          description: key is the label key that the selector applies to.
+                          type: string
+                        operator:
+                          description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+                          type: string
+                        values:
+                          description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+                          items:
+                            type: string
+                          type: array
+                      required:
+                      - key
+                      - operator
+                      type: object
+                    type: array
+                  matchLabels:
+                    additionalProperties:
+                      type: string
+                    description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+                    type: object
+                type: object
               template:
                 properties:
                   metadata:
@@ -402,6 +433,36 @@ spec:
                       type: array
                     dockerEnabled:
                       type: boolean
+                    dockerMTU:
+                      format: int64
+                      type: integer
+                    dockerVolumeMounts:
+                      items:
+                        description: VolumeMount describes a mounting of a Volume within a container.
+                        properties:
+                          mountPath:
+                            description: Path within the container at which the volume should be mounted. Must not contain ':'.
+                            type: string
+                          mountPropagation:
+                            description: mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10.
+                            type: string
+                          name:
+                            description: This must match the Name of a Volume.
+                            type: string
+                          readOnly:
+                            description: Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false.
+                            type: boolean
+                          subPath:
+                            description: Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root).
+                            type: string
+                          subPathExpr:
+                            description: Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive. This field is beta in 1.15.
+                            type: string
+                        required:
+                        - mountPath
+                        - name
+                        type: object
+                      type: array
                     dockerdContainerResources:
                       description: ResourceRequirements describes the compute resource requirements.
                       properties:
@@ -546,6 +607,20 @@ spec:
                       type: array
                     group:
                       type: string
+                    hostAliases:
+                      items:
+                        description: HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the pod's hosts file.
+                        properties:
+                          hostnames:
+                            description: Hostnames for the above IP address.
+                            items:
+                              type: string
+                            type: array
+                          ip:
+                            description: IP address of the host file entry.
+                            type: string
+                        type: object
+                      type: array
                     image:
                       type: string
                     imagePullPolicy:
@@ -734,6 +809,12 @@ spec:
                       - name
                       type: object
                       type: array
+                    volumeSizeLimit:
+                      anyOf:
+                      - type: integer
+                      - type: string
+                      pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+                      x-kubernetes-int-or-string: true
                     volumes:
                       items:
                         description: Volume represents a named volume in a pod that may be accessed by any container in the pod.
@@ -398,6 +398,36 @@ spec:
               type: array
             dockerEnabled:
               type: boolean
+            dockerMTU:
+              format: int64
+              type: integer
+            dockerVolumeMounts:
+              items:
+                description: VolumeMount describes a mounting of a Volume within a container.
+                properties:
+                  mountPath:
+                    description: Path within the container at which the volume should be mounted. Must not contain ':'.
+                    type: string
+                  mountPropagation:
+                    description: mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10.
+                    type: string
+                  name:
+                    description: This must match the Name of a Volume.
+                    type: string
+                  readOnly:
+                    description: Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false.
+                    type: boolean
+                  subPath:
+                    description: Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root).
+                    type: string
+                  subPathExpr:
+                    description: Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive. This field is beta in 1.15.
+                    type: string
+                required:
+                - mountPath
+                - name
+                type: object
+              type: array
             dockerdContainerResources:
               description: ResourceRequirements describes the compute resource requirements.
               properties:
@@ -542,6 +572,20 @@ spec:
               type: array
             group:
               type: string
+            hostAliases:
+              items:
+                description: HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the pod's hosts file.
+                properties:
+                  hostnames:
+                    description: Hostnames for the above IP address.
+                    items:
+                      type: string
+                    type: array
+                  ip:
+                    description: IP address of the host file entry.
+                    type: string
+                type: object
+              type: array
             image:
               type: string
             imagePullPolicy:
@@ -730,6 +774,12 @@ spec:
               - name
               type: object
              type: array
+            volumeSizeLimit:
+              anyOf:
+              - type: integer
+              - type: string
+              pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+              x-kubernetes-int-or-string: true
             volumes:
               items:
                 description: Volume represents a named volume in a pod that may be accessed by any container in the pod.
@@ -1538,6 +1588,10 @@ spec:
         status:
           description: RunnerStatus defines the observed state of Runner
           properties:
+            lastRegistrationCheckTime:
+              format: date-time
+              nullable: true
+              type: string
             message:
               type: string
             phase:
@@ -1566,11 +1620,6 @@ spec:
                 - expiresAt
                 - token
                 type: object
-          required:
-          - message
-          - phase
-          - reason
-          - registration
           type: object
       type: object
   version: v1alpha1
@@ -10,6 +10,8 @@ import (
 	"time"

 	"github.com/summerwind/actions-runner-controller/api/v1alpha1"
+	kerrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 )

@@ -32,7 +34,7 @@ func getValueAvailableAt(now time.Time, from, to *time.Time, reservedValue int)
 	return &reservedValue
 }

-func (r *HorizontalRunnerAutoscalerReconciler) getDesiredReplicasFromCache(hra v1alpha1.HorizontalRunnerAutoscaler) *int {
+func (r *HorizontalRunnerAutoscalerReconciler) fetchSuggestedReplicasFromCache(hra v1alpha1.HorizontalRunnerAutoscaler) *int {
 	var entry *v1alpha1.CacheEntry

 	for i := range hra.Status.CacheEntries {
@@ -61,7 +63,7 @@ func (r *HorizontalRunnerAutoscalerReconciler) getDesiredReplicasFromCache(hra v
 	return nil
 }

-func (r *HorizontalRunnerAutoscalerReconciler) determineDesiredReplicas(rd v1alpha1.RunnerDeployment, hra v1alpha1.HorizontalRunnerAutoscaler) (*int, error) {
+func (r *HorizontalRunnerAutoscalerReconciler) suggestDesiredReplicas(rd v1alpha1.RunnerDeployment, hra v1alpha1.HorizontalRunnerAutoscaler) (*int, error) {
 	if hra.Spec.MinReplicas == nil {
 		return nil, fmt.Errorf("horizontalrunnerautoscaler %s/%s is missing minReplicas", hra.Namespace, hra.Name)
 	} else if hra.Spec.MaxReplicas == nil {
@@ -69,16 +71,22 @@ func (r *HorizontalRunnerAutoscalerReconciler) determineDesiredReplicas(rd v1alp
 	}

 	metrics := hra.Spec.Metrics
-	if len(metrics) == 0 || metrics[0].Type == v1alpha1.AutoscalingMetricTypeTotalNumberOfQueuedAndInProgressWorkflowRuns {
-		return r.calculateReplicasByQueuedAndInProgressWorkflowRuns(rd, hra)
+	if len(metrics) == 0 {
+		if len(hra.Spec.ScaleUpTriggers) == 0 {
+			return r.suggestReplicasByQueuedAndInProgressWorkflowRuns(rd, hra)
+		}
+
+		return nil, nil
+	} else if metrics[0].Type == v1alpha1.AutoscalingMetricTypeTotalNumberOfQueuedAndInProgressWorkflowRuns {
+		return r.suggestReplicasByQueuedAndInProgressWorkflowRuns(rd, hra)
 	} else if metrics[0].Type == v1alpha1.AutoscalingMetricTypePercentageRunnersBusy {
-		return r.calculateReplicasByPercentageRunnersBusy(rd, hra)
+		return r.suggestReplicasByPercentageRunnersBusy(rd, hra)
 	} else {
 		return nil, fmt.Errorf("validating autoscaling metrics: unsupported metric type %q", metrics[0].Type)
 	}
 }

-func (r *HorizontalRunnerAutoscalerReconciler) calculateReplicasByQueuedAndInProgressWorkflowRuns(rd v1alpha1.RunnerDeployment, hra v1alpha1.HorizontalRunnerAutoscaler) (*int, error) {
+func (r *HorizontalRunnerAutoscalerReconciler) suggestReplicasByQueuedAndInProgressWorkflowRuns(rd v1alpha1.RunnerDeployment, hra v1alpha1.HorizontalRunnerAutoscaler) (*int, error) {

 	var repos [][]string
 	metrics := hra.Spec.Metrics
@@ -89,6 +97,13 @@ func (r *HorizontalRunnerAutoscalerReconciler) calculateReplicasByQueuedAndInPro
 		return nil, fmt.Errorf("asserting runner deployment spec to detect bug: spec.template.organization should not be empty on this code path")
 	}

+	// In case it's an organizational runners deployment without any scaling metrics defined,
+	// we assume that the desired replicas should always be `minReplicas + capacityReservedThroughWebhook`.
+	// See https://github.com/summerwind/actions-runner-controller/issues/377#issuecomment-793372693
+	if len(metrics) == 0 {
+		return nil, nil
+	}
+
 	if len(metrics[0].RepositoryNames) == 0 {
 		return nil, errors.New("validating autoscaling metrics: spec.autoscaling.metrics[].repositoryNames is required and must have one or more entries for organizational runner deployment")
 	}
@@ -163,28 +178,10 @@ func (r *HorizontalRunnerAutoscalerReconciler) calculateReplicasByQueuedAndInPro
 		}
 	}

-	minReplicas := *hra.Spec.MinReplicas
-	maxReplicas := *hra.Spec.MaxReplicas
 	necessaryReplicas := queued + inProgress
-
-	var desiredReplicas int
-
-	if necessaryReplicas < minReplicas {
-		desiredReplicas = minReplicas
-	} else if necessaryReplicas > maxReplicas {
-		desiredReplicas = maxReplicas
-	} else {
-		desiredReplicas = necessaryReplicas
-	}
-
-	rd.Status.Replicas = &desiredReplicas
-	replicas := desiredReplicas

 	r.Log.V(1).Info(
-		"Calculated desired replicas",
-		"computed_replicas_desired", desiredReplicas,
-		"spec_replicas_min", minReplicas,
-		"spec_replicas_max", maxReplicas,
+		fmt.Sprintf("Suggested desired replicas of %d by TotalNumberOfQueuedAndInProgressWorkflowRuns", necessaryReplicas),
 		"workflow_runs_completed", completed,
 		"workflow_runs_in_progress", inProgress,
 		"workflow_runs_queued", queued,
@@ -194,13 +191,11 @@ func (r *HorizontalRunnerAutoscalerReconciler) calculateReplicasByQueuedAndInPro
 		"horizontal_runner_autoscaler", hra.Name,
 	)

-	return &replicas, nil
+	return &necessaryReplicas, nil
 }

-func (r *HorizontalRunnerAutoscalerReconciler) calculateReplicasByPercentageRunnersBusy(rd v1alpha1.RunnerDeployment, hra v1alpha1.HorizontalRunnerAutoscaler) (*int, error) {
+func (r *HorizontalRunnerAutoscalerReconciler) suggestReplicasByPercentageRunnersBusy(rd v1alpha1.RunnerDeployment, hra v1alpha1.HorizontalRunnerAutoscaler) (*int, error) {
 	ctx := context.Background()
-	minReplicas := *hra.Spec.MinReplicas
-	maxReplicas := *hra.Spec.MaxReplicas
 	metrics := hra.Spec.Metrics[0]
 	scaleUpThreshold := defaultScaleUpThreshold
 	scaleDownThreshold := defaultScaleDownThreshold
@@ -259,9 +254,30 @@ func (r *HorizontalRunnerAutoscalerReconciler) calculateReplicasByPercentageRunn

 	// return the list of runners in namespace. Horizontal Runner Autoscaler should only be responsible for scaling resources in its own ns.
 	var runnerList v1alpha1.RunnerList
-	if err := r.List(ctx, &runnerList, client.InNamespace(rd.Namespace)); err != nil {
+
+	var opts []client.ListOption
+
+	opts = append(opts, client.InNamespace(rd.Namespace))
+
+	selector, err := metav1.LabelSelectorAsSelector(getSelector(&rd))
+	if err != nil {
+		return nil, err
+	}
+
+	opts = append(opts, client.MatchingLabelsSelector{Selector: selector})
+
+	r.Log.V(2).Info("Finding runners with selector", "ns", rd.Namespace)
+
+	if err := r.List(
+		ctx,
+		&runnerList,
+		opts...,
+	); err != nil {
+		if !kerrors.IsNotFound(err) {
 			return nil, err
 		}
+	}

 	runnerMap := make(map[string]struct{})
 	for _, items := range runnerList.Items {
 		runnerMap[items.Name] = struct{}{}
@@ -282,45 +298,62 @@ func (r *HorizontalRunnerAutoscalerReconciler) calculateReplicasByPercentageRunn
 	if err != nil {
 		return nil, err
 	}
-	numRunners := len(runnerList.Items)
-	numRunnersBusy := 0

+	var desiredReplicasBefore int
+
+	if v := rd.Spec.Replicas; v == nil {
+		desiredReplicasBefore = 1
+	} else {
+		desiredReplicasBefore = *v
+	}
+
+	var (
+		numRunners           int
+		numRunnersRegistered int
+		numRunnersBusy       int
+	)
+
+	numRunners = len(runnerList.Items)
+
 	for _, runner := range runners {
-		if _, ok := runnerMap[*runner.Name]; ok && runner.GetBusy() {
-			numRunnersBusy++
+		if _, ok := runnerMap[*runner.Name]; ok {
+			numRunnersRegistered++
+
+			if runner.GetBusy() {
+				numRunnersBusy++
+			}
 		}
 	}

 	var desiredReplicas int
-	fractionBusy := float64(numRunnersBusy) / float64(numRunners)
+	fractionBusy := float64(numRunnersBusy) / float64(desiredReplicasBefore)
 	if fractionBusy >= scaleUpThreshold {
 		if scaleUpAdjustment > 0 {
-			desiredReplicas = numRunners + scaleUpAdjustment
+			desiredReplicas = desiredReplicasBefore + scaleUpAdjustment
 		} else {
-			desiredReplicas = int(math.Ceil(float64(numRunners) * scaleUpFactor))
+			desiredReplicas = int(math.Ceil(float64(desiredReplicasBefore) * scaleUpFactor))
 		}
 	} else if fractionBusy < scaleDownThreshold {
 		if scaleDownAdjustment > 0 {
-			desiredReplicas = numRunners - scaleDownAdjustment
+			desiredReplicas = desiredReplicasBefore - scaleDownAdjustment
 		} else {
-			desiredReplicas = int(float64(numRunners) * scaleDownFactor)
+			desiredReplicas = int(float64(desiredReplicasBefore) * scaleDownFactor)
 		}
 	} else {
 		desiredReplicas = *rd.Spec.Replicas
 	}

-	if desiredReplicas < minReplicas {
-		desiredReplicas = minReplicas
-	} else if desiredReplicas > maxReplicas {
-		desiredReplicas = maxReplicas
-	}
+	// NOTES for operators:
+	//
+	// - num_runners can be twice as large as replicas_desired_before while
+	//   the runnerdeployment controller is replacing RunnerReplicaSet for runner update.

 	r.Log.V(1).Info(
-		"Calculated desired replicas",
-		"computed_replicas_desired", desiredReplicas,
-		"spec_replicas_min", minReplicas,
-		"spec_replicas_max", maxReplicas,
-		"current_replicas", rd.Spec.Replicas,
+		fmt.Sprintf("Suggested desired replicas of %d by PercentageRunnersBusy", desiredReplicas),
+		"replicas_desired_before", desiredReplicasBefore,
+		"replicas_desired", desiredReplicas,
+		"num_runners", numRunners,
+		"num_runners_registered", numRunnersRegistered,
+		"num_runners_busy", numRunnersBusy,
 		"namespace", hra.Namespace,
 		"runner_deployment", rd.Name,
@@ -330,8 +363,5 @@ func (r *HorizontalRunnerAutoscalerReconciler) calculateReplicasByPercentageRunn
 		"repository", repository,
 	)

-	rd.Status.Replicas = &desiredReplicas
-	replicas := desiredReplicas
-
-	return &replicas, nil
+	return &desiredReplicas, nil
 }
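
For readers following the PercentageRunnersBusy change above: the busy fraction is now computed against the previously desired replica count rather than the momentarily observed runner count, and the min/max clamp has moved out of this function into the caller. The following is a minimal, self-contained Go sketch of that arithmetic in isolation; the function and parameter names (suggestByBusyFraction, desiredBefore, and so on) are illustrative stand-ins, not the controller's actual identifiers.

	package main

	import (
		"fmt"
		"math"
	)

	// suggestByBusyFraction mirrors the diff's new logic: scale relative to
	// the previously desired replica count, not the observed number of runner
	// pods, which can briefly double while a RunnerReplicaSet is replaced.
	func suggestByBusyFraction(desiredBefore, busy int, upThreshold, downThreshold, upFactor, downFactor float64, upAdj, downAdj int) int {
		fractionBusy := float64(busy) / float64(desiredBefore)
		switch {
		case fractionBusy >= upThreshold:
			if upAdj > 0 {
				return desiredBefore + upAdj
			}
			return int(math.Ceil(float64(desiredBefore) * upFactor))
		case fractionBusy < downThreshold:
			if downAdj > 0 {
				return desiredBefore - downAdj
			}
			return int(float64(desiredBefore) * downFactor)
		default:
			return desiredBefore
		}
	}

	func main() {
		// 3 of 4 desired runners busy; 0.75 >= the 0.75 up-threshold,
		// so scale up by factor 1.3: ceil(4 * 1.3) = 6.
		fmt.Println(suggestByBusyFraction(4, 3, 0.75, 0.3, 1.3, 0.7, 0, 0))
	}
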
@@ -224,7 +224,7 @@ func TestDetermineDesiredReplicas_RepositoryRunner(t *testing.T) {
 			},
 		}

-		got, err := h.computeReplicas(rd, hra)
+		got, _, _, err := h.computeReplicasWithCache(log, metav1Now.Time, rd, hra)
 		if err != nil {
 			if tc.err == "" {
 				t.Fatalf("unexpected error: expected none, got %v", err)
@@ -234,12 +234,8 @@ func TestDetermineDesiredReplicas_RepositoryRunner(t *testing.T) {
 			return
 		}

-		if got == nil {
-			t.Fatalf("unexpected value of rs.Spec.Replicas: nil")
-		}
-
-		if *got != tc.want {
-			t.Errorf("%d: incorrect desired replicas: want %d, got %d", i, tc.want, *got)
+		if got != tc.want {
+			t.Errorf("%d: incorrect desired replicas: want %d, got %d", i, tc.want, got)
 		}
 	})
 }
@@ -424,6 +420,8 @@ func TestDetermineDesiredReplicas_OrganizationalRunner(t *testing.T) {
 	_ = v1alpha1.AddToScheme(scheme)

 	t.Run(fmt.Sprintf("case %d", i), func(t *testing.T) {
+		t.Helper()
+
 		server := fake.NewServer(
 			fake.WithListRepositoryWorkflowRunsResponse(200, tc.workflowRuns, tc.workflowRuns_queued, tc.workflowRuns_in_progress),
 			fake.WithListWorkflowJobsResponse(200, tc.workflowJobs),
@@ -443,7 +441,17 @@ func TestDetermineDesiredReplicas_OrganizationalRunner(t *testing.T) {
 			Name: "testrd",
 		},
 		Spec: v1alpha1.RunnerDeploymentSpec{
+			Selector: &metav1.LabelSelector{
+				MatchLabels: map[string]string{
+					"foo": "bar",
+				},
+			},
 			Template: v1alpha1.RunnerTemplate{
+				ObjectMeta: metav1.ObjectMeta{
+					Labels: map[string]string{
+						"foo": "bar",
+					},
+				},
 				Spec: v1alpha1.RunnerSpec{
 					Organization: tc.org,
 				},
@@ -475,7 +483,7 @@ func TestDetermineDesiredReplicas_OrganizationalRunner(t *testing.T) {
 			},
 		}

-		got, err := h.computeReplicas(rd, hra)
+		got, _, _, err := h.computeReplicasWithCache(log, metav1Now.Time, rd, hra)
 		if err != nil {
 			if tc.err == "" {
 				t.Fatalf("unexpected error: expected none, got %v", err)
@@ -485,12 +493,8 @@ func TestDetermineDesiredReplicas_OrganizationalRunner(t *testing.T) {
 			return
 		}

-		if got == nil {
-			t.Fatalf("unexpected value of rs.Spec.Replicas: nil, wanted %v", tc.want)
-		}
-
-		if *got != tc.want {
-			t.Errorf("%d: incorrect desired replicas: want %d, got %d", i, tc.want, *got)
+		if got != tc.want {
+			t.Errorf("%d: incorrect desired replicas: want %d, got %d", i, tc.want, got)
 		}
 	})
 }
@@ -53,11 +53,11 @@ type HorizontalRunnerAutoscalerGitHubWebhook struct {
 	// the administrator is generated and specified in GitHub Web UI.
 	SecretKeyBytes []byte

-	// WatchNamespace is the namespace to watch for HorizontalRunnerAutoscaler's to be
+	// Namespace is the namespace to watch for HorizontalRunnerAutoscaler's to be
 	// scaled on Webhook.
 	// Set to empty for letting it watch for all namespaces.
-	WatchNamespace string
-	Name           string
+	Namespace string
+	Name      string
 }

 func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) Reconcile(request reconcile.Request) (reconcile.Result, error) {
@@ -95,6 +95,12 @@ func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) Handle(w http.Respons
 		}
 	}()

+	// respond ok to GET / e.g. for health check
+	if r.Method == http.MethodGet {
+		fmt.Fprintln(w, "webhook server is running")
+		return
+	}
+
 	var payload []byte

 	if len(autoscaler.SecretKeyBytes) > 0 {
@@ -153,6 +159,13 @@ func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) Handle(w http.Respons
 			e.Repo.Owner.GetType(),
 			autoscaler.MatchPullRequestEvent(e),
 		)
+
+		if pullRequest := e.PullRequest; pullRequest != nil {
+			log = log.WithValues(
+				"pullRequest.base.ref", e.PullRequest.Base.GetRef(),
+				"action", e.GetAction(),
+			)
+		}
 	case *gogithub.CheckRunEvent:
 		target, err = autoscaler.getScaleUpTarget(
 			context.TODO(),
@@ -162,6 +175,13 @@ func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) Handle(w http.Respons
 			e.Repo.Owner.GetType(),
 			autoscaler.MatchCheckRunEvent(e),
 		)
+
+		if checkRun := e.GetCheckRun(); checkRun != nil {
+			log = log.WithValues(
+				"checkRun.status", checkRun.GetStatus(),
+				"action", e.GetAction(),
+			)
+		}
 	case *gogithub.PingEvent:
 		ok = true

@@ -189,9 +209,11 @@ func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) Handle(w http.Respons
 	}

 	if target == nil {
-		msg := "no horizontalrunnerautoscaler to scale for this github event"
+		log.Info(
+			"Scale target not found. If this is unexpected, ensure that there is exactly one repository-wide or organizational runner deployment that matches this webhook event",
+		)

-		log.Info(msg, "eventType", webhookType)
+		msg := "no horizontalrunnerautoscaler to scale for this github event"

 		ok = true

@@ -224,7 +246,7 @@ func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) Handle(w http.Respons
 }

 func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) findHRAsByKey(ctx context.Context, value string) ([]v1alpha1.HorizontalRunnerAutoscaler, error) {
-	ns := autoscaler.WatchNamespace
+	ns := autoscaler.Namespace

 	var defaultListOpts []client.ListOption

@@ -238,8 +260,8 @@ func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) findHRAsByKey(ctx con
 	opts := append([]client.ListOption{}, defaultListOpts...)
 	opts = append(opts, client.MatchingFields{scaleTargetKey: value})

-	if autoscaler.WatchNamespace != "" {
-		opts = append(opts, client.InNamespace(autoscaler.WatchNamespace))
+	if autoscaler.Namespace != "" {
+		opts = append(opts, client.InNamespace(autoscaler.Namespace))
 	}

 	var hraList v1alpha1.HorizontalRunnerAutoscalerList
@@ -326,7 +348,7 @@ func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) getScaleTarget(ctx co
 	autoscaler.Log.Info(
 		"Found too many scale targets: "+
 			"It must be exactly one to avoid ambiguity. "+
-			"Either set WatchNamespace for the webhook-based autoscaler to let it only find HRAs in the namespace, "+
+			"Either set Namespace for the webhook-based autoscaler to let it only find HRAs in the namespace, "+
 			"or update Repository or Organization fields in your RunnerDeployment resources to fix the ambiguity.",
 		"scaleTargets", strings.Join(scaleTargetIDs, ","))

@@ -359,10 +381,6 @@ func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) getScaleUpTarget(ctx
 		return target, nil
 	}

-	log.Info(
-		"Scale target not found. If this is unexpected, ensure that there is exactly one repository-wide or organizational runner deployment that matches this webhook event",
-	)
-
 	return nil, nil
 }
@@ -3,6 +3,7 @@ package controllers
 import (
 	"github.com/google/go-github/v33/github"
 	"github.com/summerwind/actions-runner-controller/api/v1alpha1"
+	"github.com/summerwind/actions-runner-controller/pkg/actionsglob"
 )

 func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) MatchCheckRunEvent(event *github.CheckRunEvent) func(scaleUpTrigger v1alpha1.ScaleUpTrigger) bool {
@@ -27,6 +28,16 @@ func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) MatchCheckRunEvent(ev
 			return false
 		}

+		if checkRun := event.CheckRun; checkRun != nil && len(cr.Names) > 0 {
+			for _, pat := range cr.Names {
+				if r := actionsglob.Match(pat, checkRun.GetName()); r {
+					return true
+				}
+			}
+
+			return false
+		}
+
 		return true
 	}
 }
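
The Names matching added above short-circuits on the first glob that matches the check run's name, and only restricts matching when at least one pattern is listed. Below is a rough Go sketch of that control flow; match is a hypothetical stand-in for the repository's actionsglob.Match (whose exact glob semantics are not shown in this diff), here supporting only a trailing "*" wildcard for illustration.

	package main

	import (
		"fmt"
		"strings"
	)

	// match is a stand-in for actionsglob.Match; it only supports a
	// trailing "*" wildcard, which is enough to illustrate the flow.
	func match(pattern, name string) bool {
		if strings.HasSuffix(pattern, "*") {
			return strings.HasPrefix(name, strings.TrimSuffix(pattern, "*"))
		}
		return pattern == name
	}

	// shouldScale mirrors the added branch: when the trigger lists any
	// names, at least one pattern must match the check_run's name;
	// when no names are listed, the name places no restriction.
	func shouldScale(names []string, checkRunName string) bool {
		if len(names) > 0 {
			for _, pat := range names {
				if match(pat, checkRunName) {
					return true
				}
			}
			return false
		}
		return true
	}

	func main() {
		fmt.Println(shouldScale([]string{"build-*"}, "build-linux")) // true
		fmt.Println(shouldScale([]string{"deploy"}, "build-linux"))  // false
		fmt.Println(shouldScale(nil, "anything"))                    // true
	}
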
@@ -113,6 +113,19 @@ func TestWebhookPing(t *testing.T) {
 	)
 }

+func TestGetRequest(t *testing.T) {
+	hra := HorizontalRunnerAutoscalerGitHubWebhook{}
+	request, _ := http.NewRequest(http.MethodGet, "/", nil)
+	recorder := httptest.ResponseRecorder{}
+
+	hra.Handle(&recorder, request)
+	response := recorder.Result()
+
+	if response.StatusCode != http.StatusOK {
+		t.Errorf("want %d, got %d", http.StatusOK, response.StatusCode)
+	}
+}
+
 func TestGetValidCapacityReservations(t *testing.T) {
 	now := time.Now()
@@ -19,6 +19,7 @@ package controllers
 import (
 	"context"
 	"fmt"
+	corev1 "k8s.io/api/core/v1"
 	"time"

 	"github.com/summerwind/actions-runner-controller/github"
@@ -30,10 +31,10 @@ import (
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"

-	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

 	"github.com/summerwind/actions-runner-controller/api/v1alpha1"
+	"github.com/summerwind/actions-runner-controller/controllers/metrics"
 )

 const (
@@ -52,6 +53,8 @@ type HorizontalRunnerAutoscalerReconciler struct {
 	Name string
 }

+const defaultReplicas = 1
+
 // +kubebuilder:rbac:groups=actions.summerwind.dev,resources=runnerdeployments,verbs=get;list;watch;update;patch
 // +kubebuilder:rbac:groups=actions.summerwind.dev,resources=horizontalrunnerautoscalers,verbs=get;list;watch;create;update;patch;delete
 // +kubebuilder:rbac:groups=actions.summerwind.dev,resources=horizontalrunnerautoscalers/finalizers,verbs=get;list;watch;create;update;patch;delete
@@ -71,6 +74,8 @@ func (r *HorizontalRunnerAutoscalerReconciler) Reconcile(req ctrl.Request) (ctrl
 		return ctrl.Result{}, nil
 	}

+	metrics.SetHorizontalRunnerAutoscalerSpec(hra.ObjectMeta, hra.Spec)
+
 	var rd v1alpha1.RunnerDeployment
 	if err := r.Get(ctx, types.NamespacedName{
 		Namespace: req.Namespace,
@@ -83,41 +88,18 @@ func (r *HorizontalRunnerAutoscalerReconciler) Reconcile(req ctrl.Request) (ctrl
 		return ctrl.Result{}, nil
 	}

-	var replicas *int
-
-	replicasFromCache := r.getDesiredReplicasFromCache(hra)
-
-	if replicasFromCache != nil {
-		replicas = replicasFromCache
-	} else {
-		var err error
-
-		replicas, err = r.computeReplicas(rd, hra)
-		if err != nil {
-			r.Recorder.Event(&hra, corev1.EventTypeNormal, "RunnerAutoscalingFailure", err.Error())
-
-			log.Error(err, "Could not compute replicas")
-
-			return ctrl.Result{}, err
-		}
-	}
-
-	const defaultReplicas = 1
-
-	currentDesiredReplicas := getIntOrDefault(rd.Spec.Replicas, defaultReplicas)
-	newDesiredReplicas := getIntOrDefault(replicas, defaultReplicas)
-
 	now := time.Now()

-	for _, reservation := range hra.Spec.CapacityReservations {
-		if reservation.ExpirationTime.Time.After(now) {
-			newDesiredReplicas += reservation.Replicas
-		}
+	newDesiredReplicas, computedReplicas, computedReplicasFromCache, err := r.computeReplicasWithCache(log, now, rd, hra)
+	if err != nil {
+		r.Recorder.Event(&hra, corev1.EventTypeNormal, "RunnerAutoscalingFailure", err.Error())
+
+		log.Error(err, "Could not compute replicas")
+
+		return ctrl.Result{}, err
 	}

-	if hra.Spec.MaxReplicas != nil && *hra.Spec.MaxReplicas < newDesiredReplicas {
-		newDesiredReplicas = *hra.Spec.MaxReplicas
-	}
+	currentDesiredReplicas := getIntOrDefault(rd.Spec.Replicas, defaultReplicas)

 	// Please add more conditions that we can in-place update the newest runnerreplicaset without disruption
 	if currentDesiredReplicas != newDesiredReplicas {
@@ -143,7 +125,7 @@ func (r *HorizontalRunnerAutoscalerReconciler) Reconcile(req ctrl.Request) (ctrl
 		updated.Status.DesiredReplicas = &newDesiredReplicas
 	}

-	if replicasFromCache == nil {
+	if computedReplicasFromCache == nil {
 		if updated == nil {
 			updated = hra.DeepCopy()
 		}
@@ -160,12 +142,14 @@ func (r *HorizontalRunnerAutoscalerReconciler) Reconcile(req ctrl.Request) (ctrl

 		updated.Status.CacheEntries = append(cacheEntries, v1alpha1.CacheEntry{
 			Key:            v1alpha1.CacheEntryKeyDesiredReplicas,
-			Value:          *replicas,
+			Value:          computedReplicas,
 			ExpirationTime: metav1.Time{Time: time.Now().Add(cacheDuration)},
 		})
 	}

 	if updated != nil {
+		metrics.SetHorizontalRunnerAutoscalerStatus(updated.ObjectMeta, updated.Status)
+
 		if err := r.Status().Patch(ctx, updated, client.MergeFrom(&hra)); err != nil {
 			return ctrl.Result{}, fmt.Errorf("patching horizontalrunnerautoscaler status to add cache entry: %w", err)
 		}
@@ -200,14 +184,59 @@ func (r *HorizontalRunnerAutoscalerReconciler) SetupWithManager(mgr ctrl.Manager
 		Complete(r)
 }

-func (r *HorizontalRunnerAutoscalerReconciler) computeReplicas(rd v1alpha1.RunnerDeployment, hra v1alpha1.HorizontalRunnerAutoscaler) (*int, error) {
-	var computedReplicas *int
-
-	replicas, err := r.determineDesiredReplicas(rd, hra)
-	if err != nil {
-		return nil, err
+func (r *HorizontalRunnerAutoscalerReconciler) computeReplicasWithCache(log logr.Logger, now time.Time, rd v1alpha1.RunnerDeployment, hra v1alpha1.HorizontalRunnerAutoscaler) (int, int, *int, error) {
+	minReplicas := defaultReplicas
+	if hra.Spec.MinReplicas != nil && *hra.Spec.MinReplicas > 0 {
+		minReplicas = *hra.Spec.MinReplicas
 	}
+
+	var suggestedReplicas int
+
+	suggestedReplicasFromCache := r.fetchSuggestedReplicasFromCache(hra)
+
+	var cached *int
+
+	if suggestedReplicasFromCache != nil {
+		cached = suggestedReplicasFromCache
+
+		if cached == nil {
+			suggestedReplicas = minReplicas
+		} else {
+			suggestedReplicas = *cached
+		}
+	} else {
+		v, err := r.suggestDesiredReplicas(rd, hra)
+		if err != nil {
+			return 0, 0, nil, err
+		}
+
+		if v == nil {
+			suggestedReplicas = minReplicas
+		} else {
+			suggestedReplicas = *v
+		}
+	}
+
+	var reserved int
+
+	for _, reservation := range hra.Spec.CapacityReservations {
+		if reservation.ExpirationTime.Time.After(now) {
+			reserved += reservation.Replicas
+		}
+	}
+
+	newDesiredReplicas := suggestedReplicas + reserved
+
+	if newDesiredReplicas < minReplicas {
+		newDesiredReplicas = minReplicas
+	} else if hra.Spec.MaxReplicas != nil && newDesiredReplicas > *hra.Spec.MaxReplicas {
+		newDesiredReplicas = *hra.Spec.MaxReplicas
+	}

 	//
 	// Delay scaling-down for ScaleDownDelaySecondsAfterScaleUp or DefaultScaleDownDelay
 	//

 	var scaleDownDelay time.Duration

 	if hra.Spec.ScaleDownDelaySecondsAfterScaleUp != nil {
@@ -216,17 +245,50 @@ func (r *HorizontalRunnerAutoscalerReconciler) computeReplicas(rd v1alpha1.Runne
 		scaleDownDelay = DefaultScaleDownDelay
 	}

-	now := time.Now()
+	var scaleDownDelayUntil *time.Time

 	if hra.Status.DesiredReplicas == nil ||
-		*hra.Status.DesiredReplicas < *replicas ||
-		hra.Status.LastSuccessfulScaleOutTime == nil ||
-		hra.Status.LastSuccessfulScaleOutTime.Add(scaleDownDelay).Before(now) {
+		*hra.Status.DesiredReplicas < newDesiredReplicas ||
+		hra.Status.LastSuccessfulScaleOutTime == nil {

-		computedReplicas = replicas
+	} else if hra.Status.LastSuccessfulScaleOutTime != nil {
+		t := hra.Status.LastSuccessfulScaleOutTime.Add(scaleDownDelay)
+
+		// ScaleDownDelay is not passed
+		if t.After(now) {
+			scaleDownDelayUntil = &t
+			newDesiredReplicas = *hra.Status.DesiredReplicas
+		}
 	} else {
-		computedReplicas = hra.Status.DesiredReplicas
+		newDesiredReplicas = *hra.Status.DesiredReplicas
 	}

-	return computedReplicas, nil
+	//
+	// Logs various numbers for monitoring and debugging purposes
+	//
+
+	kvs := []interface{}{
+		"suggested", suggestedReplicas,
+		"reserved", reserved,
+		"min", minReplicas,
+	}
+
+	if cached != nil {
+		kvs = append(kvs, "cached", *cached)
+	}
+
+	if scaleDownDelayUntil != nil {
+		kvs = append(kvs, "last_scale_up_time", *hra.Status.LastSuccessfulScaleOutTime)
+		kvs = append(kvs, "scale_down_delay_until", scaleDownDelayUntil)
+	}
+
+	if maxReplicas := hra.Spec.MaxReplicas; maxReplicas != nil {
+		kvs = append(kvs, "max", *maxReplicas)
+	}
+
+	log.V(1).Info(fmt.Sprintf("Calculated desired replicas of %d", newDesiredReplicas),
+		kvs...,
+	)
+
+	return newDesiredReplicas, suggestedReplicas, suggestedReplicasFromCache, nil
 }
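
The refactored computeReplicasWithCache above folds the webhook capacity reservations and the min/max clamp into a single place. A compact Go sketch of that composition, under the same assumptions shown in the diff (the suggested value may come from the cache or a fresh suggestion and defaults to min when absent; expired reservations are ignored); the names desiredReplicas and reservation are illustrative, not the controller's:

	package main

	import (
		"fmt"
		"time"
	)

	type reservation struct {
		replicas  int
		expiresAt time.Time
	}

	// desiredReplicas mirrors the new computation: start from the suggested
	// value, add unexpired capacity reservations, then clamp to [min, max].
	func desiredReplicas(now time.Time, suggested *int, min int, max *int, reservations []reservation) int {
		n := min
		if suggested != nil {
			n = *suggested
		}

		for _, r := range reservations {
			if r.expiresAt.After(now) {
				n += r.replicas
			}
		}

		if n < min {
			n = min
		} else if max != nil && n > *max {
			n = *max
		}
		return n
	}

	func main() {
		now := time.Now()
		max := 5
		suggested := 2
		rs := []reservation{
			{replicas: 2, expiresAt: now.Add(time.Minute)},  // unexpired: counted
			{replicas: 9, expiresAt: now.Add(-time.Minute)}, // expired: ignored
		}
		fmt.Println(desiredReplicas(now, &suggested, 1, &max, rs)) // 4
	}
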
@@ -3,13 +3,14 @@ package controllers
 import (
 	"context"
 	"fmt"
-	"github.com/google/go-github/v33/github"
-	github2 "github.com/summerwind/actions-runner-controller/github"
-	"k8s.io/apimachinery/pkg/runtime"
 	"net/http"
+	"net/http/httptest"
 	"time"

+	"github.com/google/go-github/v33/github"
+	github2 "github.com/summerwind/actions-runner-controller/github"
+	"k8s.io/apimachinery/pkg/runtime"
+
 	"github.com/summerwind/actions-runner-controller/github/fake"

 	corev1 "k8s.io/api/core/v1"
@@ -70,7 +71,9 @@ func SetupIntegrationTest(ctx context.Context) *testEnvironment {
 	err := k8sClient.Create(ctx, ns)
 	Expect(err).NotTo(HaveOccurred(), "failed to create test namespace")

-	mgr, err := ctrl.NewManager(cfg, ctrl.Options{})
+	mgr, err := ctrl.NewManager(cfg, ctrl.Options{
+		Namespace: ns.Name,
+	})
 	Expect(err).NotTo(HaveOccurred(), "failed to create manager")

 	responses := &fake.FixedResponses{}
@@ -96,6 +99,21 @@ func SetupIntegrationTest(ctx context.Context) *testEnvironment {
 		return fmt.Sprintf("%s%s", ns.Name, name)
 	}

+	runnerController := &RunnerReconciler{
+		Client:                      mgr.GetClient(),
+		Scheme:                      scheme.Scheme,
+		Log:                         logf.Log,
+		Recorder:                    mgr.GetEventRecorderFor("runnerreplicaset-controller"),
+		GitHubClient:                env.ghClient,
+		RunnerImage:                 "example/runner:test",
+		DockerImage:                 "example/docker:test",
+		Name:                        controllerName("runner"),
+		RegistrationRecheckInterval: time.Millisecond,
+		RegistrationRecheckJitter:   time.Millisecond,
+	}
+	err = runnerController.SetupWithManager(mgr)
+	Expect(err).NotTo(HaveOccurred(), "failed to setup runner controller")
+
 	replicasetController := &RunnerReplicaSetReconciler{
 		Client:   mgr.GetClient(),
 		Scheme:   scheme.Scheme,
@@ -105,7 +123,7 @@ func SetupIntegrationTest(ctx context.Context) *testEnvironment {
 		Name: controllerName("runnerreplicaset"),
 	}
 	err = replicasetController.SetupWithManager(mgr)
-	Expect(err).NotTo(HaveOccurred(), "failed to setup controller")
+	Expect(err).NotTo(HaveOccurred(), "failed to setup runnerreplicaset controller")

 	deploymentsController := &RunnerDeploymentReconciler{
 		Client:   mgr.GetClient(),
@@ -115,7 +133,7 @@ func SetupIntegrationTest(ctx context.Context) *testEnvironment {
 		Name: controllerName("runnnerdeployment"),
 	}
 	err = deploymentsController.SetupWithManager(mgr)
-	Expect(err).NotTo(HaveOccurred(), "failed to setup controller")
+	Expect(err).NotTo(HaveOccurred(), "failed to setup runnerdeployment controller")

 	autoscalerController := &HorizontalRunnerAutoscalerReconciler{
 		Client: mgr.GetClient(),
@@ -127,15 +145,15 @@ func SetupIntegrationTest(ctx context.Context) *testEnvironment {
 		Name: controllerName("horizontalrunnerautoscaler"),
 	}
 	err = autoscalerController.SetupWithManager(mgr)
-	Expect(err).NotTo(HaveOccurred(), "failed to setup controller")
+	Expect(err).NotTo(HaveOccurred(), "failed to setup autoscaler controller")

 	autoscalerWebhook := &HorizontalRunnerAutoscalerGitHubWebhook{
-		Client:         mgr.GetClient(),
-		Scheme:         scheme.Scheme,
-		Log:            logf.Log,
-		Recorder:       mgr.GetEventRecorderFor("horizontalrunnerautoscaler-controller"),
-		Name:           controllerName("horizontalrunnerautoscalergithubwebhook"),
-		WatchNamespace: ns.Name,
+		Client:    mgr.GetClient(),
+		Scheme:    scheme.Scheme,
+		Log:       logf.Log,
+		Recorder:  mgr.GetEventRecorderFor("horizontalrunnerautoscaler-controller"),
+		Name:      controllerName("horizontalrunnerautoscalergithubwebhook"),
+		Namespace: ns.Name,
 	}
 	err = autoscalerWebhook.SetupWithManager(mgr)
 	Expect(err).NotTo(HaveOccurred(), "failed to setup autoscaler webhook")
@@ -173,6 +191,93 @@ var _ = Context("INTEGRATION: Inside of a new namespace", func() {

 	Describe("when no existing resources exist", func() {

+		It("should create and scale organizational runners without any scaling metrics on pull_request event", func() {
+			name := "example-runnerdeploy"
+
+			{
+				rd := &actionsv1alpha1.RunnerDeployment{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:      name,
+						Namespace: ns.Name,
+					},
+					Spec: actionsv1alpha1.RunnerDeploymentSpec{
+						Replicas: intPtr(1),
+						Selector: &metav1.LabelSelector{
+							MatchLabels: map[string]string{
+								"foo": "bar",
+							},
+						},
+						Template: actionsv1alpha1.RunnerTemplate{
+							ObjectMeta: metav1.ObjectMeta{
+								Labels: map[string]string{
+									"foo": "bar",
+								},
+							},
+							Spec: actionsv1alpha1.RunnerSpec{
+								Organization: "test",
+								Image:        "bar",
+								Group:        "baz",
+								Env: []corev1.EnvVar{
+									{Name: "FOO", Value: "FOOVALUE"},
+								},
+							},
+						},
+					},
+				}
+
+				ExpectCreate(ctx, rd, "test RunnerDeployment")
+				ExpectRunnerSetsCountEventuallyEquals(ctx, ns.Name, 1)
+				ExpectRunnerSetsManagedReplicasCountEventuallyEquals(ctx, ns.Name, 1)
+			}
+
+			// Scale-up to 2 replicas
+			{
+				hra := &actionsv1alpha1.HorizontalRunnerAutoscaler{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:      name,
+						Namespace: ns.Name,
+					},
+					Spec: actionsv1alpha1.HorizontalRunnerAutoscalerSpec{
+						ScaleTargetRef: actionsv1alpha1.ScaleTargetRef{
+							Name: name,
+						},
+						MinReplicas:                       intPtr(2),
+						MaxReplicas:                       intPtr(5),
+						ScaleDownDelaySecondsAfterScaleUp: intPtr(1),
+						Metrics:                           nil,
+						ScaleUpTriggers: []actionsv1alpha1.ScaleUpTrigger{
+							{
+								GitHubEvent: &actionsv1alpha1.GitHubEventScaleUpTriggerSpec{
+									PullRequest: &actionsv1alpha1.PullRequestSpec{
+										Types:    []string{"created"},
+										Branches: []string{"main"},
+									},
+								},
+								Amount:   1,
+								Duration: metav1.Duration{Duration: time.Minute},
+							},
+						},
+					},
+				}
+
+				ExpectCreate(ctx, hra, "test HorizontalRunnerAutoscaler")
+
+				ExpectRunnerSetsCountEventuallyEquals(ctx, ns.Name, 1)
+				ExpectRunnerSetsManagedReplicasCountEventuallyEquals(ctx, ns.Name, 2)
+				ExpectHRAStatusCacheEntryLengthEventuallyEquals(ctx, ns.Name, name, 1)
+			}
+
+			{
+				env.ExpectRegisteredNumberCountEventuallyEquals(2, "count of fake runners after HRA creation")
+			}
+
+			// Scale-up to 3 replicas on second pull_request create webhook event
+			{
+				env.SendOrgPullRequestEvent("test", "valid", "main", "created")
+				ExpectRunnerSetsManagedReplicasCountEventuallyEquals(ctx, ns.Name, 3, "runners after second webhook event")
+			}
+		})
+
 		It("should create and scale organization's repository runners on pull_request event", func() {
 			name := "example-runnerdeploy"
@@ -184,7 +289,17 @@ var _ = Context("INTEGRATION: Inside of a new namespace", func() {
 				},
 				Spec: actionsv1alpha1.RunnerDeploymentSpec{
 					Replicas: intPtr(1),
+					Selector: &metav1.LabelSelector{
+						MatchLabels: map[string]string{
+							"foo": "bar",
+						},
+					},
 					Template: actionsv1alpha1.RunnerTemplate{
+						ObjectMeta: metav1.ObjectMeta{
+							Labels: map[string]string{
+								"foo": "bar",
+							},
+						},
 						Spec: actionsv1alpha1.RunnerSpec{
 							Repository: "test/valid",
 							Image:      "bar",
@@ -224,7 +339,11 @@ var _ = Context("INTEGRATION: Inside of a new namespace", func() {
 					MinReplicas:                       intPtr(1),
 					MaxReplicas:                       intPtr(3),
 					ScaleDownDelaySecondsAfterScaleUp: intPtr(1),
-					Metrics:                           nil,
+					Metrics: []actionsv1alpha1.MetricSpec{
+						{
+							Type: actionsv1alpha1.AutoscalingMetricTypeTotalNumberOfQueuedAndInProgressWorkflowRuns,
+						},
+					},
 					ScaleUpTriggers: []actionsv1alpha1.ScaleUpTrigger{
 						{
 							GitHubEvent: &actionsv1alpha1.GitHubEventScaleUpTriggerSpec{
@@ -301,7 +420,17 @@ var _ = Context("INTEGRATION: Inside of a new namespace", func() {
 				},
 				Spec: actionsv1alpha1.RunnerDeploymentSpec{
 					Replicas: intPtr(1),
+					Selector: &metav1.LabelSelector{
+						MatchLabels: map[string]string{
+							"foo": "bar",
+						},
+					},
 					Template: actionsv1alpha1.RunnerTemplate{
+						ObjectMeta: metav1.ObjectMeta{
+							Labels: map[string]string{
+								"foo": "bar",
+							},
+						},
 						Spec: actionsv1alpha1.RunnerSpec{
 							Repository: "test/valid",
 							Image:      "bar",
@@ -339,7 +468,11 @@ var _ = Context("INTEGRATION: Inside of a new namespace", func() {
 					MinReplicas:                       intPtr(1),
 					MaxReplicas:                       intPtr(5),
 					ScaleDownDelaySecondsAfterScaleUp: intPtr(1),
-					Metrics:                           nil,
+					Metrics: []actionsv1alpha1.MetricSpec{
+						{
+							Type: actionsv1alpha1.AutoscalingMetricTypeTotalNumberOfQueuedAndInProgressWorkflowRuns,
+						},
+					},
 					ScaleUpTriggers: []actionsv1alpha1.ScaleUpTrigger{
 						{
 							GitHubEvent: &actionsv1alpha1.GitHubEventScaleUpTriggerSpec{
@@ -359,10 +492,9 @@ var _ = Context("INTEGRATION: Inside of a new namespace", func() {

 			ExpectRunnerSetsCountEventuallyEquals(ctx, ns.Name, 1)
 			ExpectRunnerSetsManagedReplicasCountEventuallyEquals(ctx, ns.Name, 3)
 		}

 		{
 			env.ExpectRegisteredNumberCountEventuallyEquals(3, "count of fake list runners")
 			env.SyncRunnerRegistrations()
 			ExpectRunnerCountEventuallyEquals(ctx, ns.Name, 3)
 		}

 		// Scale-up to 4 replicas on first check_run create webhook event
@@ -370,19 +502,123 @@ var _ = Context("INTEGRATION: Inside of a new namespace", func() {
 		{
 			env.SendOrgCheckRunEvent("test", "valid", "pending", "created")
 			ExpectRunnerSetsCountEventuallyEquals(ctx, ns.Name, 1, "runner sets after webhook")
 			ExpectRunnerSetsManagedReplicasCountEventuallyEquals(ctx, ns.Name, 4, "runners after first webhook event")
 		}

+		{
+			env.ExpectRegisteredNumberCountEventuallyEquals(4, "count of fake list runners")
+			env.SyncRunnerRegistrations()
+			ExpectRunnerCountEventuallyEquals(ctx, ns.Name, 4)
+		}
+
 		// Scale-up to 5 replicas on second check_run create webhook event
 		{
 			env.SendOrgCheckRunEvent("test", "valid", "pending", "created")
 			ExpectRunnerSetsManagedReplicasCountEventuallyEquals(ctx, ns.Name, 5, "runners after second webhook event")
+			env.ExpectRegisteredNumberCountEventuallyEquals(5, "count of fake list runners")
+			env.SyncRunnerRegistrations()
+			ExpectRunnerCountEventuallyEquals(ctx, ns.Name, 5)
 		}
 	})

+	It("should create and scale organization's repository runners only on check_run event", func() {
+		name := "example-runnerdeploy"
+
+		{
+			rd := &actionsv1alpha1.RunnerDeployment{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      name,
+					Namespace: ns.Name,
+				},
+				Spec: actionsv1alpha1.RunnerDeploymentSpec{
+					Replicas: intPtr(1),
+					Selector: &metav1.LabelSelector{
+						MatchLabels: map[string]string{
+							"foo": "bar",
+						},
+					},
+					Template: actionsv1alpha1.RunnerTemplate{
+						ObjectMeta: metav1.ObjectMeta{
+							Labels: map[string]string{
+								"foo": "bar",
+							},
+						},
+						Spec: actionsv1alpha1.RunnerSpec{
+							Repository: "test/valid",
+							Image:      "bar",
+							Group:      "baz",
+							Env: []corev1.EnvVar{
+								{Name: "FOO", Value: "FOOVALUE"},
+							},
+						},
+					},
+				},
+			}
+
+			ExpectCreate(ctx, rd, "test RunnerDeployment")
+			ExpectRunnerSetsCountEventuallyEquals(ctx, ns.Name, 1)
+			ExpectRunnerSetsManagedReplicasCountEventuallyEquals(ctx, ns.Name, 1)
+		}
+
-	env.ExpectRegisteredNumberCountEventuallyEquals(5, "count of fake list runners")
+		{
+			env.ExpectRegisteredNumberCountEventuallyEquals(1, "count of fake list runners")
+		}
+
+		// Scale-up to 3 replicas by the default TotalNumberOfQueuedAndInProgressWorkflowRuns-based scaling
+		// See workflowRunsFor3Replicas_queued and workflowRunsFor3Replicas_in_progress for GitHub List-Runners API responses
+		// used while testing.
+		{
+			hra := &actionsv1alpha1.HorizontalRunnerAutoscaler{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      name,
+					Namespace: ns.Name,
+				},
+				Spec: actionsv1alpha1.HorizontalRunnerAutoscalerSpec{
+					ScaleTargetRef: actionsv1alpha1.ScaleTargetRef{
+						Name: name,
+					},
+					MinReplicas:                       intPtr(1),
+					MaxReplicas:                       intPtr(5),
+					ScaleDownDelaySecondsAfterScaleUp: intPtr(1),
+					ScaleUpTriggers: []actionsv1alpha1.ScaleUpTrigger{
+						{
+							GitHubEvent: &actionsv1alpha1.GitHubEventScaleUpTriggerSpec{
+								CheckRun: &actionsv1alpha1.CheckRunSpec{
+									Types:  []string{"created"},
+									Status: "pending",
+								},
+							},
+							Amount:   1,
+							Duration: metav1.Duration{Duration: time.Minute},
+						},
+					},
+				},
+			}
+
+			ExpectCreate(ctx, hra, "test HorizontalRunnerAutoscaler")
+
+			ExpectRunnerSetsCountEventuallyEquals(ctx, ns.Name, 1)
+			ExpectRunnerSetsManagedReplicasCountEventuallyEquals(ctx, ns.Name, 1)
+		}
+
+		{
+			env.ExpectRegisteredNumberCountEventuallyEquals(1, "count of fake list runners")
+		}
+
+		// Scale-up to 2 replicas on first check_run create webhook event
+		{
+			env.SendOrgCheckRunEvent("test", "valid", "pending", "created")
+			ExpectRunnerSetsCountEventuallyEquals(ctx, ns.Name, 1, "runner sets after webhook")
+			ExpectRunnerSetsManagedReplicasCountEventuallyEquals(ctx, ns.Name, 2, "runners after first webhook event")
+		}
+
+		{
+			env.ExpectRegisteredNumberCountEventuallyEquals(2, "count of fake list runners")
+		}
+
+		// Scale-up to 3 replicas on second check_run create webhook event
+		{
+			env.SendOrgCheckRunEvent("test", "valid", "pending", "created")
+			ExpectRunnerSetsManagedReplicasCountEventuallyEquals(ctx, ns.Name, 3, "runners after second webhook event")
+		}
+
+		env.ExpectRegisteredNumberCountEventuallyEquals(3, "count of fake list runners")
+	})
It("should create and scale user's repository runners on pull_request event", func() {
|
||||
@@ -396,7 +632,17 @@ var _ = Context("INTEGRATION: Inside of a new namespace", func() {
|
||||
},
|
||||
Spec: actionsv1alpha1.RunnerDeploymentSpec{
|
||||
Replicas: intPtr(1),
|
||||
Selector: &metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
},
|
||||
Template: actionsv1alpha1.RunnerTemplate{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
},
|
||||
Spec: actionsv1alpha1.RunnerSpec{
|
||||
Repository: "test/valid",
|
||||
Image: "bar",
|
||||
@@ -436,7 +682,11 @@ var _ = Context("INTEGRATION: Inside of a new namespace", func() {
|
||||
MinReplicas: intPtr(1),
|
||||
MaxReplicas: intPtr(3),
|
||||
ScaleDownDelaySecondsAfterScaleUp: intPtr(1),
|
||||
Metrics: nil,
|
||||
Metrics: []actionsv1alpha1.MetricSpec{
|
||||
{
|
||||
Type: actionsv1alpha1.AutoscalingMetricTypeTotalNumberOfQueuedAndInProgressWorkflowRuns,
|
||||
},
|
||||
},
|
||||
ScaleUpTriggers: []actionsv1alpha1.ScaleUpTrigger{
|
||||
{
|
||||
GitHubEvent: &actionsv1alpha1.GitHubEventScaleUpTriggerSpec{
|
||||
@@ -504,6 +754,99 @@ var _ = Context("INTEGRATION: Inside of a new namespace", func() {
|
||||
}
|
||||
})
|
||||
|
||||
It("should create and scale user's repository runners only on pull_request event", func() {
|
||||
name := "example-runnerdeploy"
|
||||
|
||||
{
|
||||
rd := &actionsv1alpha1.RunnerDeployment{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Namespace: ns.Name,
|
||||
},
|
||||
Spec: actionsv1alpha1.RunnerDeploymentSpec{
|
||||
Replicas: intPtr(1),
|
||||
Selector: &metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
},
|
||||
Template: actionsv1alpha1.RunnerTemplate{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
},
|
||||
Spec: actionsv1alpha1.RunnerSpec{
|
||||
Repository: "test/valid",
|
||||
Image: "bar",
|
||||
Group: "baz",
|
||||
Env: []corev1.EnvVar{
|
||||
{Name: "FOO", Value: "FOOVALUE"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
ExpectCreate(ctx, rd, "test RunnerDeployment")
|
||||
ExpectRunnerSetsCountEventuallyEquals(ctx, ns.Name, 1)
|
||||
ExpectRunnerSetsManagedReplicasCountEventuallyEquals(ctx, ns.Name, 1)
|
||||
}
|
||||
|
||||
{
|
||||
hra := &actionsv1alpha1.HorizontalRunnerAutoscaler{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Namespace: ns.Name,
|
||||
},
|
||||
Spec: actionsv1alpha1.HorizontalRunnerAutoscalerSpec{
|
||||
ScaleTargetRef: actionsv1alpha1.ScaleTargetRef{
|
||||
Name: name,
|
||||
},
|
||||
MinReplicas: intPtr(1),
|
||||
MaxReplicas: intPtr(3),
|
||||
ScaleDownDelaySecondsAfterScaleUp: intPtr(1),
|
||||
ScaleUpTriggers: []actionsv1alpha1.ScaleUpTrigger{
|
||||
{
|
||||
GitHubEvent: &actionsv1alpha1.GitHubEventScaleUpTriggerSpec{
|
||||
PullRequest: &actionsv1alpha1.PullRequestSpec{
|
||||
Types: []string{"created"},
|
||||
Branches: []string{"main"},
|
||||
},
|
||||
},
|
||||
Amount: 1,
|
||||
Duration: metav1.Duration{Duration: time.Minute},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
ExpectCreate(ctx, hra, "test HorizontalRunnerAutoscaler")
|
||||
|
||||
ExpectRunnerSetsCountEventuallyEquals(ctx, ns.Name, 1)
|
||||
ExpectRunnerSetsManagedReplicasCountEventuallyEquals(ctx, ns.Name, 1)
|
||||
}
|
||||
|
||||
{
|
||||
env.ExpectRegisteredNumberCountEventuallyEquals(1, "count of fake runners after HRA creation")
|
||||
}
|
||||
|
||||
// Scale-up to 2 replicas on first pull_request create webhook event
|
||||
{
|
||||
env.SendUserPullRequestEvent("test", "valid", "main", "created")
|
||||
ExpectRunnerSetsCountEventuallyEquals(ctx, ns.Name, 1, "runner sets after webhook")
|
||||
ExpectRunnerSetsManagedReplicasCountEventuallyEquals(ctx, ns.Name, 2, "runners after first webhook event")
|
||||
ExpectHRADesiredReplicasEquals(ctx, ns.Name, name, 2, "runner deployment desired replicas")
|
||||
}
|
||||
|
||||
// Scale-up to 3 replicas on second pull_request create webhook event
|
||||
{
|
||||
env.SendUserPullRequestEvent("test", "valid", "main", "created")
|
||||
ExpectRunnerSetsManagedReplicasCountEventuallyEquals(ctx, ns.Name, 3, "runners after second webhook event")
|
||||
ExpectHRADesiredReplicasEquals(ctx, ns.Name, name, 3, "runner deployment desired replicas")
|
||||
}
|
||||
})
|
||||
|
||||
It("should create and scale user's repository runners on check_run event", func() {
|
||||
name := "example-runnerdeploy"
|
||||
|
||||
@@ -515,7 +858,17 @@ var _ = Context("INTEGRATION: Inside of a new namespace", func() {
|
||||
},
|
||||
Spec: actionsv1alpha1.RunnerDeploymentSpec{
|
||||
Replicas: intPtr(1),
|
||||
Selector: &metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
},
|
||||
Template: actionsv1alpha1.RunnerTemplate{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
},
|
||||
Spec: actionsv1alpha1.RunnerSpec{
|
||||
Repository: "test/valid",
|
||||
Image: "bar",
|
||||
@@ -553,7 +906,11 @@ var _ = Context("INTEGRATION: Inside of a new namespace", func() {
|
||||
MinReplicas: intPtr(1),
|
||||
MaxReplicas: intPtr(5),
|
||||
ScaleDownDelaySecondsAfterScaleUp: intPtr(1),
|
||||
Metrics: nil,
|
||||
Metrics: []actionsv1alpha1.MetricSpec{
|
||||
{
|
||||
Type: actionsv1alpha1.AutoscalingMetricTypeTotalNumberOfQueuedAndInProgressWorkflowRuns,
|
||||
},
|
||||
},
|
||||
ScaleUpTriggers: []actionsv1alpha1.ScaleUpTrigger{
|
||||
{
|
||||
GitHubEvent: &actionsv1alpha1.GitHubEventScaleUpTriggerSpec{
|
||||
@@ -599,6 +956,110 @@ var _ = Context("INTEGRATION: Inside of a new namespace", func() {
|
||||
env.ExpectRegisteredNumberCountEventuallyEquals(5, "count of fake list runners")
|
||||
})
|
||||
|
||||
It("should create and scale user's repository runners only on check_run event", func() {
|
||||
name := "example-runnerdeploy"
|
||||
|
||||
{
|
||||
rd := &actionsv1alpha1.RunnerDeployment{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Namespace: ns.Name,
|
||||
},
|
||||
Spec: actionsv1alpha1.RunnerDeploymentSpec{
|
||||
Replicas: intPtr(1),
|
||||
Selector: &metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
},
|
||||
Template: actionsv1alpha1.RunnerTemplate{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
},
|
||||
Spec: actionsv1alpha1.RunnerSpec{
|
||||
Repository: "test/valid",
|
||||
Image: "bar",
|
||||
Group: "baz",
|
||||
Env: []corev1.EnvVar{
|
||||
{Name: "FOO", Value: "FOOVALUE"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
ExpectCreate(ctx, rd, "test RunnerDeployment")
|
||||
ExpectRunnerSetsCountEventuallyEquals(ctx, ns.Name, 1)
|
||||
ExpectRunnerSetsManagedReplicasCountEventuallyEquals(ctx, ns.Name, 1)
|
||||
}
|
||||
|
||||
{
|
||||
env.ExpectRegisteredNumberCountEventuallyEquals(1, "count of fake list runners")
|
||||
}
|
||||
|
||||
// Scale-up to 3 replicas by the default TotalNumberOfQueuedAndInProgressWorkflowRuns-based scaling
|
||||
// See workflowRunsFor3Replicas_queued and workflowRunsFor3Replicas_in_progress for GitHub List-Runners API responses
|
||||
// used while testing.
|
||||
{
|
||||
hra := &actionsv1alpha1.HorizontalRunnerAutoscaler{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Namespace: ns.Name,
|
||||
},
|
||||
Spec: actionsv1alpha1.HorizontalRunnerAutoscalerSpec{
|
||||
ScaleTargetRef: actionsv1alpha1.ScaleTargetRef{
|
||||
Name: name,
|
||||
},
|
||||
MinReplicas: intPtr(1),
|
||||
MaxReplicas: intPtr(5),
|
||||
ScaleDownDelaySecondsAfterScaleUp: intPtr(1),
|
||||
ScaleUpTriggers: []actionsv1alpha1.ScaleUpTrigger{
|
||||
{
|
||||
GitHubEvent: &actionsv1alpha1.GitHubEventScaleUpTriggerSpec{
|
||||
CheckRun: &actionsv1alpha1.CheckRunSpec{
|
||||
Types: []string{"created"},
|
||||
Status: "pending",
|
||||
},
|
||||
},
|
||||
Amount: 1,
|
||||
Duration: metav1.Duration{Duration: time.Minute},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
ExpectCreate(ctx, hra, "test HorizontalRunnerAutoscaler")
|
||||
|
||||
ExpectRunnerSetsCountEventuallyEquals(ctx, ns.Name, 1)
|
||||
ExpectRunnerSetsManagedReplicasCountEventuallyEquals(ctx, ns.Name, 1)
|
||||
}
|
||||
|
||||
{
|
||||
env.ExpectRegisteredNumberCountEventuallyEquals(1, "count of fake list runners")
|
||||
}
|
||||
|
||||
// Scale-up to 2 replicas on first check_run create webhook event
|
||||
{
|
||||
env.SendUserCheckRunEvent("test", "valid", "pending", "created")
|
||||
ExpectRunnerSetsCountEventuallyEquals(ctx, ns.Name, 1, "runner sets after webhook")
|
||||
ExpectRunnerSetsManagedReplicasCountEventuallyEquals(ctx, ns.Name, 2, "runners after first webhook event")
|
||||
}
|
||||
|
||||
{
|
||||
env.ExpectRegisteredNumberCountEventuallyEquals(2, "count of fake list runners")
|
||||
}
|
||||
|
||||
// Scale-up to 3 replicas on second check_run create webhook event
|
||||
{
|
||||
env.SendUserCheckRunEvent("test", "valid", "pending", "created")
|
||||
ExpectRunnerSetsManagedReplicasCountEventuallyEquals(ctx, ns.Name, 3, "runners after second webhook event")
|
||||
}
|
||||
|
||||
env.ExpectRegisteredNumberCountEventuallyEquals(3, "count of fake list runners")
|
||||
})
|
||||
|
||||
})
|
||||
})
|
||||
|
||||
@@ -783,6 +1244,44 @@ func ExpectRunnerSetsCountEventuallyEquals(ctx context.Context, ns string, count
|
||||
time.Second*10, time.Millisecond*500).Should(BeEquivalentTo(count), optionalDescription...)
|
||||
}
|
||||
|
||||
func ExpectRunnerCountEventuallyEquals(ctx context.Context, ns string, count int, optionalDescription ...interface{}) {
|
||||
runners := actionsv1alpha1.RunnerList{Items: []actionsv1alpha1.Runner{}}
|
||||
|
||||
EventuallyWithOffset(
|
||||
1,
|
||||
func() int {
|
||||
err := k8sClient.List(ctx, &runners, client.InNamespace(ns))
|
||||
if err != nil {
|
||||
logf.Log.Error(err, "list runner sets")
|
||||
}
|
||||
|
||||
var running int
|
||||
|
||||
for _, r := range runners.Items {
|
||||
if r.Status.Phase == string(corev1.PodRunning) {
|
||||
running++
|
||||
} else {
|
||||
var pod corev1.Pod
|
||||
if err := k8sClient.Get(ctx, types.NamespacedName{Namespace: ns, Name: r.Name}, &pod); err != nil {
|
||||
logf.Log.Error(err, "simulating pod controller")
|
||||
continue
|
||||
}
|
||||
|
||||
copy := pod.DeepCopy()
|
||||
copy.Status.Phase = corev1.PodRunning
|
||||
|
||||
if err := k8sClient.Status().Patch(ctx, copy, client.MergeFrom(&pod)); err != nil {
|
||||
logf.Log.Error(err, "simulating pod controller")
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return running
|
||||
},
|
||||
time.Second*10, time.Millisecond*500).Should(BeEquivalentTo(count), optionalDescription...)
|
||||
}
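Note: besides counting, this helper stands in for the pod controller that the envtest environment doesn't run - any runner pod found not yet Running is patched to Running so the count can converge. A minimal usage sketch in a test body (the trailing description is optional):

// Wait until 3 runner pods are (simulated as) Running in the test namespace.
ExpectRunnerCountEventuallyEquals(ctx, ns.Name, 3, "runners after scale-up")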
|
||||
|
||||
func ExpectRunnerSetsManagedReplicasCountEventuallyEquals(ctx context.Context, ns string, count int, optionalDescription ...interface{}) {
|
||||
runnerSets := actionsv1alpha1.RunnerReplicaSetList{Items: []actionsv1alpha1.RunnerReplicaSet{}}
67
controllers/metrics/horizontalrunnerautoscaler.go
Normal file
@@ -0,0 +1,67 @@
package metrics

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/summerwind/actions-runner-controller/api/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

const (
	hraName      = "horizontalrunnerautoscaler"
	hraNamespace = "namespace"
)

var (
	horizontalRunnerAutoscalerMetrics = []prometheus.Collector{
		horizontalRunnerAutoscalerMinReplicas,
		horizontalRunnerAutoscalerMaxReplicas,
		horizontalRunnerAutoscalerDesiredReplicas,
	}
)

var (
	horizontalRunnerAutoscalerMinReplicas = prometheus.NewGaugeVec(
		prometheus.GaugeOpts{
			Name: "horizontalrunnerautoscaler_spec_min_replicas",
			Help: "minReplicas of HorizontalRunnerAutoscaler",
		},
		[]string{hraName, hraNamespace},
	)
	horizontalRunnerAutoscalerMaxReplicas = prometheus.NewGaugeVec(
		prometheus.GaugeOpts{
			Name: "horizontalrunnerautoscaler_spec_max_replicas",
			Help: "maxReplicas of HorizontalRunnerAutoscaler",
		},
		[]string{hraName, hraNamespace},
	)
	horizontalRunnerAutoscalerDesiredReplicas = prometheus.NewGaugeVec(
		prometheus.GaugeOpts{
			Name: "horizontalrunnerautoscaler_status_desired_replicas",
			Help: "desiredReplicas of HorizontalRunnerAutoscaler",
		},
		[]string{hraName, hraNamespace},
	)
)

func SetHorizontalRunnerAutoscalerSpec(o metav1.ObjectMeta, spec v1alpha1.HorizontalRunnerAutoscalerSpec) {
	labels := prometheus.Labels{
		hraName:      o.Name,
		hraNamespace: o.Namespace,
	}
	if spec.MaxReplicas != nil {
		horizontalRunnerAutoscalerMaxReplicas.With(labels).Set(float64(*spec.MaxReplicas))
	}
	if spec.MinReplicas != nil {
		horizontalRunnerAutoscalerMinReplicas.With(labels).Set(float64(*spec.MinReplicas))
	}
}

func SetHorizontalRunnerAutoscalerStatus(o metav1.ObjectMeta, status v1alpha1.HorizontalRunnerAutoscalerStatus) {
	labels := prometheus.Labels{
		hraName:      o.Name,
		hraNamespace: o.Namespace,
	}
	if status.DesiredReplicas != nil {
		horizontalRunnerAutoscalerDesiredReplicas.With(labels).Set(float64(*status.DesiredReplicas))
	}
}
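A minimal sketch of how a reconciler would feed these gauges. The HorizontalRunnerAutoscaler controller hunk is not included in this compare view, so the call site below is illustrative, assuming an hra object fetched earlier in Reconcile:

// Illustrative: refresh the gauges once per reconciliation of an HRA object.
metrics.SetHorizontalRunnerAutoscalerSpec(hra.ObjectMeta, hra.Spec)
metrics.SetHorizontalRunnerAutoscalerStatus(hra.ObjectMeta, hra.Status)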
14
controllers/metrics/metrics.go
Normal file
@@ -0,0 +1,14 @@
// Package metrics provides the metrics of custom resources such as HRA.
//
// This depends on the metrics exporter of kubebuilder.
// See https://book.kubebuilder.io/reference/metrics.html for details.
package metrics

import (
	"sigs.k8s.io/controller-runtime/pkg/metrics"
)

func init() {
	metrics.Registry.MustRegister(runnerDeploymentMetrics...)
	metrics.Registry.MustRegister(horizontalRunnerAutoscalerMetrics...)
}
37
controllers/metrics/runnerdeployment.go
Normal file
@@ -0,0 +1,37 @@
package metrics

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/summerwind/actions-runner-controller/api/v1alpha1"
)

const (
	rdName      = "runnerdeployment"
	rdNamespace = "namespace"
)

var (
	runnerDeploymentMetrics = []prometheus.Collector{
		runnerDeploymentReplicas,
	}
)

var (
	runnerDeploymentReplicas = prometheus.NewGaugeVec(
		prometheus.GaugeOpts{
			Name: "runnerdeployment_spec_replicas",
			Help: "replicas of RunnerDeployment",
		},
		[]string{rdName, rdNamespace},
	)
)

func SetRunnerDeployment(rd v1alpha1.RunnerDeployment) {
	labels := prometheus.Labels{
		rdName:      rd.Name,
		rdNamespace: rd.Namespace,
	}
	if rd.Spec.Replicas != nil {
		runnerDeploymentReplicas.With(labels).Set(float64(*rd.Spec.Replicas))
	}
}
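For reference, the runnerdeployment controller hunk later in this diff wires this in with a single call near the top of Reconcile, right after the RunnerDeployment is fetched:

// Export spec.replicas of this RunnerDeployment as a Prometheus gauge.
metrics.SetRunnerDeployment(rd)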
|
||||
@@ -20,11 +20,13 @@ import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
gogithub "github.com/google/go-github/v33/github"
|
||||
"github.com/summerwind/actions-runner-controller/hash"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
gogithub "github.com/google/go-github/v33/github"
|
||||
"github.com/summerwind/actions-runner-controller/hash"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
|
||||
"github.com/go-logr/logr"
|
||||
kerrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
@@ -51,12 +53,15 @@ const (
|
||||
// RunnerReconciler reconciles a Runner object
|
||||
type RunnerReconciler struct {
|
||||
client.Client
|
||||
Log logr.Logger
|
||||
Recorder record.EventRecorder
|
||||
Scheme *runtime.Scheme
|
||||
GitHubClient *github.Client
|
||||
RunnerImage string
|
||||
DockerImage string
|
||||
Log logr.Logger
|
||||
Recorder record.EventRecorder
|
||||
Scheme *runtime.Scheme
|
||||
GitHubClient *github.Client
|
||||
RunnerImage string
|
||||
DockerImage string
|
||||
Name string
|
||||
RegistrationRecheckInterval time.Duration
|
||||
RegistrationRecheckJitter time.Duration
|
||||
}
|
||||
|
||||
// +kubebuilder:rbac:groups=actions.summerwind.dev,resources=runners,verbs=get;list;watch;create;update;patch;delete
|
||||
@@ -129,8 +134,8 @@ func (r *RunnerReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
|
||||
newRunner := runner.DeepCopy()
|
||||
newRunner.ObjectMeta.Finalizers = finalizers
|
||||
|
||||
if err := r.Update(ctx, newRunner); err != nil {
|
||||
log.Error(err, "Failed to update runner")
|
||||
if err := r.Patch(ctx, newRunner, client.MergeFrom(&runner)); err != nil {
|
||||
log.Error(err, "Failed to update runner for finalizer removal")
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
@@ -159,31 +164,25 @@ func (r *RunnerReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
|
||||
}
|
||||
|
||||
if err := r.Create(ctx, &newPod); err != nil {
|
||||
if kerrors.IsAlreadyExists(err) {
|
||||
// Gracefully handle pod-already-exists errors due to informer cache delay.
|
||||
// Without this we got a few errors like the below on new runner pod:
|
||||
// 2021-03-16T00:23:10.116Z ERROR controller-runtime.controller Reconciler error {"controller": "runner-controller", "request": "default/example-runnerdeploy-b2g2g-j4mcp", "error": "pods \"example-runnerdeploy-b2g2g-j4mcp\" already exists"}
|
||||
log.Info(
|
||||
"Failed to create pod due to AlreadyExists error. Probably this pod has been already created in previous reconcilation but is still not in the informer cache. Will retry on pod created. If it doesn't repeat, there's no problem",
|
||||
)
|
||||
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
log.Error(err, "Failed to create pod resource")
|
||||
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
r.Recorder.Event(&runner, corev1.EventTypeNormal, "PodCreated", fmt.Sprintf("Created pod '%s'", newPod.Name))
|
||||
log.Info("Created runner pod", "repository", runner.Spec.Repository)
|
||||
} else {
|
||||
// If pod has ended up succeeded we need to restart it
|
||||
// Happens e.g. when dind is in runner and run completes
|
||||
restart := pod.Status.Phase == corev1.PodSucceeded
|
||||
|
||||
if !restart && runner.Status.Phase != string(pod.Status.Phase) {
|
||||
updated := runner.DeepCopy()
|
||||
updated.Status.Phase = string(pod.Status.Phase)
|
||||
updated.Status.Reason = pod.Status.Reason
|
||||
updated.Status.Message = pod.Status.Message
|
||||
|
||||
if err := r.Status().Update(ctx, updated); err != nil {
|
||||
log.Error(err, "Failed to update runner status")
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
if !pod.ObjectMeta.DeletionTimestamp.IsZero() {
|
||||
deletionTimeout := 1 * time.Minute
|
||||
currentTime := time.Now()
|
||||
@@ -191,7 +190,7 @@ func (r *RunnerReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
|
||||
|
||||
if deletionDidTimeout {
|
||||
log.Info(
|
||||
"Pod failed to delete itself in a timely manner. "+
|
||||
fmt.Sprintf("Failed to delete pod within %s. ", deletionTimeout)+
|
||||
"This is typically the case when a Kubernetes node became unreachable "+
|
||||
"and the kube controller started evicting nodes. Forcefully deleting the pod to not get stuck.",
|
||||
"podDeletionTimestamp", pod.DeletionTimestamp,
|
||||
@@ -220,6 +219,10 @@ func (r *RunnerReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
|
||||
}
|
||||
}
|
||||
|
||||
// If pod has ended up succeeded we need to restart it
|
||||
// Happens e.g. when dind is in runner and run completes
|
||||
restart := pod.Status.Phase == corev1.PodSucceeded
|
||||
|
||||
if pod.Status.Phase == corev1.PodRunning {
|
||||
for _, status := range pod.Status.ContainerStatuses {
|
||||
if status.Name != containerName {
|
||||
@@ -244,24 +247,61 @@ func (r *RunnerReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
var registrationRecheckDelay time.Duration
|
||||
|
||||
// all checks done below only decide whether a restart is needed
|
||||
// if a restart was already decided before, there is no need for the checks
|
||||
// saving API calls and scary log messages
if !restart {
|
||||
registrationCheckInterval := time.Minute
|
||||
if r.RegistrationRecheckInterval > 0 {
|
||||
registrationCheckInterval = r.RegistrationRecheckInterval
|
||||
}
|
||||
|
||||
notRegistered := false
|
||||
// We want to call ListRunners GitHub Actions API only once per runner per minute.
|
||||
// This if block, in conjunction with:
|
||||
// return ctrl.Result{RequeueAfter: registrationRecheckDelay}, nil
|
||||
// achieves that.
|
||||
if lastCheckTime := runner.Status.LastRegistrationCheckTime; lastCheckTime != nil {
|
||||
nextCheckTime := lastCheckTime.Add(registrationCheckInterval)
|
||||
now := time.Now()
|
||||
|
||||
// Requeue scheduled by RequeueAfter can happen a bit earlier (like dozens of milliseconds)
|
||||
// so to avoid excessive, in-effective retry, we heuristically ignore the remaining delay in case it is
|
||||
// shorter than 1s
|
||||
requeueAfter := nextCheckTime.Sub(now) - time.Second
|
||||
if requeueAfter > 0 {
|
||||
log.Info(
|
||||
fmt.Sprintf("Skipped registration check because it's deferred until %s. Retrying in %s at latest", nextCheckTime, requeueAfter),
|
||||
"lastRegistrationCheckTime", lastCheckTime,
|
||||
"registrationCheckInterval", registrationCheckInterval,
|
||||
)
|
||||
|
||||
// Without RequeueAfter, the controller may not retry on scheduled. Instead, it must wait until the
|
||||
// next sync period passes, which can be too much later than nextCheckTime.
|
||||
//
|
||||
// We need to requeue on this reconciliation even though we have already scheduled the initial
// requeue previously with `return ctrl.Result{RequeueAfter: registrationRecheckDelay}, nil`.
// Apparently, the workqueue used by controller-runtime deduplicates requests and resets the delay on
// other requeues - so the initial scheduled requeue may have been reset due to a requeue on
// spec/status change.
|
||||
return ctrl.Result{RequeueAfter: requeueAfter}, nil
|
||||
}
|
||||
}
|
||||
|
||||
notFound := false
|
||||
offline := false
|
||||
|
||||
runnerBusy, err := r.GitHubClient.IsRunnerBusy(ctx, runner.Spec.Enterprise, runner.Spec.Organization, runner.Spec.Repository, runner.Name)
|
||||
|
||||
currentTime := time.Now()
|
||||
|
||||
if err != nil {
|
||||
var notFoundException *github.RunnerNotFound
|
||||
var offlineException *github.RunnerOffline
|
||||
if errors.As(err, ¬FoundException) {
|
||||
log.V(1).Info("Failed to check if runner is busy. Either this runner has never been successfully registered to GitHub or it still needs more time.", "runnerName", runner.Name)
|
||||
|
||||
notRegistered = true
|
||||
notFound = true
|
||||
} else if errors.As(err, &offlineException) {
|
||||
log.V(1).Info("GitHub runner appears to be offline, waiting for runner to get online ...", "runnerName", runner.Name)
|
||||
offline = true
|
||||
} else {
|
||||
var e *gogithub.RateLimitError
|
||||
@@ -293,40 +333,96 @@ func (r *RunnerReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
|
||||
}
|
||||
|
||||
registrationTimeout := 10 * time.Minute
|
||||
currentTime := time.Now()
|
||||
registrationDidTimeout := currentTime.Sub(pod.CreationTimestamp.Add(registrationTimeout)) > 0
|
||||
durationAfterRegistrationTimeout := currentTime.Sub(pod.CreationTimestamp.Add(registrationTimeout))
|
||||
registrationDidTimeout := durationAfterRegistrationTimeout > 0
|
||||
|
||||
if notRegistered && registrationDidTimeout {
|
||||
log.Info(
|
||||
"Runner failed to register itself to GitHub in timely manner. "+
|
||||
"Recreating the pod to see if it resolves the issue. "+
|
||||
"CAUTION: If you see this a lot, you should investigate the root cause. "+
|
||||
"See https://github.com/summerwind/actions-runner-controller/issues/288",
|
||||
"podCreationTimestamp", pod.CreationTimestamp,
|
||||
"currentTime", currentTime,
|
||||
"configuredRegistrationTimeout", registrationTimeout,
|
||||
)
|
||||
if notFound {
|
||||
if registrationDidTimeout {
|
||||
log.Info(
|
||||
"Runner failed to register itself to GitHub in timely manner. "+
|
||||
"Recreating the pod to see if it resolves the issue. "+
|
||||
"CAUTION: If you see this a lot, you should investigate the root cause. "+
|
||||
"See https://github.com/summerwind/actions-runner-controller/issues/288",
|
||||
"podCreationTimestamp", pod.CreationTimestamp,
|
||||
"currentTime", currentTime,
|
||||
"configuredRegistrationTimeout", registrationTimeout,
|
||||
)
|
||||
|
||||
restart = true
|
||||
restart = true
|
||||
} else {
|
||||
log.V(1).Info(
|
||||
"Runner pod exists but we failed to check if runner is busy. Apparently it still needs more time.",
|
||||
"runnerName", runner.Name,
|
||||
)
|
||||
}
|
||||
} else if offline {
|
||||
if registrationDidTimeout {
|
||||
log.Info(
|
||||
"Already existing GitHub runner still appears offline . "+
|
||||
"Recreating the pod to see if it resolves the issue. "+
|
||||
"CAUTION: If you see this a lot, you should investigate the root cause. ",
|
||||
"podCreationTimestamp", pod.CreationTimestamp,
|
||||
"currentTime", currentTime,
|
||||
"configuredRegistrationTimeout", registrationTimeout,
|
||||
)
|
||||
|
||||
restart = true
|
||||
} else {
|
||||
log.V(1).Info(
|
||||
"Runner pod exists but the GitHub runner appears to be still offline. Waiting for runner to get online ...",
|
||||
"runnerName", runner.Name,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
if offline && registrationDidTimeout {
|
||||
log.Info(
|
||||
"Already existing GitHub runner still appears offline . "+
|
||||
"Recreating the pod to see if it resolves the issue. "+
|
||||
"CAUTION: If you see this a lot, you should investigate the root cause. ",
|
||||
"podCreationTimestamp", pod.CreationTimestamp,
|
||||
"currentTime", currentTime,
|
||||
"configuredRegistrationTimeout", registrationTimeout,
|
||||
)
|
||||
if (notFound || offline) && !registrationDidTimeout {
|
||||
registrationRecheckJitter := 10 * time.Second
|
||||
if r.RegistrationRecheckJitter > 0 {
|
||||
registrationRecheckJitter = r.RegistrationRecheckJitter
|
||||
}
|
||||
|
||||
restart = true
|
||||
registrationRecheckDelay = registrationCheckInterval + wait.Jitter(registrationRecheckJitter, 0.1)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// Don't do anything if there's no need to restart the runner
|
||||
if !restart {
|
||||
// This guard enables us to update runner.Status.Phase to `Running` only after
|
||||
// the runner is registered to GitHub.
|
||||
if registrationRecheckDelay > 0 {
|
||||
log.V(1).Info(fmt.Sprintf("Rechecking the runner registration in %s", registrationRecheckDelay))
|
||||
|
||||
updated := runner.DeepCopy()
|
||||
updated.Status.LastRegistrationCheckTime = &metav1.Time{Time: time.Now()}
|
||||
|
||||
if err := r.Status().Patch(ctx, updated, client.MergeFrom(&runner)); err != nil {
|
||||
log.Error(err, "Failed to update runner status for LastRegistrationCheckTime")
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
return ctrl.Result{RequeueAfter: registrationRecheckDelay}, nil
|
||||
}
|
||||
|
||||
if runner.Status.Phase != string(pod.Status.Phase) {
|
||||
if pod.Status.Phase == corev1.PodRunning {
|
||||
// Seeing this message, you can expect the runner to become `Running` soon.
|
||||
log.Info(
|
||||
"Runner appears to have registered and running.",
|
||||
"podCreationTimestamp", pod.CreationTimestamp,
|
||||
)
|
||||
}
|
||||
|
||||
updated := runner.DeepCopy()
|
||||
updated.Status.Phase = string(pod.Status.Phase)
|
||||
updated.Status.Reason = pod.Status.Reason
|
||||
updated.Status.Message = pod.Status.Message
|
||||
|
||||
if err := r.Status().Patch(ctx, updated, client.MergeFrom(&runner)); err != nil {
|
||||
log.Error(err, "Failed to update runner status for Phase/Reason/Message")
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
}
|
||||
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
@@ -394,8 +490,8 @@ func (r *RunnerReconciler) updateRegistrationToken(ctx context.Context, runner v
|
||||
ExpiresAt: metav1.NewTime(rt.GetExpiresAt().Time),
|
||||
}
|
||||
|
||||
if err := r.Status().Update(ctx, updated); err != nil {
|
||||
log.Error(err, "Failed to update runner status")
|
||||
if err := r.Status().Patch(ctx, updated, client.MergeFrom(&runner)); err != nil {
|
||||
log.Error(err, "Failed to update runner status for Registration")
|
||||
return false, err
|
||||
}
|
||||
|
||||
@@ -530,45 +626,72 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) {
|
||||
},
|
||||
}
|
||||
|
||||
if !dockerdInRunner && dockerEnabled {
|
||||
runnerVolumeName := "runner"
|
||||
runnerVolumeMountPath := "/runner"
|
||||
|
||||
pod.Spec.Volumes = []corev1.Volume{
|
||||
if mtu := runner.Spec.DockerMTU; mtu != nil && dockerdInRunner {
|
||||
pod.Spec.Containers[0].Env = append(pod.Spec.Containers[0].Env, []corev1.EnvVar{
|
||||
{
|
||||
Name: "MTU",
|
||||
Value: fmt.Sprintf("%d", *runner.Spec.DockerMTU),
|
||||
},
|
||||
}...)
|
||||
}
|
||||
|
||||
//
// /runner must be generated at runtime from /runnertmp embedded in the container image.
//
// When you're NOT using dindWithinRunner=true,
// it must also be shared with the dind container as it seems to be required to run docker steps.
//
|
||||
|
||||
runnerVolumeName := "runner"
|
||||
runnerVolumeMountPath := "/runner"
|
||||
runnerVolumeEmptyDir := &corev1.EmptyDirVolumeSource{}
|
||||
|
||||
if runner.Spec.VolumeSizeLimit != nil {
|
||||
runnerVolumeEmptyDir.SizeLimit = runner.Spec.VolumeSizeLimit
|
||||
}
|
||||
|
||||
pod.Spec.Volumes = append(pod.Spec.Volumes,
|
||||
corev1.Volume{
|
||||
Name: runnerVolumeName,
|
||||
VolumeSource: corev1.VolumeSource{
|
||||
EmptyDir: runnerVolumeEmptyDir,
|
||||
},
|
||||
},
|
||||
)
|
||||
|
||||
pod.Spec.Containers[0].VolumeMounts = append(pod.Spec.Containers[0].VolumeMounts,
|
||||
corev1.VolumeMount{
|
||||
Name: runnerVolumeName,
|
||||
MountPath: runnerVolumeMountPath,
|
||||
},
|
||||
)
|
||||
|
||||
if !dockerdInRunner && dockerEnabled {
|
||||
pod.Spec.Volumes = append(pod.Spec.Volumes,
|
||||
corev1.Volume{
|
||||
Name: "work",
|
||||
VolumeSource: corev1.VolumeSource{
|
||||
EmptyDir: &corev1.EmptyDirVolumeSource{},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: runnerVolumeName,
|
||||
VolumeSource: corev1.VolumeSource{
|
||||
EmptyDir: &corev1.EmptyDirVolumeSource{},
|
||||
},
|
||||
},
|
||||
{
|
||||
corev1.Volume{
|
||||
Name: "certs-client",
|
||||
VolumeSource: corev1.VolumeSource{
|
||||
EmptyDir: &corev1.EmptyDirVolumeSource{},
|
||||
},
|
||||
},
|
||||
}
|
||||
pod.Spec.Containers[0].VolumeMounts = []corev1.VolumeMount{
|
||||
{
|
||||
)
|
||||
pod.Spec.Containers[0].VolumeMounts = append(pod.Spec.Containers[0].VolumeMounts,
|
||||
corev1.VolumeMount{
|
||||
Name: "work",
|
||||
MountPath: workDir,
|
||||
},
|
||||
{
|
||||
Name: runnerVolumeName,
|
||||
MountPath: runnerVolumeMountPath,
|
||||
},
|
||||
{
|
||||
corev1.VolumeMount{
|
||||
Name: "certs-client",
|
||||
MountPath: "/certs/client",
|
||||
ReadOnly: true,
|
||||
},
|
||||
}
|
||||
)
|
||||
pod.Spec.Containers[0].Env = append(pod.Spec.Containers[0].Env, []corev1.EnvVar{
|
||||
{
|
||||
Name: "DOCKER_HOST",
|
||||
@@ -583,23 +706,31 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) {
|
||||
Value: "/certs/client",
|
||||
},
|
||||
}...)
|
||||
pod.Spec.Containers = append(pod.Spec.Containers, corev1.Container{
|
||||
Name: "docker",
|
||||
Image: r.DockerImage,
|
||||
VolumeMounts: []corev1.VolumeMount{
|
||||
{
|
||||
Name: "work",
|
||||
MountPath: workDir,
|
||||
},
|
||||
{
|
||||
Name: runnerVolumeName,
|
||||
MountPath: runnerVolumeMountPath,
|
||||
},
|
||||
{
|
||||
Name: "certs-client",
|
||||
MountPath: "/certs/client",
|
||||
},
|
||||
|
||||
// Determine the volume mounts assigned to the docker sidecar. In case extra mounts are included in the RunnerSpec, append them to the standard
|
||||
// set of mounts. See https://github.com/summerwind/actions-runner-controller/issues/435 for context.
|
||||
dockerVolumeMounts := []corev1.VolumeMount{
|
||||
{
|
||||
Name: "work",
|
||||
MountPath: workDir,
|
||||
},
|
||||
{
|
||||
Name: runnerVolumeName,
|
||||
MountPath: runnerVolumeMountPath,
|
||||
},
|
||||
{
|
||||
Name: "certs-client",
|
||||
MountPath: "/certs/client",
|
||||
},
|
||||
}
|
||||
if extraDockerVolumeMounts := runner.Spec.DockerVolumeMounts; extraDockerVolumeMounts != nil {
|
||||
dockerVolumeMounts = append(dockerVolumeMounts, extraDockerVolumeMounts...)
|
||||
}
|
||||
|
||||
pod.Spec.Containers = append(pod.Spec.Containers, corev1.Container{
|
||||
Name: "docker",
|
||||
Image: r.DockerImage,
|
||||
VolumeMounts: dockerVolumeMounts,
|
||||
Env: []corev1.EnvVar{
|
||||
{
|
||||
Name: "DOCKER_TLS_CERTDIR",
|
||||
@@ -612,6 +743,21 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) {
|
||||
Resources: runner.Spec.DockerdContainerResources,
|
||||
})
|
||||
|
||||
if mtu := runner.Spec.DockerMTU; mtu != nil {
|
||||
pod.Spec.Containers[1].Env = append(pod.Spec.Containers[1].Env, []corev1.EnvVar{
|
||||
// See https://docs.docker.com/engine/security/rootless/
|
||||
{
|
||||
Name: "DOCKERD_ROOTLESS_ROOTLESSKIT_MTU",
|
||||
Value: fmt.Sprintf("%d", *runner.Spec.DockerMTU),
|
||||
},
|
||||
}...)
|
||||
|
||||
pod.Spec.Containers[1].Args = append(pod.Spec.Containers[1].Args,
|
||||
"--mtu",
|
||||
fmt.Sprintf("%d", *runner.Spec.DockerMTU),
|
||||
)
|
||||
}
|
||||
|
||||
}
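A hedged sketch of the spec side of the MTU plumbing above, assuming DockerMTU is a *int64 field on RunnerSpec, as its use with fmt.Sprintf("%d", *runner.Spec.DockerMTU) suggests:

// Illustrative: request a 1400-byte MTU. With a dockerd sidecar this becomes
// the DOCKERD_ROOTLESS_ROOTLESSKIT_MTU env var plus "--mtu 1400" dockerd args.
mtu := int64(1400)
runner := v1alpha1.Runner{
	Spec: v1alpha1.RunnerSpec{
		Repository: "test/valid",
		DockerMTU:  &mtu,
	},
}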
|
||||
|
||||
if len(runner.Spec.Containers) != 0 {
|
||||
@@ -672,6 +818,10 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) {
|
||||
pod.Spec.TerminationGracePeriodSeconds = runner.Spec.TerminationGracePeriodSeconds
|
||||
}
|
||||
|
||||
if len(runner.Spec.HostAliases) != 0 {
|
||||
pod.Spec.HostAliases = runner.Spec.HostAliases
|
||||
}
|
||||
|
||||
if err := ctrl.SetControllerReference(&runner, &pod, r.Scheme); err != nil {
|
||||
return pod, err
|
||||
}
|
||||
@@ -681,6 +831,9 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) {
|
||||
|
||||
func (r *RunnerReconciler) SetupWithManager(mgr ctrl.Manager) error {
|
||||
name := "runner-controller"
|
||||
if r.Name != "" {
|
||||
name = r.Name
|
||||
}
|
||||
|
||||
r.Recorder = mgr.GetEventRecorderFor(name)
|
||||
|
||||
|
||||
@@ -20,6 +20,7 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
"hash/fnv"
|
||||
"reflect"
|
||||
"sort"
|
||||
"time"
|
||||
|
||||
@@ -37,10 +38,12 @@ import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
"github.com/summerwind/actions-runner-controller/api/v1alpha1"
|
||||
"github.com/summerwind/actions-runner-controller/controllers/metrics"
|
||||
)
|
||||
|
||||
const (
|
||||
LabelKeyRunnerTemplateHash = "runner-template-hash"
|
||||
LabelKeyRunnerTemplateHash = "runner-template-hash"
|
||||
LabelKeyRunnerDeploymentName = "runner-deployment-name"
|
||||
|
||||
runnerSetOwnerKey = ".metadata.controller"
|
||||
)
|
||||
@@ -75,6 +78,8 @@ func (r *RunnerDeploymentReconciler) Reconcile(req ctrl.Request) (ctrl.Result, e
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
metrics.SetRunnerDeployment(rd)
|
||||
|
||||
var myRunnerReplicaSetList v1alpha1.RunnerReplicaSetList
|
||||
if err := r.List(ctx, &myRunnerReplicaSetList, client.InNamespace(req.Namespace), client.MatchingFields{runnerSetOwnerKey: req.Name}); err != nil {
|
||||
return ctrl.Result{}, err
|
||||
@@ -143,6 +148,28 @@ func (r *RunnerDeploymentReconciler) Reconcile(req ctrl.Request) (ctrl.Result, e
|
||||
return ctrl.Result{RequeueAfter: 5 * time.Second}, nil
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(newestSet.Spec.Selector, desiredRS.Spec.Selector) {
|
||||
updateSet := newestSet.DeepCopy()
|
||||
updateSet.Spec = *desiredRS.Spec.DeepCopy()
|
||||
|
||||
// A selector update change doesn't trigger replicaset replacement,
|
||||
// but we still need to update the existing replicaset with it.
|
||||
// Otherwise selector-based runner query will never work on replicasets created before the controller v0.17.0
|
||||
// See https://github.com/summerwind/actions-runner-controller/pull/355#discussion_r585379259
|
||||
if err := r.Client.Update(ctx, updateSet); err != nil {
|
||||
log.Error(err, "Failed to update runnerreplicaset resource")
|
||||
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
// At this point, we are already sure that there's no need to create a new replicaset
|
||||
// as the runner template hash is not changed.
|
||||
//
|
||||
// But we still need to requeue for the (possibly rare) cases where there are still old replicasets that need
|
||||
// to be cleaned up.
|
||||
return ctrl.Result{RequeueAfter: 5 * time.Second}, nil
|
||||
}
|
||||
|
||||
const defaultReplicas = 1
|
||||
|
||||
currentDesiredReplicas := getIntOrDefault(newestSet.Spec.Replicas, defaultReplicas)
|
||||
@@ -165,14 +192,28 @@ func (r *RunnerDeploymentReconciler) Reconcile(req ctrl.Request) (ctrl.Result, e
|
||||
if len(oldSets) > 0 {
|
||||
readyReplicas := newestSet.Status.ReadyReplicas
|
||||
|
||||
if readyReplicas < currentDesiredReplicas {
|
||||
log.WithValues("runnerreplicaset", types.NamespacedName{
|
||||
oldSetsCount := len(oldSets)
|
||||
|
||||
logWithDebugInfo := log.WithValues(
|
||||
"newest_runnerreplicaset", types.NamespacedName{
|
||||
Namespace: newestSet.Namespace,
|
||||
Name: newestSet.Name,
|
||||
}).
|
||||
Info("Waiting until the newest runner replica set to be 100% available")
|
||||
},
|
||||
"newest_runnerreplicaset_replicas_ready", readyReplicas,
|
||||
"newest_runnerreplicaset_replicas_desired", currentDesiredReplicas,
|
||||
"old_runnerreplicasets_count", oldSetsCount,
|
||||
)
|
||||
|
||||
return ctrl.Result{RequeueAfter: 10 * time.Second}, nil
|
||||
if readyReplicas < currentDesiredReplicas {
|
||||
logWithDebugInfo.
|
||||
Info("Waiting until the newest runnerreplicaset to be 100% available")
|
||||
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
if oldSetsCount > 0 {
|
||||
logWithDebugInfo.
|
||||
Info("The newest runnerreplicaset is 100% available. Deleting old runnerreplicasets")
|
||||
}
|
||||
|
||||
for i := range oldSets {
|
||||
@@ -258,32 +299,94 @@ func CloneAndAddLabel(labels map[string]string, labelKey, labelValue string) map
|
||||
return newLabels
|
||||
}
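A quick illustration of CloneAndAddLabel, named in the hunk header above; it copies the map and adds one key without mutating the input:

labels := CloneAndAddLabel(map[string]string{"foo": "bar"}, LabelKeyRunnerTemplateHash, "abc123")
// labels == map[string]string{"foo": "bar", "runner-template-hash": "abc123"}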
|
||||
|
||||
func (r *RunnerDeploymentReconciler) newRunnerReplicaSet(rd v1alpha1.RunnerDeployment) (*v1alpha1.RunnerReplicaSet, error) {
|
||||
newRSTemplate := *rd.Spec.Template.DeepCopy()
|
||||
templateHash := ComputeHash(&newRSTemplate)
|
||||
// Add template hash label to selector.
|
||||
labels := CloneAndAddLabel(rd.Spec.Template.Labels, LabelKeyRunnerTemplateHash, templateHash)
|
||||
// CloneSelectorAndAddLabel clones the given selector and returns a new selector with the given key and value added.
|
||||
// Returns the given selector, if labelKey is empty.
|
||||
//
|
||||
// Proudly copied from k8s.io/kubernetes/pkg/util/labels.CloneSelectorAndAddLabel
|
||||
func CloneSelectorAndAddLabel(selector *metav1.LabelSelector, labelKey, labelValue string) *metav1.LabelSelector {
|
||||
if labelKey == "" {
|
||||
// Don't need to add a label.
|
||||
return selector
|
||||
}
|
||||
|
||||
for _, l := range r.CommonRunnerLabels {
|
||||
// Clone.
|
||||
newSelector := new(metav1.LabelSelector)
|
||||
|
||||
newSelector.MatchLabels = make(map[string]string)
|
||||
if selector.MatchLabels != nil {
|
||||
for key, val := range selector.MatchLabels {
|
||||
newSelector.MatchLabels[key] = val
|
||||
}
|
||||
}
|
||||
newSelector.MatchLabels[labelKey] = labelValue
|
||||
|
||||
if selector.MatchExpressions != nil {
|
||||
newMExps := make([]metav1.LabelSelectorRequirement, len(selector.MatchExpressions))
|
||||
for i, me := range selector.MatchExpressions {
|
||||
newMExps[i].Key = me.Key
|
||||
newMExps[i].Operator = me.Operator
|
||||
if me.Values != nil {
|
||||
newMExps[i].Values = make([]string, len(me.Values))
|
||||
copy(newMExps[i].Values, me.Values)
|
||||
} else {
|
||||
newMExps[i].Values = nil
|
||||
}
|
||||
}
|
||||
newSelector.MatchExpressions = newMExps
|
||||
} else {
|
||||
newSelector.MatchExpressions = nil
|
||||
}
|
||||
|
||||
return newSelector
|
||||
}
|
||||
|
||||
func (r *RunnerDeploymentReconciler) newRunnerReplicaSet(rd v1alpha1.RunnerDeployment) (*v1alpha1.RunnerReplicaSet, error) {
|
||||
return newRunnerReplicaSet(&rd, r.CommonRunnerLabels, r.Scheme)
|
||||
}
|
||||
|
||||
func getSelector(rd *v1alpha1.RunnerDeployment) *metav1.LabelSelector {
|
||||
selector := rd.Spec.Selector
|
||||
if selector == nil {
|
||||
selector = &metav1.LabelSelector{MatchLabels: map[string]string{LabelKeyRunnerDeploymentName: rd.Name}}
|
||||
}
|
||||
|
||||
return selector
|
||||
}
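Putting getSelector and CloneSelectorAndAddLabel together: a RunnerDeployment named example with no spec.selector ends up matched by a defaulted, hash-scoped selector (a sketch; the template hash is computed at runtime):

selector := getSelector(rd)
// => MatchLabels: {"runner-deployment-name": "example"}
selector = CloneSelectorAndAddLabel(selector, LabelKeyRunnerTemplateHash, templateHash)
// => MatchLabels: {"runner-deployment-name": "example", "runner-template-hash": "<hash>"}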
|
||||
|
||||
func newRunnerReplicaSet(rd *v1alpha1.RunnerDeployment, commonRunnerLabels []string, scheme *runtime.Scheme) (*v1alpha1.RunnerReplicaSet, error) {
|
||||
newRSTemplate := *rd.Spec.Template.DeepCopy()
|
||||
|
||||
for _, l := range commonRunnerLabels {
|
||||
newRSTemplate.Spec.Labels = append(newRSTemplate.Spec.Labels, l)
|
||||
}
|
||||
|
||||
newRSTemplate.Labels = labels
|
||||
templateHash := ComputeHash(&newRSTemplate)
|
||||
|
||||
// Add template hash label to selector.
|
||||
newRSTemplate.ObjectMeta.Labels = CloneAndAddLabel(newRSTemplate.ObjectMeta.Labels, LabelKeyRunnerTemplateHash, templateHash)
|
||||
|
||||
// This label selector is used by default when rd.Spec.Selector is empty.
|
||||
newRSTemplate.ObjectMeta.Labels = CloneAndAddLabel(newRSTemplate.ObjectMeta.Labels, LabelKeyRunnerDeploymentName, rd.Name)
|
||||
|
||||
selector := getSelector(rd)
|
||||
|
||||
newRSSelector := CloneSelectorAndAddLabel(selector, LabelKeyRunnerTemplateHash, templateHash)
|
||||
|
||||
rs := v1alpha1.RunnerReplicaSet{
|
||||
TypeMeta: metav1.TypeMeta{},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: rd.ObjectMeta.Name + "-",
|
||||
Namespace: rd.ObjectMeta.Namespace,
|
||||
Labels: labels,
|
||||
Labels: newRSTemplate.ObjectMeta.Labels,
|
||||
},
|
||||
Spec: v1alpha1.RunnerReplicaSetSpec{
|
||||
Replicas: rd.Spec.Replicas,
|
||||
Selector: newRSSelector,
|
||||
Template: newRSTemplate,
|
||||
},
|
||||
}
|
||||
|
||||
if err := ctrl.SetControllerReference(&rd, &rs, r.Scheme); err != nil {
|
||||
if err := ctrl.SetControllerReference(rd, &rs, scheme); err != nil {
|
||||
return &rs, err
|
||||
}
|
||||
|
||||
|
||||
@@ -2,11 +2,13 @@ package controllers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"github.com/google/go-cmp/cmp"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/kubernetes/scheme"
|
||||
@@ -36,7 +38,17 @@ func TestNewRunnerReplicaSet(t *testing.T) {
|
||||
Name: "example",
|
||||
},
|
||||
Spec: actionsv1alpha1.RunnerDeploymentSpec{
|
||||
Selector: &metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
},
|
||||
Template: actionsv1alpha1.RunnerTemplate{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
},
|
||||
Spec: actionsv1alpha1.RunnerSpec{
|
||||
Labels: []string{"project1"},
|
||||
},
|
||||
@@ -49,10 +61,63 @@ func TestNewRunnerReplicaSet(t *testing.T) {
|
||||
t.Fatalf("%v", err)
|
||||
}
|
||||
|
||||
want := []string{"project1", "dev"}
|
||||
if d := cmp.Diff(want, rs.Spec.Template.Spec.Labels); d != "" {
|
||||
if val, ok := rs.Labels["foo"]; ok {
|
||||
if val != "bar" {
|
||||
t.Errorf("foo label does not have bar but %v", val)
|
||||
}
|
||||
} else {
|
||||
t.Errorf("foo label does not exist")
|
||||
}
|
||||
|
||||
hash1, ok := rs.Labels[LabelKeyRunnerTemplateHash]
|
||||
if !ok {
|
||||
t.Errorf("missing runner-template-hash label")
|
||||
}
|
||||
|
||||
runnerLabel := []string{"project1", "dev"}
|
||||
if d := cmp.Diff(runnerLabel, rs.Spec.Template.Spec.Labels); d != "" {
|
||||
t.Errorf("%s", d)
|
||||
}
|
||||
|
||||
rd2 := rd.DeepCopy()
|
||||
rd2.Spec.Template.Spec.Labels = []string{"project2"}
|
||||
|
||||
rs2, err := r.newRunnerReplicaSet(*rd2)
|
||||
if err != nil {
|
||||
t.Fatalf("%v", err)
|
||||
}
|
||||
|
||||
hash2, ok := rs2.Labels[LabelKeyRunnerTemplateHash]
|
||||
if !ok {
|
||||
t.Errorf("missing runner-template-hash label")
|
||||
}
|
||||
|
||||
if hash1 == hash2 {
|
||||
t.Errorf(
|
||||
"runner replica sets from runner deployments with varying labels must have different template hash, but got %s and %s",
|
||||
hash1, hash2,
|
||||
)
|
||||
}
|
||||
|
||||
rd3 := rd.DeepCopy()
|
||||
rd3.Spec.Template.Labels["foo"] = "baz"
|
||||
|
||||
rs3, err := r.newRunnerReplicaSet(*rd3)
|
||||
if err != nil {
|
||||
t.Fatalf("%v", err)
|
||||
}
|
||||
|
||||
hash3, ok := rs3.Labels[LabelKeyRunnerTemplateHash]
|
||||
if !ok {
|
||||
t.Errorf("missing runner-template-hash label")
|
||||
}
|
||||
|
||||
if hash1 == hash3 {
|
||||
t.Errorf(
|
||||
"runner replica sets from runner deployments with varying meta labels must have different template hash, but got %s and %s",
|
||||
hash1, hash3,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
// SetupDeploymentTest will set up a testing environment.
|
||||
@@ -74,7 +139,9 @@ func SetupDeploymentTest(ctx context.Context) *corev1.Namespace {
|
||||
err := k8sClient.Create(ctx, ns)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to create test namespace")
|
||||
|
||||
mgr, err := ctrl.NewManager(cfg, ctrl.Options{})
|
||||
mgr, err := ctrl.NewManager(cfg, ctrl.Options{
|
||||
Namespace: ns.Name,
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to create manager")
|
||||
|
||||
controller := &RunnerDeploymentReconciler{
|
||||
@@ -112,7 +179,7 @@ var _ = Context("Inside of a new namespace", func() {
|
||||
Describe("when no existing resources exist", func() {
|
||||
|
||||
It("should create a new RunnerReplicaSet resource from the specified template, add a another RunnerReplicaSet on template modification, and eventually removes old runnerreplicasets", func() {
|
||||
name := "example-runnerdeploy"
|
||||
name := "example-runnerdeploy-1"
|
||||
|
||||
{
|
||||
rs := &actionsv1alpha1.RunnerDeployment{
|
||||
@@ -122,9 +189,19 @@ var _ = Context("Inside of a new namespace", func() {
|
||||
},
|
||||
Spec: actionsv1alpha1.RunnerDeploymentSpec{
|
||||
Replicas: intPtr(1),
|
||||
Selector: &metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
},
|
||||
Template: actionsv1alpha1.RunnerTemplate{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
},
|
||||
Spec: actionsv1alpha1.RunnerSpec{
|
||||
Repository: "foo/bar",
|
||||
Repository: "test/valid",
|
||||
Image: "bar",
|
||||
Env: []corev1.EnvVar{
|
||||
{Name: "FOO", Value: "FOOVALUE"},
|
||||
@@ -141,29 +218,25 @@ var _ = Context("Inside of a new namespace", func() {
|
||||
runnerSets := actionsv1alpha1.RunnerReplicaSetList{Items: []actionsv1alpha1.RunnerReplicaSet{}}
|
||||
|
||||
Eventually(
|
||||
func() int {
|
||||
err := k8sClient.List(ctx, &runnerSets, client.InNamespace(ns.Name))
|
||||
func() (int, error) {
|
||||
selector, err := metav1.LabelSelectorAsSelector(rs.Spec.Selector)
|
||||
if err != nil {
|
||||
logf.Log.Error(err, "list runner sets")
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return len(runnerSets.Items)
|
||||
},
|
||||
time.Second*5, time.Millisecond*500).Should(BeEquivalentTo(1))
|
||||
|
||||
Eventually(
|
||||
func() int {
|
||||
err := k8sClient.List(ctx, &runnerSets, client.InNamespace(ns.Name))
|
||||
err = k8sClient.List(
|
||||
ctx,
|
||||
&runnerSets,
|
||||
client.InNamespace(ns.Name),
|
||||
client.MatchingLabelsSelector{Selector: selector},
|
||||
)
|
||||
if err != nil {
|
||||
logf.Log.Error(err, "list runner sets")
|
||||
return 0, err
|
||||
}
|
||||
if len(runnerSets.Items) != 1 {
|
||||
return 0, fmt.Errorf("runnerreplicasets is not 1 but %d", len(runnerSets.Items))
|
||||
}
|
||||
|
||||
if len(runnerSets.Items) == 0 {
|
||||
logf.Log.Info("No runnerreplicasets exist yet")
|
||||
return -1
|
||||
}
|
||||
|
||||
return *runnerSets.Items[0].Spec.Replicas
|
||||
return *runnerSets.Items[0].Spec.Replicas, nil
|
||||
},
|
||||
time.Second*5, time.Millisecond*500).Should(BeEquivalentTo(1))
|
||||
}
|
||||
@@ -172,13 +245,12 @@ var _ = Context("Inside of a new namespace", func() {
|
||||
// We wrap the update in the Eventually block to avoid the below error that occurs due to concurrent modification
|
||||
// made by the controller to update .Status.AvailableReplicas and .Status.ReadyReplicas
|
||||
// Operation cannot be fulfilled on runnersets.actions.summerwind.dev "example-runnerset": the object has been modified; please apply your changes to the latest version and try again
|
||||
var rd actionsv1alpha1.RunnerDeployment
|
||||
Eventually(func() error {
|
||||
var rd actionsv1alpha1.RunnerDeployment
|
||||
|
||||
err := k8sClient.Get(ctx, types.NamespacedName{Namespace: ns.Name, Name: name}, &rd)
|
||||
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to get test RunnerReplicaSet resource")
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get test RunnerReplicaSet resource: %v\n", err)
|
||||
}
|
||||
rd.Spec.Replicas = intPtr(2)
|
||||
|
||||
return k8sClient.Update(ctx, &rd)
|
||||
@@ -188,27 +260,222 @@ var _ = Context("Inside of a new namespace", func() {
|
||||
runnerSets := actionsv1alpha1.RunnerReplicaSetList{Items: []actionsv1alpha1.RunnerReplicaSet{}}
|
||||
|
||||
Eventually(
|
||||
func() int {
|
||||
err := k8sClient.List(ctx, &runnerSets, client.InNamespace(ns.Name))
|
||||
func() (int, error) {
|
||||
selector, err := metav1.LabelSelectorAsSelector(rd.Spec.Selector)
|
||||
if err != nil {
|
||||
logf.Log.Error(err, "list runner sets")
|
||||
return 0, err
|
||||
}
|
||||
err = k8sClient.List(
|
||||
ctx,
|
||||
&runnerSets,
|
||||
client.InNamespace(ns.Name),
|
||||
client.MatchingLabelsSelector{Selector: selector},
|
||||
)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if len(runnerSets.Items) != 1 {
|
||||
return 0, fmt.Errorf("runnerreplicasets is not 1 but %d", len(runnerSets.Items))
|
||||
}
|
||||
|
||||
return len(runnerSets.Items)
|
||||
},
|
||||
time.Second*5, time.Millisecond*500).Should(BeEquivalentTo(1))
|
||||
|
||||
Eventually(
|
||||
func() int {
|
||||
err := k8sClient.List(ctx, &runnerSets, client.InNamespace(ns.Name))
|
||||
if err != nil {
|
||||
logf.Log.Error(err, "list runner sets")
|
||||
}
|
||||
|
||||
return *runnerSets.Items[0].Spec.Replicas
|
||||
return *runnerSets.Items[0].Spec.Replicas, nil
|
||||
},
|
||||
time.Second*5, time.Millisecond*500).Should(BeEquivalentTo(2))
|
||||
}
|
||||
})
|
||||
|
||||
It("should create a new RunnerReplicaSet resource from the specified template without labels and selector, add a another RunnerReplicaSet on template modification, and eventually removes old runnerreplicasets", func() {
|
||||
name := "example-runnerdeploy-2"
|
||||
|
||||
{
|
||||
rs := &actionsv1alpha1.RunnerDeployment{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Namespace: ns.Name,
|
||||
},
|
||||
Spec: actionsv1alpha1.RunnerDeploymentSpec{
|
||||
Replicas: intPtr(1),
|
||||
Template: actionsv1alpha1.RunnerTemplate{
|
||||
Spec: actionsv1alpha1.RunnerSpec{
|
||||
Repository: "test/valid",
|
||||
Image: "bar",
|
||||
Env: []corev1.EnvVar{
|
||||
{Name: "FOO", Value: "FOOVALUE"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
err := k8sClient.Create(ctx, rs)
|
||||
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to create test RunnerReplicaSet resource")
|
||||
|
||||
runnerSets := actionsv1alpha1.RunnerReplicaSetList{Items: []actionsv1alpha1.RunnerReplicaSet{}}
|
||||
|
||||
Eventually(
|
||||
func() (int, error) {
|
||||
selector, err := metav1.LabelSelectorAsSelector(rs.Spec.Selector)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
err = k8sClient.List(
|
||||
ctx,
|
||||
&runnerSets,
|
||||
client.InNamespace(ns.Name),
|
||||
client.MatchingLabelsSelector{Selector: selector},
|
||||
)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if len(runnerSets.Items) != 1 {
|
||||
return 0, fmt.Errorf("runnerreplicasets is not 1 but %d", len(runnerSets.Items))
|
||||
}
|
||||
|
||||
return *runnerSets.Items[0].Spec.Replicas, nil
|
||||
},
|
||||
time.Second*5, time.Millisecond*500).Should(BeEquivalentTo(1))
|
||||
}
|
||||
|
||||
{
|
||||
// We wrap the update in the Eventually block to avoid the below error that occurs due to concurrent modification
|
||||
// made by the controller to update .Status.AvailableReplicas and .Status.ReadyReplicas
|
||||
// Operation cannot be fulfilled on runnersets.actions.summerwind.dev "example-runnerset": the object has been modified; please apply your changes to the latest version and try again
|
||||
var rd actionsv1alpha1.RunnerDeployment
|
||||
Eventually(func() error {
|
||||
err := k8sClient.Get(ctx, types.NamespacedName{Namespace: ns.Name, Name: name}, &rd)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get test RunnerReplicaSet resource: %v\n", err)
|
||||
}
|
||||
rd.Spec.Replicas = intPtr(2)
|
||||
|
||||
return k8sClient.Update(ctx, &rd)
|
||||
},
|
||||
time.Second*1, time.Millisecond*500).Should(BeNil())
|
||||
|
||||
runnerSets := actionsv1alpha1.RunnerReplicaSetList{Items: []actionsv1alpha1.RunnerReplicaSet{}}
|
||||
|
||||
Eventually(
|
||||
func() (int, error) {
|
||||
selector, err := metav1.LabelSelectorAsSelector(rd.Spec.Selector)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
err = k8sClient.List(
|
||||
ctx,
|
||||
&runnerSets,
|
||||
client.InNamespace(ns.Name),
|
||||
client.MatchingLabelsSelector{Selector: selector},
|
||||
)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if len(runnerSets.Items) != 1 {
|
||||
return 0, fmt.Errorf("runnerreplicasets is not 1 but %d", len(runnerSets.Items))
|
||||
}
|
||||
|
||||
return *runnerSets.Items[0].Spec.Replicas, nil
|
||||
},
|
||||
time.Second*5, time.Millisecond*500).Should(BeEquivalentTo(2))
|
||||
}
|
||||
})
|
||||
|
||||
It("should adopt RunnerReplicaSet created before 0.18.0 to have Spec.Selector", func() {
|
||||
name := "example-runnerdeploy-2"
|
||||
|
||||
{
|
||||
rd := &actionsv1alpha1.RunnerDeployment{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Namespace: ns.Name,
|
||||
},
|
||||
Spec: actionsv1alpha1.RunnerDeploymentSpec{
|
||||
Replicas: intPtr(1),
|
||||
Template: actionsv1alpha1.RunnerTemplate{
|
||||
Spec: actionsv1alpha1.RunnerSpec{
|
||||
Repository: "test/valid",
|
||||
Image: "bar",
|
||||
Env: []corev1.EnvVar{
|
||||
{Name: "FOO", Value: "FOOVALUE"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
createRDErr := k8sClient.Create(ctx, rd)
|
||||
Expect(createRDErr).NotTo(HaveOccurred(), "failed to create test RunnerReplicaSet resource")
|
||||
|
||||
Eventually(
|
||||
func() (int, error) {
|
||||
runnerSets := actionsv1alpha1.RunnerReplicaSetList{Items: []actionsv1alpha1.RunnerReplicaSet{}}
|
||||
|
||||
err := k8sClient.List(
|
||||
ctx,
|
||||
&runnerSets,
|
||||
client.InNamespace(ns.Name),
|
||||
)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return len(runnerSets.Items), nil
|
||||
},
|
||||
time.Second*1, time.Millisecond*500).Should(BeEquivalentTo(1))
|
||||
|
||||
var rs17 *actionsv1alpha1.RunnerReplicaSet
|
||||
|
||||
Consistently(
|
||||
func() (*metav1.LabelSelector, error) {
|
||||
runnerSets := actionsv1alpha1.RunnerReplicaSetList{Items: []actionsv1alpha1.RunnerReplicaSet{}}
|
||||
|
||||
err := k8sClient.List(
|
||||
ctx,
|
||||
&runnerSets,
|
||||
client.InNamespace(ns.Name),
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(runnerSets.Items) != 1 {
|
||||
return nil, fmt.Errorf("runnerreplicasets is not 1 but %d", len(runnerSets.Items))
|
||||
}
|
||||
|
||||
rs17 = &runnerSets.Items[0]
|
||||
|
||||
return runnerSets.Items[0].Spec.Selector, nil
|
||||
},
|
||||
time.Second*1, time.Millisecond*500).Should(Not(BeNil()))
|
||||
|
||||
// We simulate the old, pre 0.18.0 RunnerReplicaSet by updating it.
|
||||
// I've tried to use controllerutil.Set{Owner,Controller}Reference and k8sClient.Create(rs17)
|
||||
// but it didn't work due to the missing RD UID, which is generated on the K8s API server at k8sClient.Create(rd)
|
||||
rs17.Spec.Selector = nil
|
||||
|
||||
updateRSErr := k8sClient.Update(ctx, rs17)
|
||||
Expect(updateRSErr).NotTo(HaveOccurred())
|
||||
|
||||
Eventually(
|
||||
func() (*metav1.LabelSelector, error) {
|
||||
runnerSets := actionsv1alpha1.RunnerReplicaSetList{Items: []actionsv1alpha1.RunnerReplicaSet{}}
|
||||
|
||||
err := k8sClient.List(
|
||||
ctx,
|
||||
&runnerSets,
|
||||
client.InNamespace(ns.Name),
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(runnerSets.Items) != 1 {
|
||||
return nil, fmt.Errorf("runnerreplicasets is not 1 but %d", len(runnerSets.Items))
|
||||
}
|
||||
|
||||
return runnerSets.Items[0].Spec.Selector, nil
|
||||
},
|
||||
time.Second*1, time.Millisecond*500).Should(Not(BeNil()))
|
||||
}
|
||||
})
|
||||
|
||||
})
|
||||
})
|
||||
|
||||
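The adoption this test expects can be pictured with a short sketch. Assuming the controller backfills a nil Spec.Selector from the template labels (the function name and the exact derivation here are illustrative, not the repository's actual reconciliation code):

    // Hypothetical sketch: backfill a missing Spec.Selector so that a
    // RunnerReplicaSet created by a pre-0.18.0 controller keeps matching
    // its runners after the upgrade.
    func adoptSelector(rs *actionsv1alpha1.RunnerReplicaSet) {
        if rs.Spec.Selector == nil {
            rs.Spec.Selector = &metav1.LabelSelector{
                MatchLabels: rs.Spec.Template.ObjectMeta.Labels,
            }
        }
    }

The final Eventually assertion then only has to observe that Spec.Selector becomes non-nil again after being reset to nil.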
@@ -68,8 +68,18 @@ func (r *RunnerReplicaSetReconciler) Reconcile(req ctrl.Request) (ctrl.Result, e
		return ctrl.Result{}, nil
	}

	selector, err := metav1.LabelSelectorAsSelector(rs.Spec.Selector)
	if err != nil {
		return ctrl.Result{}, err
	}
	// Get the Runners managed by the target RunnerReplicaSet
	var allRunners v1alpha1.RunnerList
	if err := r.List(ctx, &allRunners, client.InNamespace(req.Namespace)); err != nil {
	if err := r.List(
		ctx,
		&allRunners,
		client.InNamespace(req.Namespace),
		client.MatchingLabelsSelector{Selector: selector},
	); err != nil {
		if !kerrors.IsNotFound(err) {
			return ctrl.Result{}, err
		}
@@ -77,9 +87,14 @@ func (r *RunnerReplicaSetReconciler) Reconcile(req ctrl.Request) (ctrl.Result, e

	var myRunners []v1alpha1.Runner

	var available, ready int
	var (
		available int
		ready     int
	)

	for _, r := range allRunners.Items {
		// This guard is required so that a RunnerReplicaSet created by controller v0.17.0
		// or earlier does not treat all the runners in the namespace as its children.
		if metav1.IsControlledBy(&r, &rs) {
			myRunners = append(myRunners, r)

@@ -99,14 +114,15 @@ func (r *RunnerReplicaSetReconciler) Reconcile(req ctrl.Request) (ctrl.Result, e
		desired = 1
	}

	log.V(0).Info("debug", "desired", desired, "available", available)

	if available > desired {
		n := available - desired

		// get runners that are currently not busy
		var notBusy []v1alpha1.Runner
		for _, runner := range myRunners {
		log.V(0).Info(fmt.Sprintf("Deleting %d runners", n), "desired", desired, "available", available, "ready", ready)

		// get runners that are currently offline/not busy/timed-out to register
		var deletionCandidates []v1alpha1.Runner

		for _, runner := range allRunners.Items {
			busy, err := r.GitHubClient.IsRunnerBusy(ctx, runner.Spec.Enterprise, runner.Spec.Organization, runner.Spec.Repository, runner.Name)
			if err != nil {
				notRegistered := false
@@ -153,35 +169,37 @@ func (r *RunnerReplicaSetReconciler) Reconcile(req ctrl.Request) (ctrl.Result, e
					"configuredRegistrationTimeout", registrationTimeout,
				)

				notBusy = append(notBusy, runner)
				deletionCandidates = append(deletionCandidates, runner)
			}

			// offline runners should always be a great target for scale down
			if offline {
				notBusy = append(notBusy, runner)
				deletionCandidates = append(deletionCandidates, runner)
			}
		} else if !busy {
			notBusy = append(notBusy, runner)
			deletionCandidates = append(deletionCandidates, runner)
		}
	}

	if len(notBusy) < n {
		n = len(notBusy)
	if len(deletionCandidates) < n {
		n = len(deletionCandidates)
	}

	for i := 0; i < n; i++ {
		if err := r.Client.Delete(ctx, &notBusy[i]); client.IgnoreNotFound(err) != nil {
		if err := r.Client.Delete(ctx, &deletionCandidates[i]); client.IgnoreNotFound(err) != nil {
			log.Error(err, "Failed to delete runner resource")

			return ctrl.Result{}, err
		}

		r.Recorder.Event(&rs, corev1.EventTypeNormal, "RunnerDeleted", fmt.Sprintf("Deleted runner '%s'", notBusy[i].Name))
		log.Info("Deleted runner", "runnerreplicaset", rs.ObjectMeta.Name)
		r.Recorder.Event(&rs, corev1.EventTypeNormal, "RunnerDeleted", fmt.Sprintf("Deleted runner '%s'", deletionCandidates[i].Name))
		log.Info("Deleted runner")
	}
} else if desired > available {
	n := desired - available

	log.V(0).Info(fmt.Sprintf("Creating %d runner(s)", n), "desired", desired, "available", available, "ready", ready)

	for i := 0; i < n; i++ {
		newRunner, err := r.newRunner(rs)
		if err != nil {

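Read together, these hunks replace the busy-only filter with a broader classification. A condensed sketch of the selection logic, assuming notRegistered, offline, and the registration-timeout check are computed as the fragments above suggest (the helper predicates are illustrative, not names from the repository):

    // Illustrative consolidation of the deletion-candidate selection above.
    var deletionCandidates []v1alpha1.Runner
    for _, runner := range allRunners.Items {
        busy, err := r.GitHubClient.IsRunnerBusy(ctx, runner.Spec.Enterprise, runner.Spec.Organization, runner.Spec.Repository, runner.Name)
        switch {
        case err != nil && registrationTimedOut(runner): // hypothetical predicate
            deletionCandidates = append(deletionCandidates, runner) // never registered in time
        case err != nil && offline: // offline runners are always safe to remove
            deletionCandidates = append(deletionCandidates, runner)
        case err == nil && !busy:
            deletionCandidates = append(deletionCandidates, runner)
        }
    }
    if len(deletionCandidates) < n {
        n = len(deletionCandidates) // never delete more runners than we found candidates
    }

Capping n this way means a scale-down request silently shrinks when every runner is busy, rather than deleting an active runner mid-job.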
@@ -47,7 +47,9 @@ func SetupTest(ctx context.Context) *corev1.Namespace {
	err := k8sClient.Create(ctx, ns)
	Expect(err).NotTo(HaveOccurred(), "failed to create test namespace")

	mgr, err := ctrl.NewManager(cfg, ctrl.Options{})
	mgr, err := ctrl.NewManager(cfg, ctrl.Options{
		Namespace: ns.Name,
	})
	Expect(err).NotTo(HaveOccurred(), "failed to create manager")

	runnersList = fake.NewRunnersList()
@@ -115,9 +117,19 @@ var _ = Context("Inside of a new namespace", func() {
			},
			Spec: actionsv1alpha1.RunnerReplicaSetSpec{
				Replicas: intPtr(1),
				Selector: &metav1.LabelSelector{
					MatchLabels: map[string]string{
						"foo": "bar",
					},
				},
				Template: actionsv1alpha1.RunnerTemplate{
					ObjectMeta: metav1.ObjectMeta{
						Labels: map[string]string{
							"foo": "bar",
						},
					},
					Spec: actionsv1alpha1.RunnerSpec{
						Repository: "foo/bar",
						Repository: "test/valid",
						Image:      "bar",
						Env: []corev1.EnvVar{
							{Name: "FOO", Value: "FOOVALUE"},
@@ -135,9 +147,26 @@ var _ = Context("Inside of a new namespace", func() {

		Eventually(
			func() int {
				err := k8sClient.List(ctx, &runners, client.InNamespace(ns.Name))
				selector, err := metav1.LabelSelectorAsSelector(
					&metav1.LabelSelector{
						MatchLabels: map[string]string{
							"foo": "bar",
						},
					},
				)
				if err != nil {
					logf.Log.Error(err, "failed to create labelselector")
					return -1
				}
				err = k8sClient.List(
					ctx,
					&runners,
					client.InNamespace(ns.Name),
					client.MatchingLabelsSelector{Selector: selector},
				)
				if err != nil {
					logf.Log.Error(err, "list runners")
					return -1
				}

				for i, runner := range runners.Items {
@@ -176,7 +205,23 @@ var _ = Context("Inside of a new namespace", func() {

		Eventually(
			func() int {
				err := k8sClient.List(ctx, &runners, client.InNamespace(ns.Name))
				selector, err := metav1.LabelSelectorAsSelector(
					&metav1.LabelSelector{
						MatchLabels: map[string]string{
							"foo": "bar",
						},
					},
				)
				if err != nil {
					logf.Log.Error(err, "failed to create labelselector")
					return -1
				}
				err = k8sClient.List(
					ctx,
					&runners,
					client.InNamespace(ns.Name),
					client.MatchingLabelsSelector{Selector: selector},
				)
				if err != nil {
					logf.Log.Error(err, "list runners")
				}
@@ -220,6 +265,7 @@ var _ = Context("Inside of a new namespace", func() {
				err := k8sClient.List(ctx, &runners, client.InNamespace(ns.Name))
				if err != nil {
					logf.Log.Error(err, "list runners")
					return -1
				}

				for i, runner := range runners.Items {

@@ -55,9 +55,17 @@ func TestAPIs(t *testing.T) {
var _ = BeforeSuite(func(done Done) {
	logf.SetLogger(zap.LoggerTo(GinkgoWriter, true))

	var apiServerFlags []string

	apiServerFlags = append(apiServerFlags, envtest.DefaultKubeAPIServerFlags...)
	// Avoids the following error:
	//   2021-03-19T15:14:11.673+0900 ERROR controller-runtime.controller Reconciler error {"controller": "testns-tvjzjrunner", "request": "testns-gdnyx/example-runnerdeploy-zps4z-j5562", "error": "Pod \"example-runnerdeploy-zps4z-j5562\" is invalid: [spec.containers[1].image: Required value, spec.containers[1].securityContext.privileged: Forbidden: disallowed by cluster policy]"}
	apiServerFlags = append(apiServerFlags, "--allow-privileged=true")

	By("bootstrapping test environment")
	testEnv = &envtest.Environment{
		CRDDirectoryPaths: []string{filepath.Join("..", "config", "crd", "bases")},
		CRDDirectoryPaths:  []string{filepath.Join("..", "config", "crd", "bases")},
		KubeAPIServerFlags: apiServerFlags,
	}

	var err error

@@ -5,6 +5,7 @@ import (
	"fmt"
	"net/http"
	"net/url"
	"os"
	"strings"
	"sync"
	"time"
@@ -39,10 +40,20 @@ func (c *Config) NewClient() (*Client, error) {
	if len(c.Token) > 0 {
		transport = oauth2.NewClient(context.Background(), oauth2.StaticTokenSource(&oauth2.Token{AccessToken: c.Token})).Transport
	} else {
		tr, err := ghinstallation.NewKeyFromFile(http.DefaultTransport, c.AppID, c.AppInstallationID, c.AppPrivateKey)
		if err != nil {
			return nil, fmt.Errorf("authentication failed: %v", err)
		var tr *ghinstallation.Transport

		if _, err := os.Stat(c.AppPrivateKey); err == nil {
			tr, err = ghinstallation.NewKeyFromFile(http.DefaultTransport, c.AppID, c.AppInstallationID, c.AppPrivateKey)
			if err != nil {
				return nil, fmt.Errorf("authentication failed: using private key at %s: %v", c.AppPrivateKey, err)
			}
		} else {
			tr, err = ghinstallation.New(http.DefaultTransport, c.AppID, c.AppInstallationID, []byte(c.AppPrivateKey))
			if err != nil {
				return nil, fmt.Errorf("authentication failed: using private key of size %d (%s...): %v", len(c.AppPrivateKey), strings.Split(c.AppPrivateKey, "\n")[0], err)
			}
		}

	if len(c.EnterpriseURL) > 0 {
		githubAPIURL, err := getEnterpriseApiUrl(c.EnterpriseURL)
		if err != nil {
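One practical effect of this change: the value given for the App private key may now be either a file path or the PEM text itself, with os.Stat as the discriminator. A sketch with illustrative values (the field names come from the Config usages above; everything else is made up for the example):

    // Both of these would authenticate after this change.
    fromFile := &Config{AppID: 12345, AppInstallationID: 67890, AppPrivateKey: "/etc/actions-runner/private-key.pem"}
    inline := &Config{AppID: 12345, AppInstallationID: 67890, AppPrivateKey: "-----BEGIN RSA PRIVATE KEY-----\n..."}

Each branch also reports a distinct error message, which makes a bad path easier to tell apart from bad key material.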
@@ -147,7 +158,7 @@ func (c *Client) ListRunners(ctx context.Context, enterprise, org, repo string)

	var runners []*github.Runner

	opts := github.ListOptions{PerPage: 10}
	opts := github.ListOptions{PerPage: 100}
	for {
		list, res, err := c.listRunners(ctx, enterprise, owner, repo, &opts)

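Raising PerPage from 10 to 100 cuts the number of list calls tenfold for large runner fleets. For context, the loop that begins above presumably advances through pages in the usual go-github way, along these lines (a sketch, not the exact body of the hunk):

    // Typical go-github pagination: follow res.NextPage until it is zero.
    for {
        list, res, err := c.listRunners(ctx, enterprise, owner, repo, &opts)
        if err != nil {
            return nil, err
        }
        runners = append(runners, list...)
        if res.NextPage == 0 {
            break
        }
        opts.Page = res.NextPage
    }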
main.go
@@ -41,8 +41,8 @@ const (
)

var (
	scheme   = runtime.NewScheme()
	setupLog = ctrl.Log.WithName("setup")
	scheme = runtime.NewScheme()
	log    = ctrl.Log.WithName("actions-runner-controller")
)

func init() {
@@ -63,6 +63,7 @@ func main() {

		runnerImage string
		dockerImage string
		namespace   string

		commonRunnerLabels commaSeparatedStringSlice
	)
@@ -84,6 +85,7 @@ func main() {
	flag.StringVar(&c.AppPrivateKey, "github-app-private-key", c.AppPrivateKey, "The path of a private key file to authenticate as a GitHub App")
	flag.DurationVar(&syncPeriod, "sync-period", 10*time.Minute, "Determines the minimum frequency at which K8s resources managed by this controller are reconciled. When you use autoscaling, set this to a lower value such as 10 minutes, because it corresponds to the minimum time needed to react to a demand change")
	flag.Var(&commonRunnerLabels, "common-runner-labels", "Runner labels in the K1=V1,K2=V2,... format that are inherited by all the runners created by the controller. See https://github.com/summerwind/actions-runner-controller/issues/321 for more information")
	flag.StringVar(&namespace, "watch-namespace", "", "The namespace to watch for custom resources. Leave empty to watch all namespaces.")
	flag.Parse()

	logger := zap.New(func(o *zap.Options) {
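The commaSeparatedStringSlice type backing the new common-runner-labels flag above is not part of this diff. A minimal flag.Value implementation along these lines would behave as the flag description suggests (an assumption for illustration, not the repository's actual code):

    // Sketch of a flag.Value that accumulates comma-separated items.
    type commaSeparatedStringSlice []string

    func (s *commaSeparatedStringSlice) String() string {
        return strings.Join(*s, ",")
    }

    func (s *commaSeparatedStringSlice) Set(value string) error {
        for _, v := range strings.Split(value, ",") {
            if v != "" {
                *s = append(*s, v)
            }
        }
        return nil
    }

Because Set appends, both a comma-separated list and repeated occurrences of the flag would accumulate into the slice.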
@@ -104,15 +106,16 @@ func main() {
		LeaderElection: enableLeaderElection,
		Port:           9443,
		SyncPeriod:     &syncPeriod,
		Namespace:      namespace,
	})
	if err != nil {
		setupLog.Error(err, "unable to start manager")
		log.Error(err, "unable to start manager")
		os.Exit(1)
	}

	runnerReconciler := &controllers.RunnerReconciler{
		Client:       mgr.GetClient(),
		Log:          ctrl.Log.WithName("controllers").WithName("Runner"),
		Log:          log.WithName("runner"),
		Scheme:       mgr.GetScheme(),
		GitHubClient: ghClient,
		RunnerImage:  runnerImage,
@@ -120,64 +123,64 @@ func main() {
	}

	if err = runnerReconciler.SetupWithManager(mgr); err != nil {
		setupLog.Error(err, "unable to create controller", "controller", "Runner")
		log.Error(err, "unable to create controller", "controller", "Runner")
		os.Exit(1)
	}

	runnerSetReconciler := &controllers.RunnerReplicaSetReconciler{
		Client:       mgr.GetClient(),
		Log:          ctrl.Log.WithName("controllers").WithName("RunnerReplicaSet"),
		Log:          log.WithName("runnerreplicaset"),
		Scheme:       mgr.GetScheme(),
		GitHubClient: ghClient,
	}

	if err = runnerSetReconciler.SetupWithManager(mgr); err != nil {
		setupLog.Error(err, "unable to create controller", "controller", "RunnerReplicaSet")
		log.Error(err, "unable to create controller", "controller", "RunnerReplicaSet")
		os.Exit(1)
	}

	runnerDeploymentReconciler := &controllers.RunnerDeploymentReconciler{
		Client:             mgr.GetClient(),
		Log:                ctrl.Log.WithName("controllers").WithName("RunnerDeployment"),
		Log:                log.WithName("runnerdeployment"),
		Scheme:             mgr.GetScheme(),
		CommonRunnerLabels: commonRunnerLabels,
	}

	if err = runnerDeploymentReconciler.SetupWithManager(mgr); err != nil {
		setupLog.Error(err, "unable to create controller", "controller", "RunnerDeployment")
		log.Error(err, "unable to create controller", "controller", "RunnerDeployment")
		os.Exit(1)
	}

	horizontalRunnerAutoscaler := &controllers.HorizontalRunnerAutoscalerReconciler{
		Client:        mgr.GetClient(),
		Log:           ctrl.Log.WithName("controllers").WithName("HorizontalRunnerAutoscaler"),
		Log:           log.WithName("horizontalrunnerautoscaler"),
		Scheme:        mgr.GetScheme(),
		GitHubClient:  ghClient,
		CacheDuration: syncPeriod - 10*time.Second,
	}

	if err = horizontalRunnerAutoscaler.SetupWithManager(mgr); err != nil {
		setupLog.Error(err, "unable to create controller", "controller", "HorizontalRunnerAutoscaler")
		log.Error(err, "unable to create controller", "controller", "HorizontalRunnerAutoscaler")
		os.Exit(1)
	}

	if err = (&actionsv1alpha1.Runner{}).SetupWebhookWithManager(mgr); err != nil {
		setupLog.Error(err, "unable to create webhook", "webhook", "Runner")
		log.Error(err, "unable to create webhook", "webhook", "Runner")
		os.Exit(1)
	}
	if err = (&actionsv1alpha1.RunnerDeployment{}).SetupWebhookWithManager(mgr); err != nil {
		setupLog.Error(err, "unable to create webhook", "webhook", "RunnerDeployment")
		log.Error(err, "unable to create webhook", "webhook", "RunnerDeployment")
		os.Exit(1)
	}
	if err = (&actionsv1alpha1.RunnerReplicaSet{}).SetupWebhookWithManager(mgr); err != nil {
		setupLog.Error(err, "unable to create webhook", "webhook", "RunnerReplicaSet")
		log.Error(err, "unable to create webhook", "webhook", "RunnerReplicaSet")
		os.Exit(1)
	}
	// +kubebuilder:scaffold:builder

	setupLog.Info("starting manager")
	log.Info("starting manager")
	if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
		setupLog.Error(err, "problem running manager")
		log.Error(err, "problem running manager")
		os.Exit(1)
	}
}

pkg/actionsglob/README.md (new file)
@@ -0,0 +1,8 @@
This package is an implementation of glob matching that is intended to simulate the behaviour of
https://github.com/actions/toolkit/tree/master/packages/glob for common patterns.

It is not a complete reimplementation of the referenced Node.js package.

Differences:

- This package doesn't implement `**`
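A usage sketch of the package, based on the Match function introduced below (the values are illustrative; the expected results are the ones asserted in match_test.go):

    // Match supports '*' wildcards and a leading '!' negation.
    actionsglob.Match("actions-*-metrics", "actions-workflow-metrics") // true
    actionsglob.Match("!foo*", "foobar")                               // false: negated prefix match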
pkg/actionsglob/actionsglob.go (new file)
@@ -0,0 +1,78 @@
package actionsglob

import (
	"fmt"
	"strings"
)

func Match(pat string, s string) bool {
	if len(pat) == 0 {
		panic(fmt.Sprintf("unexpected length of pattern: %d", len(pat)))
	}

	var inverse bool

	if pat[0] == '!' {
		pat = pat[1:]
		inverse = true
	}

	tokens := strings.SplitAfter(pat, "*")

	var wildcardInHead bool

	for i := 0; i < len(tokens); i++ {
		p := tokens[i]

		if p == "" {
			s = ""
			break
		}

		if p == "*" {
			if i == len(tokens)-1 {
				s = ""
				break
			}

			wildcardInHead = true

			continue
		}

		wildcardInTail := p[len(p)-1] == '*'
		if wildcardInTail {
			p = p[:len(p)-1]
		}

		subs := strings.SplitN(s, p, 2)

		// strings.SplitN returns a single element when p does not occur in s,
		// in which case indexing subs[1] below would panic. No occurrence
		// means the literal part of the pattern cannot match at all.
		if len(subs) < 2 {
			return inverse
		}

		if subs[0] != "" {
			if !wildcardInHead {
				break
			}
		}

		if subs[1] != "" {
			if !wildcardInTail {
				break
			}
		}

		s = subs[1]

		wildcardInHead = wildcardInTail
	}

	r := s == ""

	if inverse {
		r = !r
	}

	return r
}
pkg/actionsglob/match_test.go (new file)
@@ -0,0 +1,214 @@
package actionsglob

import (
	"testing"
)

func TestMatch(t *testing.T) {
	type testcase struct {
		Pattern, Target string
		Want            bool
	}

	run := func(t *testing.T, tc testcase) {
		t.Helper()

		got := Match(tc.Pattern, tc.Target)

		if got != tc.Want {
			t.Errorf("%s against %s: want %v, got %v", tc.Pattern, tc.Target, tc.Want, got)
		}
	}

	t.Run("foo == foo", func(t *testing.T) {
		run(t, testcase{
			Pattern: "foo",
			Target:  "foo",
			Want:    true,
		})
	})

	t.Run("!foo == foo", func(t *testing.T) {
		run(t, testcase{
			Pattern: "!foo",
			Target:  "foo",
			Want:    false,
		})
	})

	t.Run("foo == foo1", func(t *testing.T) {
		run(t, testcase{
			Pattern: "foo",
			Target:  "foo1",
			Want:    false,
		})
	})

	t.Run("!foo == foo1", func(t *testing.T) {
		run(t, testcase{
			Pattern: "!foo",
			Target:  "foo1",
			Want:    true,
		})
	})

	t.Run("*foo == foo", func(t *testing.T) {
		run(t, testcase{
			Pattern: "*foo",
			Target:  "foo",
			Want:    true,
		})
	})

	t.Run("!*foo == foo", func(t *testing.T) {
		run(t, testcase{
			Pattern: "!*foo",
			Target:  "foo",
			Want:    false,
		})
	})

	t.Run("*foo == 1foo", func(t *testing.T) {
		run(t, testcase{
			Pattern: "*foo",
			Target:  "1foo",
			Want:    true,
		})
	})

	t.Run("!*foo == 1foo", func(t *testing.T) {
		run(t, testcase{
			Pattern: "!*foo",
			Target:  "1foo",
			Want:    false,
		})
	})

	t.Run("*foo == foo1", func(t *testing.T) {
		run(t, testcase{
			Pattern: "*foo",
			Target:  "foo1",
			Want:    false,
		})
	})

	t.Run("!*foo == foo1", func(t *testing.T) {
		run(t, testcase{
			Pattern: "!*foo",
			Target:  "foo1",
			Want:    true,
		})
	})

	t.Run("*foo* == foo1", func(t *testing.T) {
		run(t, testcase{
			Pattern: "*foo*",
			Target:  "foo1",
			Want:    true,
		})
	})

	t.Run("!*foo* == foo1", func(t *testing.T) {
		run(t, testcase{
			Pattern: "!*foo*",
			Target:  "foo1",
			Want:    false,
		})
	})

	t.Run("*foo == foobar", func(t *testing.T) {
		run(t, testcase{
			Pattern: "*foo",
			Target:  "foobar",
			Want:    false,
		})
	})

	t.Run("!*foo == foobar", func(t *testing.T) {
		run(t, testcase{
			Pattern: "!*foo",
			Target:  "foobar",
			Want:    true,
		})
	})

	t.Run("*foo* == foobar", func(t *testing.T) {
		run(t, testcase{
			Pattern: "*foo*",
			Target:  "foobar",
			Want:    true,
		})
	})

	t.Run("!*foo* == foobar", func(t *testing.T) {
		run(t, testcase{
			Pattern: "!*foo*",
			Target:  "foobar",
			Want:    false,
		})
	})

	t.Run("foo* == foo", func(t *testing.T) {
		run(t, testcase{
			Pattern: "foo*",
			Target:  "foo",
			Want:    true,
		})
	})

	t.Run("!foo* == foo", func(t *testing.T) {
		run(t, testcase{
			Pattern: "!foo*",
			Target:  "foo",
			Want:    false,
		})
	})

	t.Run("foo* == foobar", func(t *testing.T) {
		run(t, testcase{
			Pattern: "foo*",
			Target:  "foobar",
			Want:    true,
		})
	})

	t.Run("!foo* == foobar", func(t *testing.T) {
		run(t, testcase{
			Pattern: "!foo*",
			Target:  "foobar",
			Want:    false,
		})
	})

	t.Run("foo (* == foo ( 1 / 2 )", func(t *testing.T) {
		run(t, testcase{
			Pattern: "foo (*",
			Target:  "foo ( 1 / 2 )",
			Want:    true,
		})
	})

	t.Run("!foo (* == foo ( 1 / 2 )", func(t *testing.T) {
		run(t, testcase{
			Pattern: "!foo (*",
			Target:  "foo ( 1 / 2 )",
			Want:    false,
		})
	})

	t.Run("actions-*-metrics == actions-workflow-metrics", func(t *testing.T) {
		run(t, testcase{
			Pattern: "actions-*-metrics",
			Target:  "actions-workflow-metrics",
			Want:    true,
		})
	})

	t.Run("!actions-*-metrics == actions-workflow-metrics", func(t *testing.T) {
		run(t, testcase{
			Pattern: "!actions-*-metrics",
			Target:  "actions-workflow-metrics",
			Want:    false,
		})
	})
}
@@ -1,4 +1,4 @@
FROM ubuntu:18.04
FROM ubuntu:20.04

ARG TARGETPLATFORM
ARG RUNNER_VERSION=2.274.2
@@ -8,37 +8,37 @@ RUN test -n "$TARGETPLATFORM" || (echo "TARGETPLATFORM must be set" && false)

ENV DEBIAN_FRONTEND=noninteractive
RUN apt update -y \
    && apt install -y software-properties-common \
    && add-apt-repository -y ppa:git-core/ppa \
    && apt update -y \
    && apt install -y --no-install-recommends \
    build-essential \
    curl \
    ca-certificates \
    dnsutils \
    ftp \
    git \
    iproute2 \
    iputils-ping \
    jq \
    libunwind8 \
    locales \
    netcat \
    openssh-client \
    parallel \
    rsync \
    shellcheck \
    sudo \
    telnet \
    time \
    tzdata \
    unzip \
    upx \
    wget \
    zip \
    zstd \
    && cd /usr/bin && ln -sf python3 python \
    && rm -rf /var/lib/apt/lists/*
    && apt install -y software-properties-common \
    && add-apt-repository -y ppa:git-core/ppa \
    && apt update -y \
    && apt install -y --no-install-recommends \
    build-essential \
    curl \
    ca-certificates \
    dnsutils \
    ftp \
    git \
    iproute2 \
    iputils-ping \
    jq \
    libunwind8 \
    locales \
    netcat \
    openssh-client \
    parallel \
    rsync \
    shellcheck \
    sudo \
    telnet \
    time \
    tzdata \
    unzip \
    upx \
    wget \
    zip \
    zstd \
    python-is-python3 \
    && rm -rf /var/lib/apt/lists/*

RUN export ARCH=$(echo ${TARGETPLATFORM} | cut -d / -f2) \
    && curl -L -o /usr/local/bin/dumb-init https://github.com/Yelp/dumb-init/releases/download/v1.2.2/dumb-init_1.2.2_${ARCH} \
@@ -46,18 +46,18 @@ RUN export ARCH=$(echo ${TARGETPLATFORM} | cut -d / -f2) \

# Docker download supports arm64 as aarch64 & amd64 as x86_64
RUN set -vx; \
    export ARCH=$(echo ${TARGETPLATFORM} | cut -d / -f2) \
    && if [ "$ARCH" = "arm64" ]; then export ARCH=aarch64 ; fi \
    && if [ "$ARCH" = "amd64" ]; then export ARCH=x86_64 ; fi \
    && curl -L -o docker.tgz https://download.docker.com/linux/static/stable/${ARCH}/docker-${DOCKER_VERSION}.tgz \
    && tar zxvf docker.tgz \
    && install -o root -g root -m 755 docker/docker /usr/local/bin/docker \
    && rm -rf docker docker.tgz \
    && adduser --disabled-password --gecos "" --uid 1000 runner \
    && groupadd docker \
    && usermod -aG sudo runner \
    && usermod -aG docker runner \
    && echo "%sudo ALL=(ALL:ALL) NOPASSWD:ALL" > /etc/sudoers
    export ARCH=$(echo ${TARGETPLATFORM} | cut -d / -f2) \
    && if [ "$ARCH" = "arm64" ]; then export ARCH=aarch64 ; fi \
    && if [ "$ARCH" = "amd64" ]; then export ARCH=x86_64 ; fi \
    && curl -L -o docker.tgz https://download.docker.com/linux/static/stable/${ARCH}/docker-${DOCKER_VERSION}.tgz \
    && tar zxvf docker.tgz \
    && install -o root -g root -m 755 docker/docker /usr/local/bin/docker \
    && rm -rf docker docker.tgz \
    && adduser --disabled-password --gecos "" --uid 1000 runner \
    && groupadd docker \
    && usermod -aG sudo runner \
    && usermod -aG docker runner \
    && echo "%sudo ALL=(ALL:ALL) NOPASSWD:ALL" > /etc/sudoers

ENV RUNNER_ASSETS_DIR=/runnertmp

@@ -67,21 +67,21 @@ ENV RUNNER_ASSETS_DIR=/runnertmp
# It is installed after installdependencies.sh and before removing /var/lib/apt/lists
# to avoid rerunning apt-update on its own.
RUN export ARCH=$(echo ${TARGETPLATFORM} | cut -d / -f2) \
    && if [ "$ARCH" = "amd64" ]; then export ARCH=x64 ; fi \
    && mkdir -p "$RUNNER_ASSETS_DIR" \
    && cd "$RUNNER_ASSETS_DIR" \
    && curl -L -o runner.tar.gz https://github.com/actions/runner/releases/download/v${RUNNER_VERSION}/actions-runner-linux-${ARCH}-${RUNNER_VERSION}.tar.gz \
    && tar xzf ./runner.tar.gz \
    && rm runner.tar.gz \
    && ./bin/installdependencies.sh \
    && mv ./externals ./externalstmp \
    && apt-get install -y libyaml-dev \
    && rm -rf /var/lib/apt/lists/*
    && if [ "$ARCH" = "amd64" ]; then export ARCH=x64 ; fi \
    && mkdir -p "$RUNNER_ASSETS_DIR" \
    && cd "$RUNNER_ASSETS_DIR" \
    && curl -L -o runner.tar.gz https://github.com/actions/runner/releases/download/v${RUNNER_VERSION}/actions-runner-linux-${ARCH}-${RUNNER_VERSION}.tar.gz \
    && tar xzf ./runner.tar.gz \
    && rm runner.tar.gz \
    && ./bin/installdependencies.sh \
    && mv ./externals ./externalstmp \
    && apt-get install -y libyaml-dev \
    && rm -rf /var/lib/apt/lists/*

RUN echo AGENT_TOOLSDIRECTORY=/opt/hostedtoolcache > .env \
    && mkdir /opt/hostedtoolcache \
    && chgrp docker /opt/hostedtoolcache \
    && chmod g+rwx /opt/hostedtoolcache
    && mkdir /opt/hostedtoolcache \
    && chgrp docker /opt/hostedtoolcache \
    && chmod g+rwx /opt/hostedtoolcache

COPY entrypoint.sh /
COPY --chown=runner:docker patched $RUNNER_ASSETS_DIR/patched

@@ -1,11 +1,12 @@
FROM ubuntu:20.04

ENV DEBIAN_FRONTEND=noninteractive
# Dev + DinD dependencies
RUN apt update \
RUN apt update -y \
    && apt install -y software-properties-common \
    && add-apt-repository -y ppa:git-core/ppa \
    && apt install -y \
    && apt update -y \
    && apt install -y --no-install-recommends \
    software-properties-common \
    build-essential \
    curl \
    ca-certificates \
@@ -13,7 +14,6 @@ RUN apt update \
    ftp \
    git \
    iproute2 \
    iptables \
    iputils-ping \
    jq \
    libunwind8 \
@@ -21,11 +21,9 @@ RUN apt update \
    netcat \
    openssh-client \
    parallel \
    python-is-python3 \
    rsync \
    shellcheck \
    sudo \
    supervisor \
    telnet \
    time \
    tzdata \
@@ -34,6 +32,9 @@ RUN apt update \
    wget \
    zip \
    zstd \
    python-is-python3 \
    iptables \
    supervisor \
    && rm -rf /var/lib/apt/lists/*

# Runner user
@@ -79,7 +80,7 @@ ENV RUNNER_ASSETS_DIR=/runnertmp
RUN export ARCH=$(echo ${TARGETPLATFORM} | cut -d / -f2) \
    && if [ "$ARCH" = "amd64" ]; then export ARCH=x64 ; fi \
    && mkdir -p "$RUNNER_ASSETS_DIR" \
    && cd "$RUNNER_ASSETS_DIR" \
    && cd "$RUNNER_ASSETS_DIR" \
    && curl -L -o runner.tar.gz https://github.com/actions/runner/releases/download/v${RUNNER_VERSION}/actions-runner-linux-${ARCH}-${RUNNER_VERSION}.tar.gz \
    && tar xzf ./runner.tar.gz \
    && rm runner.tar.gz \
@@ -88,9 +89,9 @@ RUN export ARCH=$(echo ${TARGETPLATFORM} | cut -d / -f2) \
    && rm -rf /var/lib/apt/lists/*

RUN echo AGENT_TOOLSDIRECTORY=/opt/hostedtoolcache > /runner.env \
    && mkdir /opt/hostedtoolcache \
    && chgrp docker /opt/hostedtoolcache \
    && chmod g+rwx /opt/hostedtoolcache
    && mkdir /opt/hostedtoolcache \
    && chgrp docker /opt/hostedtoolcache \
    && chmod g+rwx /opt/hostedtoolcache

COPY modprobe startup.sh /usr/local/bin/
COPY supervisor/ /etc/supervisor/conf.d/

runner/Dockerfile.ubuntu.1804 (new file)
@@ -0,0 +1,91 @@
FROM ubuntu:18.04

ARG TARGETPLATFORM
ARG RUNNER_VERSION=2.274.2
ARG DOCKER_VERSION=19.03.12

RUN test -n "$TARGETPLATFORM" || (echo "TARGETPLATFORM must be set" && false)

ENV DEBIAN_FRONTEND=noninteractive
RUN apt update -y \
    && apt install -y software-properties-common \
    && add-apt-repository -y ppa:git-core/ppa \
    && apt update -y \
    && apt install -y --no-install-recommends \
    build-essential \
    curl \
    ca-certificates \
    dnsutils \
    ftp \
    git \
    iproute2 \
    iputils-ping \
    jq \
    libunwind8 \
    locales \
    netcat \
    openssh-client \
    parallel \
    rsync \
    shellcheck \
    sudo \
    telnet \
    time \
    tzdata \
    unzip \
    upx \
    wget \
    zip \
    zstd \
    && cd /usr/bin && ln -sf python3 python \
    && rm -rf /var/lib/apt/lists/*

RUN export ARCH=$(echo ${TARGETPLATFORM} | cut -d / -f2) \
    && curl -L -o /usr/local/bin/dumb-init https://github.com/Yelp/dumb-init/releases/download/v1.2.2/dumb-init_1.2.2_${ARCH} \
    && chmod +x /usr/local/bin/dumb-init

# Docker download supports arm64 as aarch64 & amd64 as x86_64
RUN set -vx; \
    export ARCH=$(echo ${TARGETPLATFORM} | cut -d / -f2) \
    && if [ "$ARCH" = "arm64" ]; then export ARCH=aarch64 ; fi \
    && if [ "$ARCH" = "amd64" ]; then export ARCH=x86_64 ; fi \
    && curl -L -o docker.tgz https://download.docker.com/linux/static/stable/${ARCH}/docker-${DOCKER_VERSION}.tgz \
    && tar zxvf docker.tgz \
    && install -o root -g root -m 755 docker/docker /usr/local/bin/docker \
    && rm -rf docker docker.tgz \
    && adduser --disabled-password --gecos "" --uid 1000 runner \
    && groupadd docker \
    && usermod -aG sudo runner \
    && usermod -aG docker runner \
    && echo "%sudo ALL=(ALL:ALL) NOPASSWD:ALL" > /etc/sudoers

ENV RUNNER_ASSETS_DIR=/runnertmp

# Runner download supports amd64 as x64. Externalstmp is needed for making mount points work inside DinD.
#
# libyaml-dev is required for ruby/setup-ruby action.
# It is installed after installdependencies.sh and before removing /var/lib/apt/lists
# to avoid rerunning apt-update on its own.
RUN export ARCH=$(echo ${TARGETPLATFORM} | cut -d / -f2) \
    && if [ "$ARCH" = "amd64" ]; then export ARCH=x64 ; fi \
    && mkdir -p "$RUNNER_ASSETS_DIR" \
    && cd "$RUNNER_ASSETS_DIR" \
    && curl -L -o runner.tar.gz https://github.com/actions/runner/releases/download/v${RUNNER_VERSION}/actions-runner-linux-${ARCH}-${RUNNER_VERSION}.tar.gz \
    && tar xzf ./runner.tar.gz \
    && rm runner.tar.gz \
    && ./bin/installdependencies.sh \
    && mv ./externals ./externalstmp \
    && apt-get install -y libyaml-dev \
    && rm -rf /var/lib/apt/lists/*

RUN echo AGENT_TOOLSDIRECTORY=/opt/hostedtoolcache > .env \
    && mkdir /opt/hostedtoolcache \
    && chgrp docker /opt/hostedtoolcache \
    && chmod g+rwx /opt/hostedtoolcache

COPY entrypoint.sh /
COPY --chown=runner:docker patched $RUNNER_ASSETS_DIR/patched

USER runner
ENTRYPOINT ["/usr/local/bin/dumb-init", "--"]
CMD ["/entrypoint.sh"]
@@ -2,7 +2,7 @@ NAME ?= summerwind/actions-runner
DIND_RUNNER_NAME ?= ${NAME}-dind
TAG ?= latest

RUNNER_VERSION ?= 2.274.2
RUNNER_VERSION ?= 2.277.1
DOCKER_VERSION ?= 19.03.12

# default list of platforms for which multiarch image is built
@@ -22,16 +22,15 @@ else
	export PUSH_ARG="--push"
endif

docker-build:
docker-build-ubuntu:
	docker build --build-arg TARGETPLATFORM=amd64 --build-arg RUNNER_VERSION=${RUNNER_VERSION} --build-arg DOCKER_VERSION=${DOCKER_VERSION} -t ${NAME}:${TAG} .
	docker build --build-arg TARGETPLATFORM=amd64 --build-arg RUNNER_VERSION=${RUNNER_VERSION} --build-arg DOCKER_VERSION=${DOCKER_VERSION} -t ${DIND_RUNNER_NAME}:${TAG} -f dindrunner.Dockerfile .
	docker build --build-arg TARGETPLATFORM=amd64 --build-arg RUNNER_VERSION=${RUNNER_VERSION} --build-arg DOCKER_VERSION=${DOCKER_VERSION} -t ${DIND_RUNNER_NAME}:${TAG} -f Dockerfile.dindrunner .


docker-push:
docker-push-ubuntu:
	docker push ${NAME}:${TAG}
	docker push ${DIND_RUNNER_NAME}:${TAG}

docker-buildx:
docker-buildx-ubuntu:
	export DOCKER_CLI_EXPERIMENTAL=enabled
	@if ! docker buildx ls | grep -q container-builder; then\
		docker buildx create --platform ${PLATFORMS} --name container-builder --use;\
@@ -46,5 +45,5 @@ docker-buildx:
	--build-arg RUNNER_VERSION=${RUNNER_VERSION} \
	--build-arg DOCKER_VERSION=${DOCKER_VERSION} \
	-t "${DIND_RUNNER_NAME}:latest" \
	-f dindrunner.Dockerfile \
	-f Dockerfile.dindrunner \
	. ${PUSH_ARG}

@@ -29,21 +29,13 @@ else
  exit 1
fi

if [ -n "${RUNNER_WORKDIR}" ]; then
  WORKDIR_ARG="--work ${RUNNER_WORKDIR}"
fi

if [ -n "${RUNNER_LABELS}" ]; then
  LABEL_ARG="--labels ${RUNNER_LABELS}"
fi

if [ -z "${RUNNER_TOKEN}" ]; then
  echo "RUNNER_TOKEN must be set" 1>&2
  exit 1
fi

if [ -z "${RUNNER_REPO}" ] && [ -n "${RUNNER_ORG}" ] && [ -n "${RUNNER_GROUP}" ];then
  RUNNER_GROUP_ARG="--runnergroup ${RUNNER_GROUP}"
if [ -z "${RUNNER_REPO}" ] && [ -n "${RUNNER_GROUP}" ];then
  RUNNER_GROUPS=${RUNNER_GROUP}
fi

# Hack due to https://github.com/summerwind/actions-runner-controller/issues/252#issuecomment-758338483
@@ -56,7 +48,14 @@ sudo chown -R runner:docker /runner
mv /runnertmp/* /runner/

cd /runner
./config.sh --unattended --replace --name "${RUNNER_NAME}" --url "${GITHUB_URL}${ATTACH}" --token "${RUNNER_TOKEN}" ${RUNNER_GROUP_ARG} ${LABEL_ARG} ${WORKDIR_ARG}
./config.sh --unattended --replace \
  --name "${RUNNER_NAME}" \
  --url "${GITHUB_URL}${ATTACH}" \
  --token "${RUNNER_TOKEN}" \
  --runnergroup "${RUNNER_GROUPS}" \
  --labels "${RUNNER_LABELS}" \
  --work "${RUNNER_WORKDIR}"

mkdir ./externals
# Hack due to the DinD volumes
mv ./externalstmp/* ./externals/

@@ -17,6 +17,34 @@ function wait_for_process () {
    return 0
}

sudo /bin/bash <<SCRIPT
mkdir -p /etc/docker

cat <<EOS > /etc/docker/daemon.json
{
EOS

if [ -n "${MTU}" ]; then
cat <<EOS >> /etc/docker/daemon.json
  "mtu": ${MTU}
EOS
# See https://docs.docker.com/engine/security/rootless/
echo "environment=DOCKERD_ROOTLESS_ROOTLESSKIT_MTU=${MTU}" >> /etc/supervisor/conf.d/dockerd.conf
fi

cat <<EOS >> /etc/docker/daemon.json
}
EOS
SCRIPT

INFO "Using /etc/docker/daemon.json with the following content"

cat /etc/docker/daemon.json

INFO "Using /etc/supervisor/conf.d/dockerd.conf with the following content"

cat /etc/supervisor/conf.d/dockerd.conf

INFO "Starting supervisor"
sudo /usr/bin/supervisord -n >> /dev/null 2>&1 &

@@ -27,11 +55,17 @@ for process in "${processes[@]}"; do
    wait_for_process "$process"
    if [ $? -ne 0 ]; then
        ERROR "$process is not running after max time"
        ERROR "Dumping /var/log/dockerd.err.log to help investigation"
        cat /var/log/dockerd.err.log
        exit 1
    else
        INFO "$process is running"
    fi
done

if [ -n "${MTU}" ]; then
    ifconfig docker0 mtu ${MTU} up
fi

# Wait processes to be running
entrypoint.sh