Mirror of https://github.com/actions/actions-runner-controller.git (synced 2025-12-10 19:50:30 +00:00)

Compare commits: 115 commits
.github/workflows/build-and-release-runners.yml (new file, vendored, 123 lines)
@@ -0,0 +1,123 @@
name: Build and Release Runners

on:
  pull_request:
    branches:
      - '**'
    paths:
      - 'runner/**'
      - .github/workflows/build-and-release-runners.yml
  push:
    branches:
      - master
    paths:
      - runner/patched/*
      - runner/Dockerfile
      - runner/Dockerfile.ubuntu.1804
      - runner/Dockerfile.dindrunner
      - runner/entrypoint.sh
      - .github/workflows/build-and-release-runners.yml

jobs:
  build:
    runs-on: ubuntu-latest
    name: Build ${{ matrix.name }}-ubuntu-${{ matrix.os-version }}
    strategy:
      matrix:
        include:
          - name: actions-runner
            os-version: 20.04
            dockerfile: Dockerfile
          - name: actions-runner
            os-version: 18.04
            dockerfile: Dockerfile.ubuntu.1804
          - name: actions-runner-dind
            os-version: 20.04
            dockerfile: Dockerfile.dindrunner
    env:
      RUNNER_VERSION: 2.277.1
      DOCKER_VERSION: 19.03.12
      DOCKERHUB_USERNAME: ${{ github.repository_owner }}
    steps:
      - name: Set outputs
        id: vars
        run: echo ::set-output name=sha_short::${GITHUB_SHA::7}

      - name: Checkout
        uses: actions/checkout@v2

      - name: Set up QEMU
        uses: docker/setup-qemu-action@v1

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
        with:
          version: latest

      - name: Login to DockerHub
        uses: docker/login-action@v1
        if: ${{ github.event_name == 'push' || github.event_name == 'release' }}
        with:
          username: ${{ github.repository_owner }}
          password: ${{ secrets.DOCKER_ACCESS_TOKEN }}

      - name: Build and Push Versioned Tags
        uses: docker/build-push-action@v2
        with:
          context: ./runner
          file: ./runner/${{ matrix.dockerfile }}
          platforms: linux/amd64,linux/arm64
          push: ${{ github.event_name != 'pull_request' }}
          build-args: |
            RUNNER_VERSION=${{ env.RUNNER_VERSION }}
            DOCKER_VERSION=${{ env.DOCKER_VERSION }}
          tags: |
            ${{ env.DOCKERHUB_USERNAME }}/${{ matrix.name }}:v${{ env.RUNNER_VERSION }}-ubuntu-${{ matrix.os-version }}
            ${{ env.DOCKERHUB_USERNAME }}/${{ matrix.name }}:v${{ env.RUNNER_VERSION }}-ubuntu-${{ matrix.os-version }}-${{ steps.vars.outputs.sha_short }}

  latest-tags:
    runs-on: ubuntu-latest
    name: Build ${{ matrix.name }}-latest
    strategy:
      matrix:
        include:
          - name: actions-runner
            dockerfile: Dockerfile
          - name: actions-runner-dind
            dockerfile: Dockerfile.dindrunner
    env:
      RUNNER_VERSION: 2.277.1
      DOCKER_VERSION: 19.03.12
      DOCKERHUB_USERNAME: ${{ github.repository_owner }}
    steps:
      - name: Checkout
        uses: actions/checkout@v2

      - name: Set up QEMU
        uses: docker/setup-qemu-action@v1

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
        with:
          version: latest

      - name: Login to DockerHub
        uses: docker/login-action@v1
        if: ${{ github.event_name == 'push' || github.event_name == 'release' }}
        with:
          username: ${{ github.repository_owner }}
          password: ${{ secrets.DOCKER_ACCESS_TOKEN }}

      - name: Build and Push Latest Tag
        uses: docker/build-push-action@v2
        with:
          context: ./runner
          file: ./runner/${{ matrix.dockerfile }}
          platforms: linux/amd64,linux/arm64
          push: ${{ github.event_name != 'pull_request' }}
          build-args: |
            RUNNER_VERSION=${{ env.RUNNER_VERSION }}
            DOCKER_VERSION=${{ env.DOCKER_VERSION }}
          tags: |
            ${{ env.DOCKERHUB_USERNAME }}/${{ matrix.name }}:latest
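For anyone wanting to sanity-check a runner image outside of CI, the jobs above boil down to a Buildx invocation. A rough local sketch, assuming Docker Buildx and QEMU binfmt emulation are already set up; the image name and tag below are placeholders, not what the workflow publishes:

```shell
# Sketch of the "build" job above for the Ubuntu 20.04 runner image.
cd runner
docker buildx build \
  --platform linux/amd64,linux/arm64 \
  --build-arg RUNNER_VERSION=2.277.1 \
  --build-arg DOCKER_VERSION=19.03.12 \
  -f Dockerfile \
  -t my-dockerhub-user/actions-runner:v2.277.1-ubuntu-20.04 .
# Without --push the multi-arch result stays in the build cache; a multi-platform
# image cannot be --load'ed into the local daemon as a single image.
```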
.github/workflows/build-runner.yml (deleted, vendored, 64 lines removed)
@@ -1,64 +0,0 @@
on:
  pull_request:
    branches:
      - '**'
    paths:
      - 'runner/**'
      - .github/workflows/build-runner.yml
  push:
    branches:
      - master
    paths:
      - runner/patched/*
      - runner/Dockerfile
      - runner/dindrunner.Dockerfile
      - runner/entrypoint.sh
      - .github/workflows/build-runner.yml
name: Runner
jobs:
  build:
    runs-on: ubuntu-latest
    name: Build ${{ matrix.name }}
    strategy:
      matrix:
        include:
          - name: actions-runner
            dockerfile: Dockerfile
          - name: actions-runner-dind
            dockerfile: dindrunner.Dockerfile
    env:
      RUNNER_VERSION: 2.275.1
      DOCKER_VERSION: 19.03.12
      DOCKERHUB_USERNAME: ${{ github.repository_owner }}
    steps:
      - name: Checkout
        uses: actions/checkout@v2

      - name: Set up QEMU
        uses: docker/setup-qemu-action@v1

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
        with:
          version: latest

      - name: Login to DockerHub
        uses: docker/login-action@v1
        if: ${{ github.event_name == 'push' }}
        with:
          username: ${{ github.repository_owner }}
          password: ${{ secrets.DOCKER_ACCESS_TOKEN }}

      - name: Build [and Push]
        uses: docker/build-push-action@v2
        with:
          context: ./runner
          file: ./runner/${{ matrix.dockerfile }}
          platforms: linux/amd64,linux/arm64
          push: ${{ github.event_name != 'pull_request' }}
          build-args: |
            RUNNER_VERSION=${{ env.RUNNER_VERSION }}
            DOCKER_VERSION=${{ env.DOCKER_VERSION }}
          tags: |
            ${{ env.DOCKERHUB_USERNAME }}/${{ matrix.name }}:v${{ env.RUNNER_VERSION }}
            ${{ env.DOCKERHUB_USERNAME }}/${{ matrix.name }}:latest
.github/workflows/release.yml (vendored, 9 changes)
@@ -9,6 +9,10 @@ jobs:
     env:
       DOCKERHUB_USERNAME: ${{ github.repository_owner }}
     steps:
+      - name: Set outputs
+        id: vars
+        run: echo ::set-output name=sha_short::${GITHUB_SHA::7}
+
       - name: Checkout
         uses: actions/checkout@v2
 
@@ -52,5 +56,8 @@ jobs:
           file: Dockerfile
           platforms: linux/amd64,linux/arm64
           push: true
-          tags: ${{ env.DOCKERHUB_USERNAME }}/actions-runner-controller:${{ env.VERSION }}
+          tags: |
+            ${{ env.DOCKERHUB_USERNAME }}/actions-runner-controller:latest
+            ${{ env.DOCKERHUB_USERNAME }}/actions-runner-controller:${{ env.VERSION }}
+            ${{ env.DOCKERHUB_USERNAME }}/actions-runner-controller:${{ env.VERSION }}-${{ steps.vars.outputs.sha_short }}
 
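The new `sha_short` output is plain Bash substring expansion on the commit SHA. A quick sketch with purely illustrative values (the owner, version, and SHA below are placeholders, not values taken from this repository) shows the tag set a release now produces:

```shell
GITHUB_SHA=0123456789abcdef0123456789abcdef01234567  # placeholder commit SHA
VERSION=v0.18.2                                      # placeholder release version
sha_short=${GITHUB_SHA::7}                           # same expansion the new workflow step uses
for tag in latest "${VERSION}" "${VERSION}-${sha_short}"; do
  echo "example-owner/actions-runner-controller:${tag}"
done
```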
.github/workflows/test.yaml (vendored, 1 change)
@@ -6,6 +6,7 @@ on:
       - master
     paths-ignore:
       - 'runner/**'
+      - .github/workflows/build-and-release-runners.yml
 
 jobs:
   test:
.github/workflows/wip.yml (vendored, 6 changes)
@@ -30,11 +30,13 @@ jobs:
           username: ${{ github.repository_owner }}
           password: ${{ secrets.DOCKER_ACCESS_TOKEN }}
 
+      # Considered unstable builds
+      # See Issue #285, PR #286, and PR #323 for more information
       - name: Build and Push
         uses: docker/build-push-action@v2
         with:
           file: Dockerfile
           platforms: linux/amd64,linux/arm64
           push: true
-          tags: ${{ env.DOCKERHUB_USERNAME }}/actions-runner-controller:latest
+          tags: |
+            ${{ env.DOCKERHUB_USERNAME }}/actions-runner-controller:canary
.gitignore (vendored, 3 additions)
@@ -26,3 +26,6 @@ bin
 
 .envrc
 *.pem
+
+# OS
+.DS_STORE
CONTRIBUTING.md (new file, 8 lines)
@@ -0,0 +1,8 @@
# Contributing

### Helm Version Bumps

**Chart Version:** When bumping the chart version, follow semantic versioning (https://semver.org/).<br />
**App Version:** When bumping the app version you will also need to bump the chart version. Again, follow semantic versioning when bumping the chart.

To determine whether you need to bump the MAJOR, MINOR, or PATCH version, review the changes between the previous app version and the new app version and/or ask a maintainer to advise.
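As a concrete sketch of what such a bump touches: the two fields live in the chart's Chart.yaml (the chart path appears elsewhere in this change set under charts/actions-runner-controller, and `version`/`appVersion` are the standard Helm chart fields). The target version numbers below are made-up examples, not a recommendation:

```shell
# Illustrative only: bump the chart and app versions together (GNU sed assumed).
CHART=charts/actions-runner-controller/Chart.yaml
sed -i 's/^version: .*/version: 0.11.0/' "$CHART"        # chart version: bump on any chart change
sed -i 's/^appVersion: .*/appVersion: 0.18.2/' "$CHART"   # app version: bumping it also requires a chart version bump
grep -E '^(version|appVersion):' "$CHART"                 # show the result
```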
@@ -22,7 +22,8 @@ COPY . .
 RUN export GOOS=$(echo ${TARGETPLATFORM} | cut -d / -f1) && \
   export GOARCH=$(echo ${TARGETPLATFORM} | cut -d / -f2) && \
   GOARM=$(echo ${TARGETPLATFORM} | cut -d / -f3 | cut -c2-) && \
-  go build -a -o manager main.go
+  go build -a -o manager main.go && \
+  go build -a -o github-webhook-server ./cmd/githubwebhookserver
 
 # Use distroless as minimal base image to package the manager binary
 # Refer to https://github.com/GoogleContainerTools/distroless for more details
@@ -31,6 +32,7 @@ FROM gcr.io/distroless/static:nonroot
 WORKDIR /
 
 COPY --from=builder /workspace/manager .
+COPY --from=builder /workspace/github-webhook-server .
 
 USER nonroot:nonroot
 
Makefile (90 changes)
@@ -14,6 +14,8 @@ else
 GOBIN=$(shell go env GOBIN)
 endif
 
+TEST_ASSETS=$(PWD)/test-assets
+
 # default list of platforms for which multiarch image is built
 ifeq (${PLATFORMS}, )
 export PLATFORMS="linux/amd64,linux/arm64"
@@ -37,6 +39,13 @@ all: manager
 test: generate fmt vet manifests
 	go test ./... -coverprofile cover.out
 
+test-with-deps: kube-apiserver etcd kubectl
+	# See https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/envtest#pkg-constants
+	TEST_ASSET_KUBE_APISERVER=$(KUBE_APISERVER_BIN) \
+	TEST_ASSET_ETCD=$(ETCD_BIN) \
+	TEST_ASSET_KUBECTL=$(KUBECTL_BIN) \
+	make test
+
 # Build manager binary
 manager: generate fmt vet
 	go build -o bin/manager main.go
@@ -126,7 +135,8 @@ release/clean:
 	rm -rf release
 
 .PHONY: acceptance
-acceptance: release/clean docker-build docker-push release
+acceptance: release/clean docker-build release
+	make acceptance/pull
 	ACCEPTANCE_TEST_SECRET_TYPE=token make acceptance/kind acceptance/setup acceptance/tests acceptance/teardown
 	ACCEPTANCE_TEST_SECRET_TYPE=app make acceptance/kind acceptance/setup acceptance/tests acceptance/teardown
 	ACCEPTANCE_TEST_DEPLOYMENT_TOOL=helm ACCEPTANCE_TEST_SECRET_TYPE=token make acceptance/kind acceptance/setup acceptance/tests acceptance/teardown
@@ -134,8 +144,23 @@ acceptance: release/clean docker-build docker-push release
 
 acceptance/kind:
 	kind create cluster --name acceptance
+	kind load docker-image ${NAME}:${VERSION} --name acceptance
+	kind load docker-image quay.io/brancz/kube-rbac-proxy:v0.8.0 --name acceptance
+	kind load docker-image summerwind/actions-runner:latest --name acceptance
+	kind load docker-image docker:dind --name acceptance
+	kind load docker-image quay.io/jetstack/cert-manager-controller:v1.0.4 --name acceptance
+	kind load docker-image quay.io/jetstack/cert-manager-cainjector:v1.0.4 --name acceptance
+	kind load docker-image quay.io/jetstack/cert-manager-webhook:v1.0.4 --name acceptance
 	kubectl cluster-info --context kind-acceptance
 
+acceptance/pull:
+	docker pull quay.io/brancz/kube-rbac-proxy:v0.8.0
+	docker pull summerwind/actions-runner:latest
+	docker pull docker:dind
+	docker pull quay.io/jetstack/cert-manager-controller:v1.0.4
+	docker pull quay.io/jetstack/cert-manager-cainjector:v1.0.4
+	docker pull quay.io/jetstack/cert-manager-webhook:v1.0.4
+
 acceptance/setup:
 	kubectl apply --validate=false -f https://github.com/jetstack/cert-manager/releases/download/v1.0.4/cert-manager.yaml #kubectl create namespace actions-runner-system
 	kubectl -n cert-manager wait deploy/cert-manager-cainjector --for condition=available --timeout 60s
@@ -191,3 +216,66 @@ ifeq (, $(wildcard $(GOBIN)/yq))
 	}
 endif
 YQ=$(GOBIN)/yq
+
+OS_NAME := $(shell uname -s | tr A-Z a-z)
+
+# find or download etcd
+etcd:
+ifeq (, $(wildcard $(TEST_ASSETS)/etcd))
+	@{ \
+	set -xe ;\
+	INSTALL_TMP_DIR=$$(mktemp -d) ;\
+	cd $$INSTALL_TMP_DIR ;\
+	wget https://github.com/kubernetes-sigs/kubebuilder/releases/download/v2.3.2/kubebuilder_2.3.2_$(OS_NAME)_amd64.tar.gz ;\
+	mkdir -p $(TEST_ASSETS) ;\
+	tar zxvf kubebuilder_2.3.2_$(OS_NAME)_amd64.tar.gz ;\
+	mv kubebuilder_2.3.2_$(OS_NAME)_amd64/bin/etcd $(TEST_ASSETS)/etcd ;\
+	mv kubebuilder_2.3.2_$(OS_NAME)_amd64/bin/kube-apiserver $(TEST_ASSETS)/kube-apiserver ;\
+	mv kubebuilder_2.3.2_$(OS_NAME)_amd64/bin/kubectl $(TEST_ASSETS)/kubectl ;\
+	rm -rf $$INSTALL_TMP_DIR ;\
+	}
+ETCD_BIN=$(TEST_ASSETS)/etcd
+else
+ETCD_BIN=$(TEST_ASSETS)/etcd
+endif
+
+# find or download kube-apiserver
+kube-apiserver:
+ifeq (, $(wildcard $(TEST_ASSETS)/kube-apiserver))
+	@{ \
+	set -xe ;\
+	INSTALL_TMP_DIR=$$(mktemp -d) ;\
+	cd $$INSTALL_TMP_DIR ;\
+	wget https://github.com/kubernetes-sigs/kubebuilder/releases/download/v2.3.2/kubebuilder_2.3.2_$(OS_NAME)_amd64.tar.gz ;\
+	mkdir -p $(TEST_ASSETS) ;\
+	tar zxvf kubebuilder_2.3.2_$(OS_NAME)_amd64.tar.gz ;\
+	mv kubebuilder_2.3.2_$(OS_NAME)_amd64/bin/etcd $(TEST_ASSETS)/etcd ;\
+	mv kubebuilder_2.3.2_$(OS_NAME)_amd64/bin/kube-apiserver $(TEST_ASSETS)/kube-apiserver ;\
+	mv kubebuilder_2.3.2_$(OS_NAME)_amd64/bin/kubectl $(TEST_ASSETS)/kubectl ;\
+	rm -rf $$INSTALL_TMP_DIR ;\
+	}
+KUBE_APISERVER_BIN=$(TEST_ASSETS)/kube-apiserver
+else
+KUBE_APISERVER_BIN=$(TEST_ASSETS)/kube-apiserver
+endif
+
+
+# find or download kubectl
+kubectl:
+ifeq (, $(wildcard $(TEST_ASSETS)/kubectl))
+	@{ \
+	set -xe ;\
+	INSTALL_TMP_DIR=$$(mktemp -d) ;\
+	cd $$INSTALL_TMP_DIR ;\
+	wget https://github.com/kubernetes-sigs/kubebuilder/releases/download/v2.3.2/kubebuilder_2.3.2_$(OS_NAME)_amd64.tar.gz ;\
+	mkdir -p $(TEST_ASSETS) ;\
+	tar zxvf kubebuilder_2.3.2_$(OS_NAME)_amd64.tar.gz ;\
+	mv kubebuilder_2.3.2_$(OS_NAME)_amd64/bin/etcd $(TEST_ASSETS)/etcd ;\
+	mv kubebuilder_2.3.2_$(OS_NAME)_amd64/bin/kube-apiserver $(TEST_ASSETS)/kube-apiserver ;\
+	mv kubebuilder_2.3.2_$(OS_NAME)_amd64/bin/kubectl $(TEST_ASSETS)/kubectl ;\
+	rm -rf $$INSTALL_TMP_DIR ;\
+	}
+KUBECTL_BIN=$(TEST_ASSETS)/kubectl
+else
+KUBECTL_BIN=$(TEST_ASSETS)/kubectl
+endif
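A usage sketch of the new targets, assuming Docker, kind, and the other prerequisites above are installed. The image name and version are placeholders that mirror the development snippet later in this change set, not required values:

```shell
# Run the controller test suite against locally downloaded kubebuilder 2.3.2 assets
# (etcd, kube-apiserver, kubectl land under ./test-assets).
make test-with-deps

# Build the controller image and run the acceptance suite; the acceptance target
# now pre-pulls the side-loaded images itself via acceptance/pull.
NAME=my-dockerhub-user/actions-runner-controller VERSION=latest \
  make docker-build acceptance
```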
README.md (401 changes)
@@ -1,7 +1,32 @@
 # actions-runner-controller
 
+[](https://github.com/jonico/awesome-runners)
+
 This controller operates self-hosted runners for GitHub Actions on your Kubernetes cluster.
+
+ToC:
+
+- [Motivation](#motivation)
+- [Installation](#installation)
+- [GitHub Enterprise support](#github-enterprise-support)
+- [Setting up authentication with GitHub API](#setting-up-authentication-with-github-api)
+- [Deploying using GitHub App Authentication](#deploying-using-github-app-authentication)
+- [Deploying using PAT Authentication](#deploying-using-pat-authentication)
+- [Usage](#usage)
+- [Repository Runners](#repository-runners)
+- [Organization Runners](#organization-runners)
+- [Runner Deployments](#runnerdeployments)
+- [Autoscaling](#autoscaling)
+- [Faster Autoscaling with GitHub Webhook](#faster-autoscaling-with-github-webhook)
+- [Runner with DinD](#runner-with-dind)
+- [Additional tweaks](#additional-tweaks)
+- [Runner labels](#runner-labels)
+- [Runner groups](#runner-groups)
+- [Using EKS IAM role for service accounts](#using-eks-iam-role-for-service-accounts)
+- [Software installed in the runner image](#software-installed-in-the-runner-image)
+- [Common errors](#common-errors)
+- [Contributing](#contributing)
 
 ## Motivation
 
 [GitHub Actions](https://github.com/features/actions) is a very useful tool for automating development. GitHub Actions jobs are run in the cloud by default, but you may want to run your jobs in your environment. [Self-hosted runner](https://github.com/actions/runner) can be used for such use cases, but requires the provisioning and configuration of a virtual machine instance. Instead if you already have a Kubernetes cluster, it makes more sense to run the self-hosted runner on top of it.
@@ -18,44 +43,70 @@ Install the custom resource and actions-runner-controller with `kubectl` or `helm`.
 
 `kubectl`:
 
-```
-# REPLACE "v0.16.1" with the latest release
-kubectl apply -f https://github.com/summerwind/actions-runner-controller/releases/download/v0.16.1/actions-runner-controller.yaml
+```shell
+# REPLACE "v0.18.2" with the version you wish to deploy
+kubectl apply -f https://github.com/summerwind/actions-runner-controller/releases/download/v0.18.2/actions-runner-controller.yaml
 ```
 
 `helm`:
 
-```
+```shell
 helm repo add actions-runner-controller https://summerwind.github.io/actions-runner-controller
 helm upgrade --install -n actions-runner-system actions-runner-controller/actions-runner-controller
 ```
 
 ### Github Enterprise support
 
-If you use either Github Enterprise Cloud or Server (and have recent enought version supporting Actions), you can use **actions-runner-controller** with those, too. Authentication works same way as with public Github (repo and organization level).
+If you use either Github Enterprise Cloud or Server, you can use **actions-runner-controller** with those, too.
+Authentication works the same way as with public Github (repo and organization level).
+The minimum version of Github Enterprise Server is 3.0.0 (or rc1/rc2).
+__**NOTE: The maintainers do not have an Enterprise environment to be able to test changes, so this feature is community driven. Support is on a best-endeavours basis.**__
 
 ```shell
 kubectl set env deploy controller-manager -c manager GITHUB_ENTERPRISE_URL=<GHEC/S URL> --namespace actions-runner-system
 ```
 
-[Enterprise level](https://docs.github.com/en/enterprise-server@2.22/actions/hosting-your-own-runners/adding-self-hosted-runners#adding-a-self-hosted-runner-to-an-enterprise) runners are not working yet as there's no API definition for those.
+#### Enterprise runners usage
+
+In order to use enterprise runners you must have Admin access to Github Enterprise and you should create a Personal Access Token (PAT)
+with `enterprise:admin` access. Enterprise runners are not possible to run with a Github App or any other permission.
+
+When you use enterprise runners those will get access to Github Organisations. However, access to the repositories is **NOT**
+allowed by default. Each Github Organisation must allow Enterprise runner groups to be used in repositories.
+This is needed only one time and is permanent after that.
+
+Example:
+
+```yaml
+apiVersion: actions.summerwind.dev/v1alpha1
+kind: RunnerDeployment
+metadata:
+  name: ghe-runner-deployment
+spec:
+  replicas: 2
+  template:
+    spec:
+      enterprise: your-enterprise-name
+      resources:
+        limits:
+          cpu: "4000m"
+          memory: "2Gi"
+        requests:
+          cpu: "200m"
+          memory: "200Mi"
+```
+
 ## Setting up authentication with GitHub API
 
-There are two ways for actions-runner-controller to authenticate with the GitHub API:
+There are two ways for actions-runner-controller to authenticate with the GitHub API (only one can be configured at a time, however):
 
 1. Using GitHub App.
 2. Using Personal Access Token.
 
-Regardless of which authentication method you use, the same permissions are required, those permissions are:
-- Repository: Administration (read/write)
-- Repository: Actions (read)
-- Organization: Self-hosted runners (read/write)
+Functionality wise there isn't a difference between the two authentication methods. There are, however, some benefits to using a GitHub App for authentication over a PAT, such as an [increased API quota](https://docs.github.com/en/developers/apps/rate-limits-for-github-apps); if you run into rate limiting, consider deploying this solution using GitHub App authentication instead.
 
-**NOTE: It is extremely important to only follow one of the sections below and not both.**
-
-### Using GitHub App
+### Deploying using GitHub App Authentication
 
 You can create a GitHub App for either your account or any organization. If you want to create a GitHub App for your account, open the following link to the creation page, enter any unique name in the "GitHub App name" field, and hit the "Create GitHub App" button at the bottom of the page.
@@ -92,19 +143,29 @@ $ kubectl create secret generic controller-manager \
   --from-file=github_app_private_key=${PRIVATE_KEY_FILE_PATH}
 ```
 
-### Using Personal Access Token
+### Deploying using PAT Authentication
 
-From an account that has `admin` privileges for the repository, create a [personal access token](https://github.com/settings/tokens) with `repo` scope. This token is used to register a self-hosted runner by *actions-runner-controller*.
+A Personal Access Token can be used to register a self-hosted runner by *actions-runner-controller*.
 
-Self-hosted runners in GitHub can either be connected to a single repository, or to a GitHub organization (so they are available to all repositories in the organization). This token is used to register a self-hosted runner by *actions-runner-controller*.
+Self-hosted runners in GitHub can either be connected to a single repository, or to a GitHub organization (so they are available to all repositories in the organization). How you plan on using the runner will affect what scopes are needed for the token.
 
-For adding a runner to a repository, the token should have `repo` scope. If the runner should be added to an organization, the token should have `admin:org` scope. Note that to use a Personal Access Token, you must issue the token with an account that has `admin` privileges (on the repository and/or the organization).
+Log in to a GitHub account that has `admin` privileges for the repository, and [create a personal access token](https://github.com/settings/tokens/new) with the appropriate scopes listed below:
 
-Open the Create Token page from the following link, grant the `repo` and/or `admin:org` scope, and press the "Generate Token" button at the bottom of the page to create the token.
+**Scopes for a Repository Runner**
 
-- [Create personal access token](https://github.com/settings/tokens/new)
+* repo (Full control)
 
-Register the created token (`GITHUB_TOKEN`) as a Kubernetes secret.
+**Scopes for an Organization Runner**
+
+* repo (Full control)
+* admin:org (Full control)
+* admin:public_key - read:public_key
+* admin:repo_hook - read:repo_hook
+* admin:org_hook
+* notifications
+* workflow
+
+Once you have created the appropriate token, deploy it as a secret to the Kubernetes cluster that you are going to deploy the solution on:
 
 ```shell
 kubectl create secret generic controller-manager \
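The `kubectl create secret` command above is cut off at the hunk boundary. A rough sketch of the PAT flavour of that command follows; the `github_token` key name is an assumption inferred from the GitHub App example earlier in this diff (which uses `github_app_*` keys on the same secret), and the namespace mirrors the one used elsewhere in this README:

```shell
# Sketch only: store the PAT for the controller; key name and namespace are assumed.
kubectl create secret generic controller-manager \
  -n actions-runner-system \
  --from-literal=github_token=${GITHUB_TOKEN}
```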
@@ -119,7 +180,7 @@ There are two ways to use this controller:
 - Manage runners one by one with `Runner`.
 - Manage a set of runners with `RunnerDeployment`.
 
-### Repository runners
+### Repository Runners
 
 To launch a single self-hosted runner, you need to create a manifest file that includes a *Runner* resource as follows. This example launches a self-hosted runner with name *example-runner* for the *summerwind/actions-runner-controller* repository.
 
@@ -217,9 +278,33 @@ example-runnerdeploy2475ht2qbr mumoshu/actions-runner-controller-ci Running
 
 #### Autoscaling
 
-`RunnerDeployment` can scale the number of runners between `minReplicas` and `maxReplicas` fields, depending on pending workflow runs.
+A `RunnerDeployment` can scale the number of runners between the `minReplicas` and `maxReplicas` fields based on the chosen scaling metric, as defined in the `metrics` attribute.
+
+**Scaling Metrics**
+
+**TotalNumberOfQueuedAndInProgressWorkflowRuns**
+
+In the below example, `actions-runner` will poll GitHub for all pending workflows with the poll period defined by the sync period configuration. It will then scale to e.g. 3 if there are 3 pending jobs at sync time.
+With this scaling metric we are required to define a list of repositories within our metric.
+
+The scale out performance is controlled via the manager container's startup `--sync-period` argument. The default value is set to 10 minutes to prevent default deployments from rate limiting themselves against the GitHub API.
+
+**Kustomize Config:** The period can be customised in the `config/default/manager_auth_proxy_patch.yaml` patch<br />
+**Helm Config:** `syncPeriod`
+
+**Benefits of this metric**
+1. Supports named repositories, allowing you to restrict the runner to a specified set of repositories server side.
+2. Scales the runner count based on the actual queue depth of the jobs, meaning a more 1:1 scaling of runners to queued jobs.
+3. Like all scaling metrics, you can manage workflow allocation to the RunnerDeployment through the use of [Github labels](#runner-labels).
+
+**Drawbacks of this metric**
+1. Repositories must be named within the scaling metric; maintaining a list of repositories may not be viable in larger or self-serve environments.
+2. May not scale quickly enough for some users' needs. This metric is pull based, so the queue depth is polled as configured by the sync period; as a result scaling performance is bound by this sync period, meaning there is a lag to scaling activity.
+3. Relatively large amounts of API requests are required to maintain this metric; you may run into API rate limiting issues depending on the size of your environment and how aggressive your sync period configuration is.
+
+Example `RunnerDeployment` backed by a `HorizontalRunnerAutoscaler`:
 
-In the below example, `actions-runner` checks for pending workflow runs for each sync period, and scale to e.g. 3 if there're 3 pending jobs at sync time.
 
 ```yaml
 apiVersion: actions.summerwind.dev/v1alpha1
@@ -246,38 +331,34 @@ spec:
     - summerwind/actions-runner-controller
 ```
 
-The scale out performance is controlled via the manager containers startup `--sync-period` argument. The default value is 10 minutes to prevent unconfigured deployments rate limiting themselves from the GitHub API. The period can be customised in the `config/default/manager_auth_proxy_patch.yaml` patch for those that are building the solution via the kustomize setup.
-
-Additionally, the autoscaling feature has an anti-flapping option that prevents periodic loop of scaling up and down.
-By default, it doesn't scale down until the grace period of 10 minutes passes after a scale up. The grace period can be configured by setting `scaleDownDelaySecondsAfterScaleUp`:
+Additionally, the `HorizontalRunnerAutoscaler` also has an anti-flapping option that prevents a periodic loop of scaling up and down.
+By default, it doesn't scale down until the grace period of 10 minutes passes after a scale up. The grace period can be configured by adding the `scaleDownDelaySecondsAfterScaleOut` setting to the `HorizontalRunnerAutoscaler` `spec`:
 
 ```yaml
-apiVersion: actions.summerwind.dev/v1alpha1
-kind: RunnerDeployment
-metadata:
-  name: example-runner-deployment
 spec:
-  template:
-    spec:
-      repository: summerwind/actions-runner-controller
----
-apiVersion: actions.summerwind.dev/v1alpha1
-kind: HorizontalRunnerAutoscaler
-metadata:
-  name: example-runner-deployment-autoscaler
-spec:
-  scaleTargetRef:
-    name: example-runner-deployment
-  minReplicas: 1
-  maxReplicas: 3
   scaleDownDelaySecondsAfterScaleOut: 60
-  metrics:
-  - type: TotalNumberOfQueuedAndInProgressWorkflowRuns
-    repositoryNames:
-    - summerwind/actions-runner-controller
 ```
 
-If you do not want to manage an explicit list of repositories to scale, an alternate autoscaling scheme that can be applied is the PercentageRunnersBusy scheme. The number of desired pods are evaulated by checking how many runners are currently busy and applying a scaleup or scale down factor if certain thresholds are met. By setting the metric type to PercentageRunnersBusy, the HorizontalRunnerAutoscaler will query github for the number of busy runners which live in the RunnerDeployment namespace. Scaleup and scaledown thresholds are the percentage of busy runners at which the number of desired runners are re-evaluated. Scaleup and scaledown factors are the multiplicative factor applied to the current number of runners used to calculate the number of desired runners. This scheme is also especially useful if you want multiple controllers in various clusters, each responsible for scaling their own runner pods per namespace.
+**PercentageRunnersBusy**
+
+The `HorizontalRunnerAutoscaler` will poll GitHub, based on the configured sync period, for the number of busy runners which live in the RunnerDeployment's namespace and scale based on the settings.
+
+**Kustomize Config:** The period can be customised in the `config/default/manager_auth_proxy_patch.yaml` patch<br />
+**Helm Config:** `syncPeriod`
+
+**Benefits of this metric**
+1. Supports named repositories server side, the same as the `TotalNumberOfQueuedAndInProgressWorkflowRuns` metric [#313](https://github.com/summerwind/actions-runner-controller/pull/313)
+2. Supports GitHub organisation-wide scaling without maintaining an explicit list of repositories, which is especially useful for those working at a larger scale. [#223](https://github.com/summerwind/actions-runner-controller/pull/223)
+3. Like all scaling metrics, you can manage workflow allocation to the RunnerDeployment through the use of [Github labels](#runner-labels)
+4. Supports scaling the desired runner count on both a percentage increase/decrease basis as well as on a fixed increase/decrease count basis [#223](https://github.com/summerwind/actions-runner-controller/pull/223) [#315](https://github.com/summerwind/actions-runner-controller/pull/315)
+
+**Drawbacks of this metric**
+1. May not scale quickly enough for some users' needs. This metric is pull based, so the number of busy runners is polled as configured by the sync period; as a result scaling performance is bound by this sync period, meaning there is a lag to scaling activity.
+2. We are scaling up and down based on indicative information rather than a count of the actual number of queued jobs, so the desired runner count is likely to under-provision or over-provision runners relative to the actual job queue depth; this may or may not be a problem for you.
+
+Examples of each scaling type implemented with a `RunnerDeployment` backed by a `HorizontalRunnerAutoscaler`:
 
 ```yaml
 ---
@@ -290,16 +371,153 @@ spec:
     name: example-runner-deployment
   minReplicas: 1
   maxReplicas: 3
-  scaleDownDelaySecondsAfterScaleOut: 60
   metrics:
   - type: PercentageRunnersBusy
-    scaleUpThreshold: '0.75'
-    scaleDownThreshold: '0.3'
-    scaleUpFactor: '1.4'
-    scaleDownFactor: '0.7'
+    scaleUpThreshold: '0.75'    # The percentage of busy runners at which the number of desired runners are re-evaluated to scale up
+    scaleDownThreshold: '0.3'   # The percentage of busy runners at which the number of desired runners are re-evaluated to scale down
+    scaleUpFactor: '1.4'        # The scale up multiplier factor applied to desired count
+    scaleDownFactor: '0.7'      # The scale down multiplier factor applied to desired count
 ```
 
-## Runner with DinD
+```yaml
+---
+apiVersion: actions.summerwind.dev/v1alpha1
+kind: HorizontalRunnerAutoscaler
+metadata:
+  name: example-runner-deployment-autoscaler
+spec:
+  scaleTargetRef:
+    name: example-runner-deployment
+  minReplicas: 1
+  maxReplicas: 3
+  metrics:
+  - type: PercentageRunnersBusy
+    scaleUpThreshold: '0.75'    # The percentage of busy runners at which the number of desired runners are re-evaluated to scale up
+    scaleDownThreshold: '0.3'   # The percentage of busy runners at which the number of desired runners are re-evaluated to scale down
+    ScaleUpAdjustment: '2'      # The scale up runner count added to desired count
+    ScaleDownAdjustment: '1'    # The scale down runner count subtracted from the desired count
+```
+
+Like the previous metric, scaling down respects the anti-flapping configuration applied to the `HorizontalRunnerAutoscaler`, as mentioned previously:
+
+```yaml
+spec:
+  scaleDownDelaySecondsAfterScaleOut: 60
+```
+
+#### Faster Autoscaling with GitHub Webhook
+
+> This feature is an ADVANCED feature which may require more work to set up.
+> Please get prepared to put some time and effort to learn and leverage this feature!
+
+`actions-runner-controller` has an optional Webhook server that receives GitHub Webhook events and scales
+[`RunnerDeployments`](#runnerdeployments) by updating the corresponding [`HorizontalRunnerAutoscalers`](#autoscaling).
+
+Today, the Webhook server can be configured to respond to GitHub `check_run`, `pull_request`, and `push` events
+by scaling up the matching `HorizontalRunnerAutoscaler` by N replica(s), where `N` is configurable within
+the `HorizontalRunnerAutoscaler's` `Spec`.
+
+More concretely, you can configure the targeted GitHub event types and the `N` in
+`scaleUpTriggers`:
+
+```yaml
+kind: HorizontalRunnerAutoscaler
+spec:
+  scaleTargetRef:
+    name: myrunners
+  scaleUpTriggers:
+  - githubEvent:
+      checkRun:
+        types: ["created"]
+        status: "queued"
+    amount: 1
+    duration: "5m"
+```
+
+With the above example, the webhook server scales `myrunners` by `1` replica for 5 minutes on each `check_run` event
+with the type of `created` and the status of `queued` received.
+
+The primary benefit of autoscaling on Webhook compared to the standard autoscaling is that this one allows you to
+immediately add "resource slack" for future GitHub Actions job runs.
+
+In contrast, the standard autoscaling requires you to wait for the next sync period to add
+runners when there aren't enough. You can definitely shorten the sync period to make the standard autoscaling more responsive.
+But doing so eventually results in the controller not functioning due to the GitHub API rate limit.
+
+> You can learn the implementation details in #282
+
+To enable this feature, you firstly need to install the webhook server.
+
+Currently, only our Helm chart has the ability to install it.
+
+```console
+$ helm --upgrade install actions-runner-controller/actions-runner-controller \
+  githubWebhookServer.enabled=true \
+  githubWebhookServer.ports[0].nodePort=33080
+```
+
+The above command will result in exposing the node port 33080 for Webhook events. Usually, you need to create an
+external loadbalancer targeted to the node port, and register the hostname or the IP address of the external loadbalancer
+to the GitHub Webhook.
+
+Once you were able to confirm that the Webhook server is ready and running from GitHub - this is usually verified by
+GitHub sending PING events to the Webhook server - create or update your `HorizontalRunnerAutoscaler` resources
+by learning the following configuration examples.
+
+- [Example 1: Scale up on each `check_run` event](#example-1-scale-up-on-each-check_run-event)
+- [Example 2: Scale on each `pull_request` event against `develop` or `main` branches](#example-2-scale-on-each-pull_request-event-against-develop-or-main-branches)
+
+##### Example 1: Scale up on each `check_run` event
+
+> Note: This should work almost like https://github.com/philips-labs/terraform-aws-github-runner
+
+To scale up replicas of the runners for `example/myrepo` by 1 for 5 minutes on each `check_run`, you write manifests like the below:
+
+```yaml
+kind: RunnerDeployment
+metadata:
+  name: myrunners
+spec:
+  repository: example/myrepo
+---
+kind: HorizontalRunnerAutoscaler
+spec:
+  scaleTargetRef:
+    name: myrunners
+  scaleUpTriggers:
+  - githubEvent:
+      checkRun:
+        types: ["created"]
+        status: "queued"
+    amount: 1
+    duration: "5m"
+```
+
+###### Example 2: Scale on each `pull_request` event against `develop` or `main` branches
+
+```yaml
+kind: RunnerDeployment
+metadata:
+  name: myrunners
+spec:
+  repository: example/myrepo
+---
+kind: HorizontalRunnerAutoscaler
+spec:
+  scaleTargetRef:
+    name: myrunners
+  scaleUpTriggers:
+  - githubEvent:
+      pullRequest:
+        types: ["synchronize"]
+        branches: ["main", "develop"]
+    amount: 1
+    duration: "5m"
+```
+
+See ["activity types"](https://docs.github.com/en/actions/reference/events-that-trigger-workflows#pull_request) for the list of valid values for `scaleUpTriggers[].githubEvent.pullRequest.types`.
+
+### Runner with DinD
 
 When using default runner, runner pod starts up 2 containers: runner and DinD (Docker-in-Docker). This might create issues if there's `LimitRange` set to namespace.
 
@@ -321,7 +539,7 @@ spec:
 
 This also helps with resources, as you don't need to give resources separately to docker and runner.
 
-## Additional tweaks
+### Additional tweaks
 
 You can pass details through the spec selector. Here's an example of what you may like to do:
 
@@ -353,12 +571,20 @@ spec:
       requests:
         cpu: "2.0"
        memory: "4Gi"
+      # Timeout after a node crashed or became unreachable to evict your pods somewhere else (default 5mins)
+      tolerations:
+        - key: "node.kubernetes.io/unreachable"
+          operator: "Exists"
+          effect: "NoExecute"
+          tolerationSeconds: 10
+      # true (default) = A privileged docker sidecar container is included in the runner pod.
+      # false = A docker sidecar container is not included in the runner pod and you can't use docker.
       # If set to false, there are no privileged container and you cannot use docker.
       dockerEnabled: false
-      # If set to true, runner pod container only 1 container that's expected to be able to run docker, too.
-      # image summerwind/actions-runner-dind or custom one should be used with true -value
-      dockerdWithinRunnerContainer: false
-      # Valid if dockerdWithinRunnerContainer is not true
+      # false (default) = Docker support is provided by a sidecar container deployed in the runner pod.
+      # true = No docker sidecar container is deployed in the runner pod but docker can be used within the runner container instead. The image summerwind/actions-runner-dind is used by default.
+      dockerdWithinRunnerContainer: true
+      # Docker sidecar container image tweaks examples below, only applicable if dockerdWithinRunnerContainer = false
      dockerdContainerResources:
        limits:
          cpu: "4.0"
@@ -366,6 +592,7 @@ spec:
      requests:
        cpu: "2.0"
        memory: "4Gi"
+      # Additional N number of sidecar containers
      sidecarContainers:
        - name: mysql
          image: mysql:5.7
@@ -374,13 +601,24 @@ spec:
              value: abcd1234
          securityContext:
            runAsUser: 0
-      # if workDir is not specified, the default working directory is /runner/_work
-      # this setting allows you to customize the working directory location
+      # workDir if not specified (default = /runner/_work)
+      # You can customise this setting allowing you to change the default working directory location
       # for example, the below setting is the same as on the ubuntu-18.04 image
       workDir: /home/runner/work
+      # You can mount some of the shared volumes to the dind container using dockerVolumeMounts, like any other volume mounting.
+      # NOTE: in case you want to use a hostPath like the following example, make sure that Kubernetes doesn't schedule more than one runner
+      # per physical host. You can achieve that by setting pod anti-affinity rules and/or resource requests/limits.
+      volumes:
+        - name: docker-extra
+          hostPath:
+            path: /mnt/docker-extra
+            type: DirectoryOrCreate
+      dockerVolumeMounts:
+        - mountPath: /var/lib/docker
+          name: docker-extra
 ```
 
-## Runner labels
+### Runner labels
 
 To run a workflow job on a self-hosted runner, you can use the following syntax in your workflow:
 
@@ -417,7 +655,7 @@ jobs:
 
 Note that if you specify `self-hosted` in your workflow, then this will run your job on _any_ self-hosted runner, regardless of the labels that they have.
 
-## Runner Groups
+### Runner Groups
 
 Runner groups can be used to limit which repositories are able to use the GitHub Runner at an Organisation level. Runner groups have to be [created in GitHub first](https://docs.github.com/en/actions/hosting-your-own-runners/managing-access-to-self-hosted-runners-using-groups) before they can be referenced.
 
@@ -436,7 +674,7 @@ spec:
   group: NewGroup
 ```
 
-## Using EKS IAM role for service accounts
+### Using EKS IAM role for service accounts
 
 `actions-runner-controller` v0.15.0 or later has support for EKS IAM role for service accounts.
 
@@ -462,7 +700,7 @@ spec:
   fsGroup: 1447
 ```
 
-## Software installed in the runner image
+### Software installed in the runner image
 
 The GitHub hosted runners include a large amount of pre-installed software packages. For Ubuntu 18.04, this list can be found at <https://github.com/actions/virtual-environments/blob/master/images/linux/Ubuntu1804-README.md>
 
@@ -497,9 +735,9 @@ spec:
     image: YOUR_CUSTOM_DOCKER_IMAGE
 ```
 
-## Common Errors
+### Common Errors
 
-### invalid header field value
+#### invalid header field value
 
 ```json
 2020-11-12T22:17:30.693Z ERROR controller-runtime.controller Reconciler error {"controller": "runner", "request": "actions-runner-system/runner-deployment-dk7q8-dk5c9", "error": "failed to create registration token: Post \"https://api.github.com/orgs/$YOUR_ORG_HERE/actions/runners/registration-token\": net/http: invalid header field value \"Bearer $YOUR_TOKEN_HERE\\n\" for key Authorization"}
@@ -510,8 +748,11 @@ Your base64'ed PAT token has a new line at the end, it needs to be created witho
|
|||||||
* `echo -n $TOKEN | base64`
|
* `echo -n $TOKEN | base64`
|
||||||
* Create the secret as described in the docs using the shell and documeneted flags
|
* Create the secret as described in the docs using the shell and documeneted flags
|
||||||
|
|
||||||
-# Developing
+# Contributing

+For more details about any requirements or process, please check out [Getting Started with Contributing](CONTRIBUTING.md).

+**The Controller**<br />
 If you'd like to modify the controller to fork or contribute, I'd suggest using the following snippet for running
 the acceptance test:
@@ -524,7 +765,7 @@ NAME=$DOCKER_USER/actions-runner-controller \
   APP_ID=*** \
   PRIVATE_KEY_FILE_PATH=path/to/pem/file \
   INSTALLATION_ID=*** \
-  make docker-build docker-push acceptance
+  make docker-build acceptance
 ```

Please follow the instructions explained in [Using Personal Access Token](#using-personal-access-token) to obtain
@@ -545,17 +786,9 @@ NAME=$DOCKER_USER/actions-runner-controller \
   PRIVATE_KEY_FILE_PATH=path/to/pem/file \
   INSTALLATION_ID=*** \
   ACCEPTANCE_TEST_SECRET_TYPE=token \
-  make docker-build docker-push \
-    acceptance/setup acceptance/tests
+  make docker-build acceptance/setup \
+    acceptance/tests
 ```
-# Alternatives
-
-The following is a list of alternative solutions that may better fit you depending on your use-case:
-
-- <https://github.com/evryfs/github-actions-runner-operator/>
-
-Although the situation can change over time, as of writing this sentence, the benefits of using `actions-runner-controller` over the alternatives are:
-
-- `actions-runner-controller` has the ability to autoscale runners based on number of pending/progressing jobs (#99)
-- `actions-runner-controller` is able to gracefully stop runners (#103)
-- `actions-runner-controller` has ARM support
+
+**Runner Tests**<br />
+A set of example pipelines (./acceptance/pipelines) are provided in this repository which you can use to validate your runners are working as expected. When raising a PR please run the relevant suites to prove your change hasn't broken anything.
@@ -12,6 +12,9 @@ done

 echo Found runner ${runner_name}.

+# Wait a bit to make sure the runner pod is created before looking for it.
+sleep 2
+
 pod_name=

 while [ -z "${pod_name}" ]; do
@@ -24,6 +27,6 @@ echo Found pod ${pod_name}.

 echo Waiting for pod ${runner_name} to become ready... 1>&2

-kubectl wait pod/${runner_name} --for condition=ready --timeout 180s
+kubectl wait pod/${runner_name} --for condition=ready --timeout 270s

 echo All tests passed. 1>&2
@@ -26,13 +26,14 @@ if [ "${tool}" == "helm" ]; then
     charts/actions-runner-controller \
     -n actions-runner-system \
     --create-namespace \
-    --set syncPeriod=5m
-  kubectl -n actions-runner-system wait deploy/actions-runner-controller --for condition=available
+    --set syncPeriod=5m \
+    --set authSecret.create=false
+  kubectl -n actions-runner-system wait deploy/actions-runner-controller --for condition=available --timeout 60s
 else
   kubectl apply \
     -n actions-runner-system \
     -f release/actions-runner-controller.yaml
-  kubectl -n actions-runner-system wait deploy/controller-manager --for condition=available --timeout 60s
+  kubectl -n actions-runner-system wait deploy/controller-manager --for condition=available --timeout 120s
 fi

 # Adhocly wait for some time until actions-runner-controller's admission webhook gets ready
36
acceptance/pipelines/eks-integration-tests.yaml
Normal file
@@ -0,0 +1,36 @@
name: EKS Integration Tests

on:
  workflow_dispatch:

env:
  IRSA_ROLE_ARN:
  ASSUME_ROLE_ARN:
  AWS_REGION:

jobs:
  assume-role-in-runner-test:
    runs-on: ['self-hosted', 'Linux']
    steps:
      - name: Test aws-actions/configure-aws-credentials Action
        uses: aws-actions/configure-aws-credentials@v1
        with:
          aws-region: ${{ env.AWS_REGION }}
          role-to-assume: ${{ env.ASSUME_ROLE_ARN }}
          role-duration-seconds: 900
  assume-role-in-container-test:
    runs-on: ['self-hosted', 'Linux']
    container:
      image: amazon/aws-cli
      env:
        AWS_WEB_IDENTITY_TOKEN_FILE: /var/run/secrets/eks.amazonaws.com/serviceaccount/token
        AWS_ROLE_ARN: ${{ env.IRSA_ROLE_ARN }}
      volumes:
        - /var/run/secrets/eks.amazonaws.com/serviceaccount/token:/var/run/secrets/eks.amazonaws.com/serviceaccount/token
    steps:
      - name: Test aws-actions/configure-aws-credentials Action in container
        uses: aws-actions/configure-aws-credentials@v1
        with:
          aws-region: ${{ env.AWS_REGION }}
          role-to-assume: ${{ env.ASSUME_ROLE_ARN }}
          role-duration-seconds: 900
83
acceptance/pipelines/runner-integration-tests.yaml
Normal file
@@ -0,0 +1,83 @@
name: Runner Integration Tests

on:
  workflow_dispatch:

env:
  ImageOS: ubuntu18 # Used by ruby/setup-ruby action | Update me for the runner OS version you are testing against

jobs:
  run-step-in-container-test:
    runs-on: ['self-hosted', 'Linux']
    container:
      image: alpine
    steps:
      - name: Test we are working in the container
        run: |
          if [[ $(sed -n '2p' < /etc/os-release | cut -d "=" -f2) != "alpine" ]]; then
            echo "::error ::Failed OS detection test, could not match /etc/os-release with alpine. Are we really running in the container?"
            echo "/etc/os-release below:"
            cat /etc/os-release
            exit 1
          fi
  setup-python-test:
    runs-on: ['self-hosted', 'Linux']
    steps:
      - name: Print native Python environment
        run: |
          which python
          python --version
      - uses: actions/setup-python@v2
        with:
          python-version: 3.9
      - name: Test actions/setup-python works
        run: |
          VERSION=$(python --version 2>&1 | cut -d ' ' -f2 | cut -d '.' -f1-2)
          if [[ $VERSION != '3.9' ]]; then
            echo "Python version detected : $(python --version 2>&1)"
            echo "::error ::Detected python failed setup version test, could not match version with version specified in the setup action"
            exit 1
          else
            echo "Python version detected : $(python --version 2>&1)"
          fi
  setup-node-test:
    runs-on: ['self-hosted', 'Linux']
    steps:
      - uses: actions/setup-node@v2
        with:
          node-version: '12'
      - name: Test actions/setup-node works
        run: |
          VERSION=$(node --version | cut -c 2- | cut -d '.' -f1)
          if [[ $VERSION != '12' ]]; then
            echo "Node version detected : $(node --version 2>&1)"
            echo "::error ::Detected node failed setup version test, could not match version with version specified in the setup action"
            exit 1
          else
            echo "Node version detected : $(node --version 2>&1)"
          fi
  setup-ruby-test:
    runs-on: ['self-hosted', 'Linux']
    steps:
      - uses: ruby/setup-ruby@v1
        with:
          ruby-version: 3.0
          bundler-cache: true
      - name: Test ruby/setup-ruby works
        run: |
          VERSION=$(ruby --version | cut -d ' ' -f2 | cut -d '.' -f1-2)
          if [[ $VERSION != '3.0' ]]; then
            echo "Ruby version detected : $(ruby --version 2>&1)"
            echo "::error ::Detected ruby failed setup version test, could not match version with version specified in the setup action"
            exit 1
          else
            echo "Ruby version detected : $(ruby --version 2>&1)"
          fi
  python-shell-test:
    runs-on: ['self-hosted', 'Linux']
    steps:
      - name: Test Python shell works
        run: |
          import os
          print(os.environ['PATH'])
        shell: python
11
acceptance/testdata/runnerdeploy.yaml
vendored
@@ -7,3 +7,14 @@ spec:
   template:
     spec:
       repository: mumoshu/actions-runner-controller-ci
+      #
+      # dockerd within runner container
+      #
+      ## Replace `mumoshu/actions-runner-dind:dev` with your dind image
+      #dockerdWithinRunnerContainer: true
+      #image: mumoshu/actions-runner-dind:dev
+
+      #
+      # Set the MTU used by dockerd-managed network interfaces (including docker-build-ubuntu)
+      #
+      #dockerMTU: 1450
@@ -41,6 +41,62 @@ type HorizontalRunnerAutoscalerSpec struct {
 	// Metrics is the collection of various metric targets to calculate desired number of runners
 	// +optional
 	Metrics []MetricSpec `json:"metrics,omitempty"`
+
+	// ScaleUpTriggers is an experimental feature to increase the desired replicas by 1
+	// on each webhook requested received by the webhookBasedAutoscaler.
+	//
+	// This feature requires you to also enable and deploy the webhookBasedAutoscaler onto your cluster.
+	//
+	// Note that the added runners remain until the next sync period at least,
+	// and they may or may not be used by GitHub Actions depending on the timing.
+	// They are intended to be used to gain "resource slack" immediately after you
+	// receive a webhook from GitHub, so that you can loosely expect MinReplicas runners to be always available.
+	ScaleUpTriggers []ScaleUpTrigger `json:"scaleUpTriggers,omitempty"`
+
+	CapacityReservations []CapacityReservation `json:"capacityReservations,omitempty" patchStrategy:"merge" patchMergeKey:"name"`
+}
+
+type ScaleUpTrigger struct {
+	GitHubEvent *GitHubEventScaleUpTriggerSpec `json:"githubEvent,omitempty"`
+	Amount      int                            `json:"amount,omitempty"`
+	Duration    metav1.Duration                `json:"duration,omitempty"`
+}
+
+type GitHubEventScaleUpTriggerSpec struct {
+	CheckRun    *CheckRunSpec    `json:"checkRun,omitempty"`
+	PullRequest *PullRequestSpec `json:"pullRequest,omitempty"`
+	Push        *PushSpec        `json:"push,omitempty"`
+}
+
+// https://docs.github.com/en/actions/reference/events-that-trigger-workflows#check_run
+type CheckRunSpec struct {
+	Types  []string `json:"types,omitempty"`
+	Status string   `json:"status,omitempty"`
+
+	// Names is a list of GitHub Actions glob patterns.
+	// Any check_run event whose name matches one of patterns in the list can trigger autoscaling.
+	// Note that check_run name seem to equal to the job name you've defined in your actions workflow yaml file.
+	// So it is very likely that you can utilize this to trigger depending on the job.
+	Names []string `json:"names,omitempty"`
+}
+
+// https://docs.github.com/en/actions/reference/events-that-trigger-workflows#pull_request
+type PullRequestSpec struct {
+	Types    []string `json:"types,omitempty"`
+	Branches []string `json:"branches,omitempty"`
+}
+
+// PushSpec is the condition for triggering scale-up on push event
+// Also see https://docs.github.com/en/actions/reference/events-that-trigger-workflows#push
+type PushSpec struct {
+}
+
+// CapacityReservation specifies the number of replicas temporarily added
+// to the scale target until ExpirationTime.
+type CapacityReservation struct {
+	Name           string      `json:"name,omitempty"`
+	ExpirationTime metav1.Time `json:"expirationTime,omitempty"`
+	Replicas       int         `json:"replicas,omitempty"`
 }

 type ScaleTargetRef struct {
@@ -76,6 +132,16 @@ type MetricSpec struct {
 	// to determine how many pods should be removed.
 	// +optional
 	ScaleDownFactor string `json:"scaleDownFactor,omitempty"`
+
+	// ScaleUpAdjustment is the number of runners added on scale-up.
+	// You can only specify either ScaleUpFactor or ScaleUpAdjustment.
+	// +optional
+	ScaleUpAdjustment int `json:"scaleUpAdjustment,omitempty"`
+
+	// ScaleDownAdjustment is the number of runners removed on scale-down.
+	// You can only specify either ScaleDownFactor or ScaleDownAdjustment.
+	// +optional
+	ScaleDownAdjustment int `json:"scaleDownAdjustment,omitempty"`
 }

 type HorizontalRunnerAutoscalerStatus struct {
@@ -90,7 +156,19 @@ type HorizontalRunnerAutoscalerStatus struct {
 	DesiredReplicas *int `json:"desiredReplicas,omitempty"`

 	// +optional
+	// +nullable
 	LastSuccessfulScaleOutTime *metav1.Time `json:"lastSuccessfulScaleOutTime,omitempty"`
+
+	// +optional
+	CacheEntries []CacheEntry `json:"cacheEntries,omitempty"`
+}
+
+const CacheEntryKeyDesiredReplicas = "desiredReplicas"
+
+type CacheEntry struct {
+	Key            string      `json:"key,omitempty"`
+	Value          int         `json:"value,omitempty"`
+	ExpirationTime metav1.Time `json:"expirationTime,omitempty"`
 }

 // +kubebuilder:object:root=true
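To make the new fields above concrete, here is a minimal sketch of a HorizontalRunnerAutoscaler using a check_run-based scale-up trigger; the resource names and trigger values are placeholders, and the layout follows the CRD schema added later in this diff:

```yaml
apiVersion: actions.summerwind.dev/v1alpha1
kind: HorizontalRunnerAutoscaler
metadata:
  name: example-webhook-autoscaler    # placeholder name
spec:
  scaleTargetRef:
    name: example-runnerdeploy        # placeholder RunnerDeployment to scale
  minReplicas: 1
  maxReplicas: 10
  # Experimental: each matching webhook received by the webhookBasedAutoscaler
  # adds `amount` replicas for `duration`.
  scaleUpTriggers:
    - githubEvent:
        checkRun:
          types: ["created"]
          status: "queued"
      amount: 1
      duration: "5m"
```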
@@ -18,6 +18,7 @@ package v1alpha1

 import (
 	"errors"
+	"k8s.io/apimachinery/pkg/api/resource"

 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -25,6 +26,10 @@ import (

 // RunnerSpec defines the desired state of Runner
 type RunnerSpec struct {
+	// +optional
+	// +kubebuilder:validation:Pattern=`^[^/]+$`
+	Enterprise string `json:"enterprise,omitempty"`
+
 	// +optional
 	// +kubebuilder:validation:Pattern=`^[^/]+$`
 	Organization string `json:"organization,omitempty"`
@@ -44,6 +49,8 @@ type RunnerSpec struct {
 	// +optional
 	DockerdContainerResources corev1.ResourceRequirements `json:"dockerdContainerResources,omitempty"`
 	// +optional
+	DockerVolumeMounts []corev1.VolumeMount `json:"dockerVolumeMounts,omitempty"`
+	// +optional
 	Resources corev1.ResourceRequirements `json:"resources,omitempty"`
 	// +optional
 	VolumeMounts []corev1.VolumeMount `json:"volumeMounts,omitempty"`
@@ -88,16 +95,32 @@ type RunnerSpec struct {
 	DockerdWithinRunnerContainer *bool `json:"dockerdWithinRunnerContainer,omitempty"`
 	// +optional
 	DockerEnabled *bool `json:"dockerEnabled,omitempty"`
+	// +optional
+	DockerMTU *int64 `json:"dockerMTU,omitempty"`
+	// +optional
+	HostAliases []corev1.HostAlias `json:"hostAliases,omitempty"`
+	// +optional
+	VolumeSizeLimit *resource.Quantity `json:"volumeSizeLimit,omitempty"`
 }

 // ValidateRepository validates repository field.
 func (rs *RunnerSpec) ValidateRepository() error {
-	// Organization and repository are both exclusive.
-	if len(rs.Organization) == 0 && len(rs.Repository) == 0 {
-		return errors.New("Spec needs organization or repository")
+	// Enterprise, Organization and repository are both exclusive.
+	foundCount := 0
+	if len(rs.Organization) > 0 {
+		foundCount += 1
 	}
-	if len(rs.Organization) > 0 && len(rs.Repository) > 0 {
-		return errors.New("Spec cannot have both organization and repository")
+	if len(rs.Repository) > 0 {
+		foundCount += 1
+	}
+	if len(rs.Enterprise) > 0 {
+		foundCount += 1
+	}
+	if foundCount == 0 {
+		return errors.New("Spec needs enterprise, organization or repository")
+	}
+	if foundCount > 1 {
+		return errors.New("Spec cannot have many fields defined enterprise, organization and repository")
 	}

 	return nil
@@ -105,14 +128,22 @@ func (rs *RunnerSpec) ValidateRepository() error {

 // RunnerStatus defines the observed state of Runner
 type RunnerStatus struct {
+	// +optional
 	Registration RunnerStatusRegistration `json:"registration"`
-	Phase   string `json:"phase"`
-	Reason  string `json:"reason"`
-	Message string `json:"message"`
+	// +optional
+	Phase string `json:"phase,omitempty"`
+	// +optional
+	Reason string `json:"reason,omitempty"`
+	// +optional
+	Message string `json:"message,omitempty"`
+	// +optional
+	// +nullable
+	LastRegistrationCheckTime *metav1.Time `json:"lastRegistrationCheckTime,omitempty"`
 }

 // RunnerStatusRegistration contains runner registration status
 type RunnerStatusRegistration struct {
+	Enterprise   string   `json:"enterprise,omitempty"`
 	Organization string   `json:"organization,omitempty"`
 	Repository   string   `json:"repository,omitempty"`
 	Labels       []string `json:"labels,omitempty"`
@@ -122,6 +153,7 @@ type RunnerStatusRegistration struct {

 // +kubebuilder:object:root=true
 // +kubebuilder:subresource:status
+// +kubebuilder:printcolumn:JSONPath=".spec.enterprise",name=Enterprise,type=string
 // +kubebuilder:printcolumn:JSONPath=".spec.organization",name=Organization,type=string
 // +kubebuilder:printcolumn:JSONPath=".spec.repository",name=Repository,type=string
 // +kubebuilder:printcolumn:JSONPath=".spec.labels",name=Labels,type=string
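A rough sketch of the new RunnerSpec fields in use; all names, addresses and sizes below are placeholders, and the exact semantics of `volumeSizeLimit` should be checked against the controller code rather than taken from this example:

```yaml
apiVersion: actions.summerwind.dev/v1alpha1
kind: RunnerDeployment
metadata:
  name: example-runner-options         # placeholder name
spec:
  template:
    spec:
      # With the new validation, exactly one of enterprise, organization or repository may be set.
      enterprise: your-enterprise      # placeholder enterprise slug
      dockerMTU: 1450                  # MTU for dockerd-managed network interfaces
      hostAliases:
        - ip: "10.0.0.10"              # placeholder address
          hostnames: ["artifacts.internal"]
      volumeSizeLimit: 10Gi            # assumed: caps the runner's ephemeral volume
      dockerVolumeMounts:
        - name: var-lib-docker         # must match a volume defined below
          mountPath: /var/lib/docker
      volumes:
        - name: var-lib-docker
          emptyDir: {}
```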
@@ -25,13 +25,16 @@ const (
 	AutoscalingMetricTypePercentageRunnersBusy = "PercentageRunnersBusy"
 )

-// RunnerReplicaSetSpec defines the desired state of RunnerDeployment
+// RunnerDeploymentSpec defines the desired state of RunnerDeployment
 type RunnerDeploymentSpec struct {
 	// +optional
 	// +nullable
 	Replicas *int `json:"replicas,omitempty"`

-	Template RunnerTemplate `json:"template"`
+	// +optional
+	// +nullable
+	Selector *metav1.LabelSelector `json:"selector"`
+
+	Template RunnerTemplate `json:"template"`
 }

 type RunnerDeploymentStatus struct {
@@ -26,7 +26,10 @@ type RunnerReplicaSetSpec struct {
 	// +nullable
 	Replicas *int `json:"replicas,omitempty"`

-	Template RunnerTemplate `json:"template"`
+	// +optional
+	// +nullable
+	Selector *metav1.LabelSelector `json:"selector"`
+	Template RunnerTemplate `json:"template"`
 }

 type RunnerReplicaSetStatus struct {
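A brief sketch of the new `selector` field; by analogy with core Deployments it is assumed to match the template's labels, but that behaviour is defined by the controller, not by this type alone:

```yaml
apiVersion: actions.summerwind.dev/v1alpha1
kind: RunnerDeployment
metadata:
  name: example-selector             # placeholder name
spec:
  selector:
    matchLabels:
      app: example-runner
  template:
    metadata:
      labels:
        app: example-runner
    spec:
      repository: your-org/your-repo   # placeholder repository
```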
@@ -22,9 +22,97 @@ package v1alpha1

 import (
 	"k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 )

+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CacheEntry) DeepCopyInto(out *CacheEntry) {
+	*out = *in
+	in.ExpirationTime.DeepCopyInto(&out.ExpirationTime)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CacheEntry.
+func (in *CacheEntry) DeepCopy() *CacheEntry {
+	if in == nil {
+		return nil
+	}
+	out := new(CacheEntry)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CapacityReservation) DeepCopyInto(out *CapacityReservation) {
+	*out = *in
+	in.ExpirationTime.DeepCopyInto(&out.ExpirationTime)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CapacityReservation.
+func (in *CapacityReservation) DeepCopy() *CapacityReservation {
+	if in == nil {
+		return nil
+	}
+	out := new(CapacityReservation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CheckRunSpec) DeepCopyInto(out *CheckRunSpec) {
+	*out = *in
+	if in.Types != nil {
+		in, out := &in.Types, &out.Types
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.Names != nil {
+		in, out := &in.Names, &out.Names
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CheckRunSpec.
+func (in *CheckRunSpec) DeepCopy() *CheckRunSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(CheckRunSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GitHubEventScaleUpTriggerSpec) DeepCopyInto(out *GitHubEventScaleUpTriggerSpec) {
+	*out = *in
+	if in.CheckRun != nil {
+		in, out := &in.CheckRun, &out.CheckRun
+		*out = new(CheckRunSpec)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.PullRequest != nil {
+		in, out := &in.PullRequest, &out.PullRequest
+		*out = new(PullRequestSpec)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Push != nil {
+		in, out := &in.Push, &out.Push
+		*out = new(PushSpec)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitHubEventScaleUpTriggerSpec.
+func (in *GitHubEventScaleUpTriggerSpec) DeepCopy() *GitHubEventScaleUpTriggerSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(GitHubEventScaleUpTriggerSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *HorizontalRunnerAutoscaler) DeepCopyInto(out *HorizontalRunnerAutoscaler) {
 	*out = *in
@@ -110,6 +198,20 @@ func (in *HorizontalRunnerAutoscalerSpec) DeepCopyInto(out *HorizontalRunnerAuto
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
 	}
+	if in.ScaleUpTriggers != nil {
+		in, out := &in.ScaleUpTriggers, &out.ScaleUpTriggers
+		*out = make([]ScaleUpTrigger, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.CapacityReservations != nil {
+		in, out := &in.CapacityReservations, &out.CapacityReservations
+		*out = make([]CapacityReservation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
 }

 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalRunnerAutoscalerSpec.
@@ -134,6 +236,13 @@ func (in *HorizontalRunnerAutoscalerStatus) DeepCopyInto(out *HorizontalRunnerAu
 		in, out := &in.LastSuccessfulScaleOutTime, &out.LastSuccessfulScaleOutTime
 		*out = (*in).DeepCopy()
 	}
+	if in.CacheEntries != nil {
+		in, out := &in.CacheEntries, &out.CacheEntries
+		*out = make([]CacheEntry, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
 }

 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalRunnerAutoscalerStatus.
@@ -166,6 +275,46 @@ func (in *MetricSpec) DeepCopy() *MetricSpec {
 	return out
 }

+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PullRequestSpec) DeepCopyInto(out *PullRequestSpec) {
+	*out = *in
+	if in.Types != nil {
+		in, out := &in.Types, &out.Types
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.Branches != nil {
+		in, out := &in.Branches, &out.Branches
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PullRequestSpec.
+func (in *PullRequestSpec) DeepCopy() *PullRequestSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(PullRequestSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PushSpec) DeepCopyInto(out *PushSpec) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PushSpec.
+func (in *PushSpec) DeepCopy() *PushSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(PushSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *Runner) DeepCopyInto(out *Runner) {
 	*out = *in
@@ -260,6 +409,11 @@ func (in *RunnerDeploymentSpec) DeepCopyInto(out *RunnerDeploymentSpec) {
 		*out = new(int)
 		**out = **in
 	}
+	if in.Selector != nil {
+		in, out := &in.Selector, &out.Selector
+		*out = new(metav1.LabelSelector)
+		(*in).DeepCopyInto(*out)
+	}
 	in.Template.DeepCopyInto(&out.Template)
 }

@@ -392,6 +546,11 @@ func (in *RunnerReplicaSetSpec) DeepCopyInto(out *RunnerReplicaSetSpec) {
 		*out = new(int)
 		**out = **in
 	}
+	if in.Selector != nil {
+		in, out := &in.Selector, &out.Selector
+		*out = new(metav1.LabelSelector)
+		(*in).DeepCopyInto(*out)
+	}
 	in.Template.DeepCopyInto(&out.Template)
 }

@@ -436,6 +595,13 @@ func (in *RunnerSpec) DeepCopyInto(out *RunnerSpec) {
 		}
 	}
 	in.DockerdContainerResources.DeepCopyInto(&out.DockerdContainerResources)
+	if in.DockerVolumeMounts != nil {
+		in, out := &in.DockerVolumeMounts, &out.DockerVolumeMounts
+		*out = make([]v1.VolumeMount, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
 	in.Resources.DeepCopyInto(&out.Resources)
 	if in.VolumeMounts != nil {
 		in, out := &in.VolumeMounts, &out.VolumeMounts
@@ -535,6 +701,23 @@ func (in *RunnerSpec) DeepCopyInto(out *RunnerSpec) {
 		*out = new(bool)
 		**out = **in
 	}
+	if in.DockerMTU != nil {
+		in, out := &in.DockerMTU, &out.DockerMTU
+		*out = new(int64)
+		**out = **in
+	}
+	if in.HostAliases != nil {
+		in, out := &in.HostAliases, &out.HostAliases
+		*out = make([]v1.HostAlias, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.VolumeSizeLimit != nil {
+		in, out := &in.VolumeSizeLimit, &out.VolumeSizeLimit
+		x := (*in).DeepCopy()
+		*out = &x
+	}
 }

 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunnerSpec.
@@ -551,6 +734,10 @@ func (in *RunnerSpec) DeepCopy() *RunnerSpec {
 func (in *RunnerStatus) DeepCopyInto(out *RunnerStatus) {
 	*out = *in
 	in.Registration.DeepCopyInto(&out.Registration)
+	if in.LastRegistrationCheckTime != nil {
+		in, out := &in.LastRegistrationCheckTime, &out.LastRegistrationCheckTime
+		*out = (*in).DeepCopy()
+	}
 }

 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunnerStatus.
@@ -615,3 +802,24 @@ func (in *ScaleTargetRef) DeepCopy() *ScaleTargetRef {
 	in.DeepCopyInto(out)
 	return out
 }
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ScaleUpTrigger) DeepCopyInto(out *ScaleUpTrigger) {
+	*out = *in
+	if in.GitHubEvent != nil {
+		in, out := &in.GitHubEvent, &out.GitHubEvent
+		*out = new(GitHubEventScaleUpTriggerSpec)
+		(*in).DeepCopyInto(*out)
+	}
+	out.Duration = in.Duration
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleUpTrigger.
+func (in *ScaleUpTrigger) DeepCopy() *ScaleUpTrigger {
+	if in == nil {
+		return nil
+	}
+	out := new(ScaleUpTrigger)
+	in.DeepCopyInto(out)
+	return out
+}
@@ -15,22 +15,20 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.2.1
+version: 0.11.0

-# This is the version number of the application being deployed. This version number should be
-# incremented each time you make changes to the application. Versions are not expected to
-# follow Semantic Versioning. They should reflect the version the application is using.
-appVersion: 0.16.1
+# Used as the default manager tag value when no tag property is provided in the values.yaml
+appVersion: 0.18.2

 home: https://github.com/summerwind/actions-runner-controller

 sources:
 - https://github.com/summerwind/actions-runner-controller

 maintainers:
 - name: summerwind
   email: contact@summerwind.jp
   url: https://github.com/summerwind
 - name: funkypenguin
   email: davidy@funkypenguin.co.nz
   url: https://www.funkypenguin.co.nz
@@ -22,6 +22,9 @@ resources:
     cpu: 100m
     memory: 128Mi

+authSecret:
+  create: false
+
 # Set the following to true to create a dummy secret, allowing the manager pod to start
 # This is only useful in CI
 createDummySecret: true
@@ -48,6 +48,20 @@ spec:
       description: HorizontalRunnerAutoscalerSpec defines the desired state of HorizontalRunnerAutoscaler
       properties:
+        capacityReservations:
+          items:
+            description: CapacityReservation specifies the number of replicas temporarily added to the scale target until ExpirationTime.
+            properties:
+              expirationTime:
+                format: date-time
+                type: string
+              name:
+                type: string
+              replicas:
+                type: integer
+            type: object
+          type: array
         maxReplicas:
           description: MinReplicas is the maximum number of replicas the deployment is allowed to scale
@@ -64,6 +78,11 @@ spec:
               items:
                 type: string
               type: array
+            scaleDownAdjustment:
+              description: ScaleDownAdjustment is the number of runners removed on scale-down. You can only specify either ScaleDownFactor or ScaleDownAdjustment.
+              type: integer
             scaleDownFactor:
               description: ScaleDownFactor is the multiplicative factor applied to the current number of runners used to determine how many
@@ -73,6 +92,10 @@ spec:
               description: ScaleDownThreshold is the percentage of busy runners less than which will trigger the hpa to scale the runners down.
               type: string
+            scaleUpAdjustment:
+              description: ScaleUpAdjustment is the number of runners added on scale-up. You can only specify either ScaleUpFactor or ScaleUpAdjustment.
+              type: integer
             scaleUpFactor:
               description: ScaleUpFactor is the multiplicative factor applied to the current number of runners used to determine how many
@@ -104,9 +127,79 @@ spec:
             name:
               type: string
           type: object
+        scaleUpTriggers:
+          description: "ScaleUpTriggers is an experimental feature to increase the desired replicas by 1 on each webhook requested received by the webhookBasedAutoscaler. \n This feature requires you to also enable and deploy the webhookBasedAutoscaler onto your cluster. \n Note that the added runners remain until the next sync period at least, and they may or may not be used by GitHub Actions depending on the timing. They are intended to be used to gain \"resource slack\" immediately after you receive a webhook from GitHub, so that you can loosely expect MinReplicas runners to be always available."
+          items:
+            properties:
+              amount:
+                type: integer
+              duration:
+                type: string
+              githubEvent:
+                properties:
+                  checkRun:
+                    description: https://docs.github.com/en/actions/reference/events-that-trigger-workflows#check_run
+                    properties:
+                      names:
+                        description: Names is a list of GitHub Actions glob patterns. Any check_run event whose name matches one of patterns in the list can trigger autoscaling. Note that check_run name seem to equal to the job name you've defined in your actions workflow yaml file. So it is very likely that you can utilize this to trigger depending on the job.
+                        items:
+                          type: string
+                        type: array
+                      status:
+                        type: string
+                      types:
+                        items:
+                          type: string
+                        type: array
+                    type: object
+                  pullRequest:
+                    description: https://docs.github.com/en/actions/reference/events-that-trigger-workflows#pull_request
+                    properties:
+                      branches:
+                        items:
+                          type: string
+                        type: array
+                      types:
+                        items:
+                          type: string
+                        type: array
+                    type: object
+                  push:
+                    description: PushSpec is the condition for triggering scale-up on push event Also see https://docs.github.com/en/actions/reference/events-that-trigger-workflows#push
+                    type: object
+                type: object
+            type: object
+          type: array
       type: object
     status:
       properties:
+        cacheEntries:
+          items:
+            properties:
+              expirationTime:
+                format: date-time
+                type: string
+              key:
+                type: string
+              value:
+                type: integer
+            type: object
+          type: array
         desiredReplicas:
           description: DesiredReplicas is the total number of desired, non-terminated and latest pods to be set for the primary RunnerSet This doesn't include
@@ -114,6 +207,7 @@ spec:
           type: integer
         lastSuccessfulScaleOutTime:
           format: date-time
+          nullable: true
           type: string
         observedGeneration:
           description: ObservedGeneration is the most recent generation observed
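Complementing the schema above, a minimal sketch combining the fixed-step scaling knobs with a capacity reservation; all names, thresholds and timestamps are placeholders:

```yaml
apiVersion: actions.summerwind.dev/v1alpha1
kind: HorizontalRunnerAutoscaler
metadata:
  name: example-fixed-step-autoscaler   # placeholder name
spec:
  scaleTargetRef:
    name: example-runnerdeploy          # placeholder RunnerDeployment
  minReplicas: 1
  maxReplicas: 10
  metrics:
    - type: PercentageRunnersBusy
      scaleUpThreshold: '0.75'          # assumed threshold values
      scaleDownThreshold: '0.25'
      # Fixed-step alternatives to the multiplicative scaleUpFactor/scaleDownFactor.
      scaleUpAdjustment: 2
      scaleDownAdjustment: 1
  # Temporarily adds replicas to the scale target until the expiration time.
  capacityReservations:
    - name: example-reservation
      expirationTime: "2021-05-01T00:00:00Z"   # placeholder timestamp
      replicas: 2
```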
@@ -38,11 +38,42 @@ spec:
         metadata:
           type: object
         spec:
-          description: RunnerReplicaSetSpec defines the desired state of RunnerDeployment
+          description: RunnerDeploymentSpec defines the desired state of RunnerDeployment
           properties:
             replicas:
               nullable: true
               type: integer
+            selector:
+              description: A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects.
+              nullable: true
+              properties:
+                matchExpressions:
+                  description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+                  items:
+                    description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+                    properties:
+                      key:
+                        description: key is the label key that the selector applies to.
+                        type: string
+                      operator:
+                        description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+                        type: string
+                      values:
+                        description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+                        items:
+                          type: string
+                        type: array
+                    required:
+                    - key
+                    - operator
+                    type: object
+                  type: array
+                matchLabels:
+                  additionalProperties:
+                    type: string
+                  description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+                  type: object
+              type: object
             template:
               properties:
                 metadata:
@@ -402,6 +433,36 @@ spec:
                       type: array
                     dockerEnabled:
                       type: boolean
+                    dockerMTU:
+                      format: int64
+                      type: integer
+                    dockerVolumeMounts:
+                      items:
+                        description: VolumeMount describes a mounting of a Volume within a container.
+                        properties:
+                          mountPath:
+                            description: Path within the container at which the volume should be mounted. Must not contain ':'.
+                            type: string
+                          mountPropagation:
+                            description: mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10.
+                            type: string
+                          name:
+                            description: This must match the Name of a Volume.
+                            type: string
+                          readOnly:
+                            description: Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false.
+                            type: boolean
+                          subPath:
+                            description: Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root).
+                            type: string
+                          subPathExpr:
+                            description: Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive. This field is beta in 1.15.
+                            type: string
+                        required:
+                        - mountPath
+                        - name
+                        type: object
+                      type: array
                     dockerdContainerResources:
                       description: ResourceRequirements describes the compute resource requirements.
                       properties:
@@ -426,6 +487,9 @@ spec:
                       type: object
                     dockerdWithinRunnerContainer:
                       type: boolean
+                    enterprise:
+                      pattern: ^[^/]+$
+                      type: string
                     env:
                       items:
                         description: EnvVar represents an environment variable present in a Container.
@@ -543,6 +607,20 @@ spec:
                       type: array
                     group:
                       type: string
+                    hostAliases:
+                      items:
+                        description: HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the pod's hosts file.
+                        properties:
+                          hostnames:
+                            description: Hostnames for the above IP address.
+                            items:
+                              type: string
+                            type: array
+                          ip:
+                            description: IP address of the host file entry.
+                            type: string
+                        type: object
+                      type: array
                     image:
                       type: string
                     imagePullPolicy:
@@ -731,6 +809,12 @@ spec:
                       - name
                       type: object
                       type: array
+                    volumeSizeLimit:
+                      anyOf:
+                      - type: integer
+                      - type: string
+                      pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+                      x-kubernetes-int-or-string: true
                     volumes:
                       items:
                         description: Volume represents a named volume in a pod that may be accessed by any container in the pod.
@@ -43,6 +43,37 @@ spec:
             replicas:
               nullable: true
               type: integer
+            selector:
+              description: A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects.
+              nullable: true
+              properties:
+                matchExpressions:
+                  description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+                  items:
+                    description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+                    properties:
+                      key:
+                        description: key is the label key that the selector applies to.
+                        type: string
+                      operator:
+                        description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+                        type: string
+                      values:
+                        description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+                        items:
+                          type: string
+                        type: array
+                    required:
+                    - key
+                    - operator
+                    type: object
+                  type: array
+                matchLabels:
+                  additionalProperties:
+                    type: string
+                  description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+                  type: object
+              type: object
             template:
               properties:
                 metadata:
@@ -402,6 +433,36 @@ spec:
                       type: array
                     dockerEnabled:
                       type: boolean
+                    dockerMTU:
+                      format: int64
+                      type: integer
+                    dockerVolumeMounts:
+                      items:
+                        description: VolumeMount describes a mounting of a Volume within a container.
+                        properties:
+                          mountPath:
+                            description: Path within the container at which the volume should be mounted. Must not contain ':'.
+                            type: string
+                          mountPropagation:
+                            description: mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10.
+                            type: string
+                          name:
+                            description: This must match the Name of a Volume.
+                            type: string
+                          readOnly:
+                            description: Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false.
+                            type: boolean
+                          subPath:
+                            description: Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root).
+                            type: string
+                          subPathExpr:
+                            description: Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive. This field is beta in 1.15.
+                            type: string
+                        required:
+                        - mountPath
+                        - name
+                        type: object
+                      type: array
                     dockerdContainerResources:
                       description: ResourceRequirements describes the compute resource requirements.
                       properties:
@@ -426,6 +487,9 @@ spec:
                       type: object
                     dockerdWithinRunnerContainer:
                       type: boolean
+                    enterprise:
+                      pattern: ^[^/]+$
+                      type: string
                     env:
                       items:
                         description: EnvVar represents an environment variable present in a Container.
@@ -543,6 +607,20 @@ spec:
                       type: array
                     group:
                       type: string
+                    hostAliases:
+                      items:
+                        description: HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the pod's hosts file.
+                        properties:
+                          hostnames:
+                            description: Hostnames for the above IP address.
+                            items:
+                              type: string
+                            type: array
+                          ip:
+                            description: IP address of the host file entry.
+                            type: string
+                        type: object
+                      type: array
                     image:
                       type: string
                     imagePullPolicy:
@@ -731,6 +809,12 @@ spec:
                       - name
                       type: object
                       type: array
+                    volumeSizeLimit:
+                      anyOf:
+                      - type: integer
+                      - type: string
+                      pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+                      x-kubernetes-int-or-string: true
                     volumes:
                       items:
                         description: Volume represents a named volume in a pod that may be accessed by any container in the pod.
@@ -7,6 +7,9 @@ metadata:
 name: runners.actions.summerwind.dev
 spec:
 additionalPrinterColumns:
+- JSONPath: .spec.enterprise
+  name: Enterprise
+  type: string
 - JSONPath: .spec.organization
 name: Organization
 type: string
@@ -395,6 +398,36 @@ spec:
|
|||||||
type: array
|
type: array
|
||||||
dockerEnabled:
|
dockerEnabled:
|
||||||
type: boolean
|
type: boolean
|
||||||
|
dockerMTU:
|
||||||
|
format: int64
|
||||||
|
type: integer
|
||||||
|
dockerVolumeMounts:
|
||||||
|
items:
|
||||||
|
description: VolumeMount describes a mounting of a Volume within a container.
|
||||||
|
properties:
|
||||||
|
mountPath:
|
||||||
|
description: Path within the container at which the volume should be mounted. Must not contain ':'.
|
||||||
|
type: string
|
||||||
|
mountPropagation:
|
||||||
|
description: mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10.
|
||||||
|
type: string
|
||||||
|
name:
|
||||||
|
description: This must match the Name of a Volume.
|
||||||
|
type: string
|
||||||
|
readOnly:
|
||||||
|
description: Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false.
|
||||||
|
type: boolean
|
||||||
|
subPath:
|
||||||
|
description: Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root).
|
||||||
|
type: string
|
||||||
|
subPathExpr:
|
||||||
|
description: Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive. This field is beta in 1.15.
|
||||||
|
type: string
|
||||||
|
required:
|
||||||
|
- mountPath
|
||||||
|
- name
|
||||||
|
type: object
|
||||||
|
type: array
|
||||||
dockerdContainerResources:
|
dockerdContainerResources:
|
||||||
description: ResourceRequirements describes the compute resource requirements.
|
description: ResourceRequirements describes the compute resource requirements.
|
||||||
properties:
|
properties:
|
||||||
@@ -419,6 +452,9 @@ spec:
|
|||||||
type: object
|
type: object
|
||||||
dockerdWithinRunnerContainer:
|
dockerdWithinRunnerContainer:
|
||||||
type: boolean
|
type: boolean
|
||||||
|
enterprise:
|
||||||
|
pattern: ^[^/]+$
|
||||||
|
type: string
|
||||||
env:
|
env:
|
||||||
items:
|
items:
|
||||||
description: EnvVar represents an environment variable present in a Container.
|
description: EnvVar represents an environment variable present in a Container.
|
||||||
@@ -536,6 +572,20 @@ spec:
|
|||||||
type: array
|
type: array
|
||||||
group:
|
group:
|
||||||
type: string
|
type: string
|
||||||
|
hostAliases:
|
||||||
|
items:
|
||||||
|
description: HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the pod's hosts file.
|
||||||
|
properties:
|
||||||
|
hostnames:
|
||||||
|
description: Hostnames for the above IP address.
|
||||||
|
items:
|
||||||
|
type: string
|
||||||
|
type: array
|
||||||
|
ip:
|
||||||
|
description: IP address of the host file entry.
|
||||||
|
type: string
|
||||||
|
type: object
|
||||||
|
type: array
|
||||||
image:
|
image:
|
||||||
type: string
|
type: string
|
||||||
imagePullPolicy:
|
imagePullPolicy:
|
||||||
@@ -724,6 +774,12 @@ spec:
|
|||||||
- name
|
- name
|
||||||
type: object
|
type: object
|
||||||
type: array
|
type: array
|
||||||
|
volumeSizeLimit:
|
||||||
|
anyOf:
|
||||||
|
- type: integer
|
||||||
|
- type: string
|
||||||
|
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
|
||||||
|
x-kubernetes-int-or-string: true
|
||||||
volumes:
|
volumes:
|
||||||
items:
|
items:
|
||||||
description: Volume represents a named volume in a pod that may be accessed by any container in the pod.
|
description: Volume represents a named volume in a pod that may be accessed by any container in the pod.
|
||||||
@@ -1532,6 +1588,10 @@ spec:
 status:
 description: RunnerStatus defines the observed state of Runner
 properties:
+lastRegistrationCheckTime:
+format: date-time
+nullable: true
+type: string
 message:
 type: string
 phase:
@@ -1541,6 +1601,8 @@ spec:
 registration:
 description: RunnerStatusRegistration contains runner registration status
 properties:
+enterprise:
+type: string
 expiresAt:
 format: date-time
 type: string
@@ -1558,11 +1620,6 @@ spec:
 - expiresAt
 - token
 type: object
-required:
-- message
-- phase
-- reason
-- registration
 type: object
 type: object
 version: v1alpha1
@@ -1,8 +1,8 @@
 1. Get the application URL by running these commands:
-{{- if .Values.ingress.enabled }}
+{{- if .Values.githubWebhookServer.ingress.enabled }}
-{{- range $host := .Values.ingress.hosts }}
+{{- range $host := .Values.githubWebhookServer.ingress.hosts }}
 {{- range .paths }}
-http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ . }}
+http{{ if $.Values.githubWebhookServer.ingress.tls }}s{{ end }}://{{ $host.host }}{{ . }}
 {{- end }}
 {{- end }}
 {{- else if contains "NodePort" .Values.service.type }}
@@ -0,0 +1,56 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "actions-runner-controller-github-webhook-server.name" -}}
{{- default .Chart.Name .Values.githubWebhookServer.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}

{{- define "actions-runner-controller-github-webhook-server.instance" -}}
{{- printf "%s-%s" .Release.Name "github-webhook-server" }}
{{- end }}

{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "actions-runner-controller-github-webhook-server.fullname" -}}
{{- if .Values.githubWebhookServer.fullnameOverride }}
{{- .Values.githubWebhookServer.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.githubWebhookServer.nameOverride }}
{{- $instance := include "actions-runner-controller-github-webhook-server.instance" . }}
{{- if contains $name $instance }}
{{- $instance | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s-%s" .Release.Name $name "github-webhook-server" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}

{{/*
Selector labels
*/}}
{{- define "actions-runner-controller-github-webhook-server.selectorLabels" -}}
app.kubernetes.io/name: {{ include "actions-runner-controller-github-webhook-server.name" . }}
app.kubernetes.io/instance: {{ include "actions-runner-controller-github-webhook-server.instance" . }}
{{- end }}

{{/*
Create the name of the service account to use
*/}}
{{- define "actions-runner-controller-github-webhook-server.serviceAccountName" -}}
{{- if .Values.githubWebhookServer.serviceAccount.create }}
{{- default (include "actions-runner-controller-github-webhook-server.fullname" .) .Values.githubWebhookServer.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.githubWebhookServer.serviceAccount.name }}
{{- end }}
{{- end }}

{{- define "actions-runner-controller-github-webhook-server.secretName" -}}
{{- default (include "actions-runner-controller-github-webhook-server.fullname" .) .Values.githubWebhookServer.secret.name }}
{{- end }}

{{- define "actions-runner-controller-github-webhook-server.roleName" -}}
{{- include "actions-runner-controller-github-webhook-server.fullname" . }}
{{- end }}
@@ -64,6 +64,10 @@ Create the name of the service account to use
 {{- end }}
 {{- end }}
+
+{{- define "actions-runner-controller.secretName" -}}
+{{- default (include "actions-runner-controller.fullname" .) .Values.authSecret.name -}}
+{{- end }}

 {{- define "actions-runner-controller.leaderElectionRoleName" -}}
 {{- include "actions-runner-controller.fullname" . }}-leader-election
 {{- end }}
@@ -85,11 +89,11 @@ Create the name of the service account to use
 {{- end }}

 {{- define "actions-runner-controller.webhookServiceName" -}}
-{{- include "actions-runner-controller.fullname" . }}-webhook
+{{- include "actions-runner-controller.fullname" . | trunc 55 }}-webhook
 {{- end }}

 {{- define "actions-runner-controller.authProxyServiceName" -}}
-{{- include "actions-runner-controller.fullname" . }}-metrics-service
+{{- include "actions-runner-controller.fullname" . | trunc 47 }}-metrics-service
 {{- end }}

 {{- define "actions-runner-controller.selfsignedIssuerName" -}}
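The two trunc filters added above keep the generated Service names inside Kubernetes' 63-character DNS label limit once the fixed suffixes are appended. A quick sanity check of the arithmetic (comment-only sketch):

# Truncation math for the generated Service names:
#   trunc 55 + len("-webhook")         = 55 + 8  = 63
#   trunc 47 + len("-metrics-service") = 47 + 16 = 63
# Both names therefore stay at or under the 63-character DNS label limit,
# even for very long release names.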
@@ -5,7 +5,7 @@ apiVersion: cert-manager.io/v1
 kind: Issuer
 metadata:
 name: {{ include "actions-runner-controller.selfsignedIssuerName" . }}
-namespace: {{ .Namespace }}
+namespace: {{ .Release.Namespace }}
 spec:
 selfSigned: {}
 ---
@@ -13,7 +13,7 @@ apiVersion: cert-manager.io/v1
 kind: Certificate
 metadata:
 name: {{ include "actions-runner-controller.servingCertName" . }}
-namespace: {{ .Namespace }}
+namespace: {{ .Release.Namespace }}
 spec:
 dnsNames:
 - {{ include "actions-runner-controller.webhookServiceName" . }}.{{ .Release.Namespace }}.svc
@@ -6,6 +6,7 @@ metadata:
 labels:
 {{- include "actions-runner-controller.labels" . | nindent 4 }}
 spec:
+replicas: {{ .Values.replicaCount }}
 selector:
 matchLabels:
 {{- include "actions-runner-controller.selectorLabels" . | nindent 6 }}
@@ -34,6 +35,9 @@ spec:
 - "--enable-leader-election"
 - "--sync-period={{ .Values.syncPeriod }}"
 - "--docker-image={{ .Values.image.dindSidecarRepositoryAndTag }}"
+{{- if .Values.scope.singleNamespace }}
+- "--watch-namespace={{ default .Release.Namespace .Values.scope.watchNamespace }}"
+{{- end }}
 command:
 - "/manager"
 env:
@@ -41,19 +45,19 @@ spec:
 valueFrom:
 secretKeyRef:
 key: github_token
-name: controller-manager
+name: {{ include "actions-runner-controller.secretName" . }}
 optional: true
 - name: GITHUB_APP_ID
 valueFrom:
 secretKeyRef:
 key: github_app_id
-name: controller-manager
+name: {{ include "actions-runner-controller.secretName" . }}
 optional: true
 - name: GITHUB_APP_INSTALLATION_ID
 valueFrom:
 secretKeyRef:
 key: github_app_installation_id
-name: controller-manager
+name: {{ include "actions-runner-controller.secretName" . }}
 optional: true
 - name: GITHUB_APP_PRIVATE_KEY
 value: /etc/actions-runner-controller/github_app_private_key
@@ -71,13 +75,13 @@ spec:
 resources:
 {{- toYaml .Values.resources | nindent 12 }}
 securityContext:
 {{- toYaml .Values.securityContext | nindent 12 }}
 volumeMounts:
 - mountPath: "/etc/actions-runner-controller"
-name: controller-manager
+name: secret
 readOnly: true
 - mountPath: /tmp
 name: tmp
 - mountPath: /tmp/k8s-webhook-server/serving-certs
 name: cert
 readOnly: true
@@ -93,14 +97,14 @@ spec:
 - containerPort: 8443
 name: https
 resources:
 {{- toYaml .Values.resources | nindent 12 }}
 securityContext:
 {{- toYaml .Values.securityContext | nindent 12 }}
 terminationGracePeriodSeconds: 10
 volumes:
-- name: controller-manager
+- name: secret
 secret:
-secretName: controller-manager
+secretName: {{ include "actions-runner-controller.secretName" . }}
 - name: cert
 secret:
 defaultMode: 420
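The new --watch-namespace argument above is wired to two chart values. A minimal sketch of the corresponding values override follows; the namespace name is an assumed placeholder.

# Hypothetical values override: restrict the controller to a single namespace.
scope:
  singleNamespace: true
  # Defaults to the release namespace when left empty.
  watchNamespace: actions-runners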
@@ -0,0 +1,89 @@
{{- if .Values.githubWebhookServer.enabled }}
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "actions-runner-controller-github-webhook-server.fullname" . }}
  namespace: {{ .Release.Namespace }}
  labels:
    {{- include "actions-runner-controller.labels" . | nindent 4 }}
spec:
  replicas: {{ .Values.githubWebhookServer.replicaCount }}
  selector:
    matchLabels:
      {{- include "actions-runner-controller-github-webhook-server.selectorLabels" . | nindent 6 }}
  template:
    metadata:
      {{- with .Values.githubWebhookServer.podAnnotations }}
      annotations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      labels:
        {{- include "actions-runner-controller-github-webhook-server.selectorLabels" . | nindent 8 }}
    spec:
      {{- with .Values.githubWebhookServer.imagePullSecrets }}
      imagePullSecrets:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      serviceAccountName: {{ include "actions-runner-controller-github-webhook-server.serviceAccountName" . }}
      securityContext:
        {{- toYaml .Values.githubWebhookServer.podSecurityContext | nindent 8 }}
      {{- with .Values.githubWebhookServer.priorityClassName }}
      priorityClassName: "{{ . }}"
      {{- end }}
      containers:
      - args:
        - "--metrics-addr=127.0.0.1:8080"
        - "--sync-period={{ .Values.githubWebhookServer.syncPeriod }}"
        command:
        - "/github-webhook-server"
        env:
        - name: GITHUB_WEBHOOK_SECRET_TOKEN
          valueFrom:
            secretKeyRef:
              key: github_webhook_secret_token
              name: {{ include "actions-runner-controller-github-webhook-server.secretName" . }}
              optional: true
        {{- range $key, $val := .Values.githubWebhookServer.env }}
        - name: {{ $key }}
          value: {{ $val | quote }}
        {{- end }}
        image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default (cat "v" .Chart.AppVersion | replace " " "") }}"
        name: github-webhook-server
        imagePullPolicy: {{ .Values.image.pullPolicy }}
        ports:
        - containerPort: 8000
          name: http
          protocol: TCP
        resources:
          {{- toYaml .Values.githubWebhookServer.resources | nindent 12 }}
        securityContext:
          {{- toYaml .Values.githubWebhookServer.securityContext | nindent 12 }}
      - args:
        - "--secure-listen-address=0.0.0.0:8443"
        - "--upstream=http://127.0.0.1:8080/"
        - "--logtostderr=true"
        - "--v=10"
        image: "{{ .Values.kube_rbac_proxy.image.repository }}:{{ .Values.kube_rbac_proxy.image.tag }}"
        name: kube-rbac-proxy
        imagePullPolicy: {{ .Values.image.pullPolicy }}
        ports:
        - containerPort: 8443
          name: https
        resources:
          {{- toYaml .Values.resources | nindent 12 }}
        securityContext:
          {{- toYaml .Values.securityContext | nindent 12 }}
      terminationGracePeriodSeconds: 10
      {{- with .Values.githubWebhookServer.nodeSelector }}
      nodeSelector:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.githubWebhookServer.affinity }}
      affinity:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.githubWebhookServer.tolerations }}
      tolerations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
{{- end }}
@@ -0,0 +1,41 @@
{{- if .Values.githubWebhookServer.ingress.enabled -}}
{{- $fullName := include "actions-runner-controller-github-webhook-server.fullname" . -}}
{{- $svcPort := (index .Values.githubWebhookServer.service.ports 0).port -}}
{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
apiVersion: networking.k8s.io/v1beta1
{{- else -}}
apiVersion: extensions/v1beta1
{{- end }}
kind: Ingress
metadata:
  name: {{ $fullName }}
  labels:
    {{- include "actions-runner-controller.labels" . | nindent 4 }}
  {{- with .Values.githubWebhookServer.ingress.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
spec:
  {{- if .Values.githubWebhookServer.ingress.tls }}
  tls:
    {{- range .Values.githubWebhookServer.ingress.tls }}
    - hosts:
        {{- range .hosts }}
        - {{ . | quote }}
        {{- end }}
      secretName: {{ .secretName }}
    {{- end }}
  {{- end }}
  rules:
    {{- range .Values.githubWebhookServer.ingress.hosts }}
    - host: {{ .host | quote }}
      http:
        paths:
          {{- range .paths }}
          - path: {{ .path }}
            backend:
              serviceName: {{ $fullName }}
              servicePort: {{ $svcPort }}
          {{- end }}
    {{- end }}
{{- end }}
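For reference, here is a sketch of the values that would drive the Ingress template above; the hostname, path and annotation are placeholders, not values shipped with the chart.

# Hypothetical values for exposing the webhook server through an Ingress.
githubWebhookServer:
  enabled: true
  ingress:
    enabled: true
    annotations:
      kubernetes.io/ingress.class: nginx   # assumed controller class
    hosts:
      - host: webhook.example.com          # placeholder hostname
        paths:
          - path: /
    tls: []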
@@ -0,0 +1,70 @@
{{- if .Values.githubWebhookServer.enabled }}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  creationTimestamp: null
  name: {{ include "actions-runner-controller-github-webhook-server.roleName" . }}
rules:
- apiGroups:
  - actions.summerwind.dev
  resources:
  - horizontalrunnerautoscalers
  verbs:
  - get
  - list
  - patch
  - update
  - watch
- apiGroups:
  - actions.summerwind.dev
  resources:
  - horizontalrunnerautoscalers/finalizers
  verbs:
  - create
  - delete
  - get
  - list
  - patch
  - update
  - watch
- apiGroups:
  - actions.summerwind.dev
  resources:
  - horizontalrunnerautoscalers/status
  verbs:
  - get
  - patch
  - update
- apiGroups:
  - actions.summerwind.dev
  resources:
  - runnerdeployments
  verbs:
  - create
  - delete
  - get
  - list
  - patch
  - update
  - watch
- apiGroups:
  - actions.summerwind.dev
  resources:
  - runnerdeployments/finalizers
  verbs:
  - create
  - delete
  - get
  - list
  - patch
  - update
  - watch
- apiGroups:
  - actions.summerwind.dev
  resources:
  - runnerdeployments/status
  verbs:
  - get
  - patch
  - update
{{- end }}
@@ -0,0 +1,14 @@
{{- if .Values.githubWebhookServer.enabled }}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: {{ include "actions-runner-controller-github-webhook-server.roleName" . }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: {{ include "actions-runner-controller-github-webhook-server.roleName" . }}
subjects:
- kind: ServiceAccount
  name: {{ include "actions-runner-controller-github-webhook-server.serviceAccountName" . }}
  namespace: {{ .Release.Namespace }}
{{- end }}
@@ -0,0 +1,16 @@
{{- if .Values.githubWebhookServer.enabled }}
{{- if .Values.githubWebhookServer.secret.create }}
apiVersion: v1
kind: Secret
metadata:
  name: {{ include "actions-runner-controller-github-webhook-server.secretName" . }}
  namespace: {{ .Release.Namespace }}
  labels:
    {{- include "actions-runner-controller.labels" . | nindent 4 }}
type: Opaque
data:
  {{- if .Values.githubWebhookServer.secret.github_webhook_secret_token }}
  github_webhook_secret_token: {{ .Values.githubWebhookServer.secret.github_webhook_secret_token | toString | b64enc }}
  {{- end }}
{{- end }}
{{- end }}
@@ -0,0 +1,17 @@
{{- if .Values.githubWebhookServer.enabled }}
apiVersion: v1
kind: Service
metadata:
  name: {{ include "actions-runner-controller-github-webhook-server.fullname" . }}
  namespace: {{ .Release.Namespace }}
  labels:
    {{- include "actions-runner-controller.labels" . | nindent 4 }}
spec:
  type: {{ .Values.githubWebhookServer.service.type }}
  ports:
  {{ range $_, $port := .Values.githubWebhookServer.service.ports -}}
  - {{ $port | toYaml | nindent 6 }}
  {{- end }}
  selector:
    {{- include "actions-runner-controller-github-webhook-server.selectorLabels" . | nindent 4 }}
{{- end }}
@@ -0,0 +1,15 @@
{{- if .Values.githubWebhookServer.enabled -}}
{{- if .Values.githubWebhookServer.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ include "actions-runner-controller-github-webhook-server.serviceAccountName" . }}
  namespace: {{ .Release.Namespace }}
  labels:
    {{- include "actions-runner-controller.labels" . | nindent 4 }}
  {{- with .Values.githubWebhookServer.serviceAccount.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
{{- end }}
{{- end }}
@@ -1,14 +1,23 @@
-{{- if or .Values.authSecret.enabled }}
+{{- if .Values.authSecret.create }}
 apiVersion: v1
 kind: Secret
 metadata:
-name: controller-manager
+name: {{ include "actions-runner-controller.secretName" . }}
 namespace: {{ .Release.Namespace }}
 labels:
 {{- include "actions-runner-controller.labels" . | nindent 4 }}
 type: Opaque
 data:
-{{- range $k, $v := .Values.authSecret }}
+{{- if .Values.authSecret.github_app_id }}
-{{ $k }}: {{ $v | toString | b64enc }}
+github_app_id: {{ .Values.authSecret.github_app_id | toString | b64enc }}
 {{- end }}
-{{- end }}
+{{- if .Values.authSecret.github_app_installation_id }}
+github_app_installation_id: {{ .Values.authSecret.github_app_installation_id | toString | b64enc }}
+{{- end }}
+{{- if .Values.authSecret.github_app_private_key }}
+github_app_private_key: {{ .Values.authSecret.github_app_private_key | toString | b64enc }}
+{{- end }}
+{{- if .Values.authSecret.github_token }}
+github_token: {{ .Values.authSecret.github_token | toString | b64enc }}
+{{- end }}
+{{- end }}
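A sketch of the values that feed this template under the new authSecret.create flag; the IDs and key below are placeholders, not real credentials.

# Hypothetical values for GitHub App authentication via the chart-managed secret.
authSecret:
  create: true
  github_app_id: "12345"                 # placeholder
  github_app_installation_id: "678910"   # placeholder
  github_app_private_key: |
    -----BEGIN RSA PRIVATE KEY-----
    (placeholder key material)
    -----END RSA PRIVATE KEY-----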
@@ -11,7 +11,8 @@ syncPeriod: 10m
 # Only 1 authentication method can be deployed at a time
 # Uncomment the configuration you are applying and fill in the details
 authSecret:
-enabled: false
+create: true
+name: "controller-manager"
 ### GitHub Apps Configuration
 #github_app_id: ""
 #github_app_installation_id: ""
@@ -21,15 +22,13 @@ authSecret:

 image:
 repository: summerwind/actions-runner-controller
-# Overrides the manager image tag whose default is the chart appVersion if the tag key is commented out
-tag: "latest"
 dindSidecarRepositoryAndTag: "docker:dind"
 pullPolicy: IfNotPresent

 kube_rbac_proxy:
 image:
-repository: gcr.io/kubebuilder/kube-rbac-proxy
+repository: quay.io/brancz/kube-rbac-proxy
-tag: v0.4.1
+tag: v0.8.0

 imagePullSecrets: []
 nameOverride: ""
@@ -46,10 +45,12 @@ serviceAccount:

 podAnnotations: {}

-podSecurityContext: {}
+podSecurityContext:
+{}
 # fsGroup: 2000

-securityContext: {}
+securityContext:
+{}
 # capabilities:
 # drop:
 # - ALL
@@ -61,20 +62,8 @@ service:
 type: ClusterIP
 port: 443

-ingress:
+resources:
-enabled: false
+{}
-annotations: {}
-# kubernetes.io/ingress.class: nginx
-# kubernetes.io/tls-acme: "true"
-hosts:
-- host: chart-example.local
-paths: []
-tls: []
-# - secretName: chart-example-tls
-# hosts:
-# - chart-example.local
-
-resources: {}
 # We usually recommend not to specify default resources and to leave this as a conscious
 # choice for the user. This also increases chances charts run on environments with little
 # resources, such as Minikube. If you do want to specify resources, uncomment the following
@@ -104,7 +93,67 @@ affinity: {}
 # PriorityClass: system-cluster-critical
 priorityClassName: ""

-env: {}
+env:
+{}
 # http_proxy: "proxy.com:8080"
 # https_proxy: "proxy.com:8080"
 # no_proxy: ""
+
+scope:
+# If true, the controller will only watch custom resources in a single namespace
+singleNamespace: false
+# If `scope.singleNamespace=true`, the controller will only watch custom resources in this namespace
+# The default value is "", which means the namespace of the controller
+watchNamespace: ""
+
+githubWebhookServer:
+enabled: false
+labels: {}
+replicaCount: 1
+syncPeriod: 10m
+secret:
+create: true
+name: "github-webhook-server"
+### GitHub Webhook Configuration
+#github_webhook_secret_token: ""
+imagePullSecrets: []
+nameOverride: ""
+fullnameOverride: ""
+serviceAccount:
+# Specifies whether a service account should be created
+create: true
+# Annotations to add to the service account
+annotations: {}
+# The name of the service account to use.
+# If not set and create is true, a name is generated using the fullname template
+name: ""
+podAnnotations: {}
+podSecurityContext: {}
+# fsGroup: 2000
+securityContext: {}
+resources: {}
+nodeSelector: {}
+tolerations: []
+affinity: {}
+priorityClassName: ""
+service:
+type: ClusterIP
+ports:
+- port: 80
+targetPort: http
+protocol: TCP
+name: http
+#nodePort: someFixedPortForUseWithTerraformCdkCfnEtc
+ingress:
+enabled: false
+annotations:
+{}
+# kubernetes.io/ingress.class: nginx
+# kubernetes.io/tls-acme: "true"
+hosts:
+- host: chart-example.local
+paths: []
+tls: []
+# - secretName: chart-example-tls
+# hosts:
+# - chart-example.local
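With tag: "latest" removed above, the manager image tag appears to fall back to the chart appVersion (see the default expression in the webhook deployment template). Pinning a version is still possible; a sketch with a placeholder version:

# Hypothetical override: pin the controller image instead of relying on the chart appVersion.
image:
  repository: summerwind/actions-runner-controller
  tag: "v0.18.0"   # placeholder version number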
cmd/githubwebhookserver/main.go (169 lines, new file)
@@ -0,0 +1,169 @@
/*
Copyright 2021 The actions-runner-controller authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package main

import (
	"context"
	"errors"
	"flag"
	"net/http"
	"os"
	"sync"
	"time"

	actionsv1alpha1 "github.com/summerwind/actions-runner-controller/api/v1alpha1"
	"github.com/summerwind/actions-runner-controller/controllers"
	"k8s.io/apimachinery/pkg/runtime"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
	_ "k8s.io/client-go/plugin/pkg/client/auth/exec"
	_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
	_ "k8s.io/client-go/plugin/pkg/client/auth/oidc"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/log/zap"
	// +kubebuilder:scaffold:imports
)

var (
	scheme   = runtime.NewScheme()
	setupLog = ctrl.Log.WithName("setup")
)

func init() {
	_ = clientgoscheme.AddToScheme(scheme)

	_ = actionsv1alpha1.AddToScheme(scheme)
	// +kubebuilder:scaffold:scheme
}

func main() {
	var (
		err error

		webhookAddr string
		metricsAddr string

		// The secret token of the GitHub Webhook. See https://docs.github.com/en/developers/webhooks-and-events/securing-your-webhooks
		webhookSecretToken string

		watchNamespace string

		enableLeaderElection bool
		syncPeriod           time.Duration
	)

	webhookSecretToken = os.Getenv("GITHUB_WEBHOOK_SECRET_TOKEN")

	flag.StringVar(&webhookAddr, "webhook-addr", ":8000", "The address the webhook endpoint binds to.")
	flag.StringVar(&metricsAddr, "metrics-addr", ":8080", "The address the metric endpoint binds to.")
	flag.StringVar(&watchNamespace, "watch-namespace", "", "The namespace to watch for HorizontalRunnerAutoscalers to scale on webhook. Set to empty to watch all namespaces.")
	flag.BoolVar(&enableLeaderElection, "enable-leader-election", false,
		"Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager.")
	flag.DurationVar(&syncPeriod, "sync-period", 10*time.Minute, "Determines the minimum frequency at which K8s resources managed by this controller are reconciled. When you use autoscaling, set this to a lower value, such as 10 minutes, because it corresponds to the minimum time needed to react to a demand change.")
	flag.Parse()

	if webhookSecretToken == "" {
		setupLog.Info("GITHUB_WEBHOOK_SECRET_TOKEN is missing or empty. Create one following https://docs.github.com/en/developers/webhooks-and-events/securing-your-webhooks")
	}

	if watchNamespace == "" {
		setupLog.Info("-watch-namespace is empty. HorizontalRunnerAutoscalers in all the namespaces are watched, cached, and considered as scale targets.")
	} else {
		setupLog.Info("Only HorizontalRunnerAutoscalers in the specified namespace are watched, cached, and considered as scale targets.", "watch-namespace", watchNamespace)
	}

	logger := zap.New(func(o *zap.Options) {
		o.Development = true
	})

	ctrl.SetLogger(logger)

	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
		Scheme:             scheme,
		SyncPeriod:         &syncPeriod,
		LeaderElection:     enableLeaderElection,
		Namespace:          watchNamespace,
		MetricsBindAddress: metricsAddr,
		Port:               9443,
	})
	if err != nil {
		setupLog.Error(err, "unable to start manager")
		os.Exit(1)
	}

	hraGitHubWebhook := &controllers.HorizontalRunnerAutoscalerGitHubWebhook{
		Client:         mgr.GetClient(),
		Log:            ctrl.Log.WithName("controllers").WithName("Runner"),
		Recorder:       nil,
		Scheme:         mgr.GetScheme(),
		SecretKeyBytes: []byte(webhookSecretToken),
		Namespace:      watchNamespace,
	}

	if err = hraGitHubWebhook.SetupWithManager(mgr); err != nil {
		setupLog.Error(err, "unable to create controller", "controller", "Runner")
		os.Exit(1)
	}

	var wg sync.WaitGroup

	ctx, cancel := context.WithCancel(context.Background())

	// Run the controller-runtime manager until the shared context is cancelled.
	wg.Add(1)
	go func() {
		defer cancel()
		defer wg.Done()

		setupLog.Info("starting webhook server")
		if err := mgr.Start(ctx.Done()); err != nil {
			setupLog.Error(err, "problem running manager")
			os.Exit(1)
		}
	}()

	// Serve the GitHub webhook endpoint on webhookAddr, handled by the autoscaler controller.
	mux := http.NewServeMux()
	mux.HandleFunc("/", hraGitHubWebhook.Handle)

	srv := http.Server{
		Addr:    webhookAddr,
		Handler: mux,
	}

	wg.Add(1)
	go func() {
		defer cancel()
		defer wg.Done()

		// Shut the HTTP server down once the shared context is cancelled.
		go func() {
			<-ctx.Done()

			srv.Shutdown(context.Background())
		}()

		if err := srv.ListenAndServe(); err != nil {
			if !errors.Is(err, http.ErrServerClosed) {
				setupLog.Error(err, "problem running http server")
			}
		}
	}()

	// Cancel everything on SIGINT/SIGTERM.
	go func() {
		<-ctrl.SetupSignalHandler()
		cancel()
	}()

	wg.Wait()
}
@@ -48,6 +48,20 @@ spec:
 description: HorizontalRunnerAutoscalerSpec defines the desired state of
 HorizontalRunnerAutoscaler
 properties:
+capacityReservations:
+items:
+description: CapacityReservation specifies the number of replicas
+temporarily added to the scale target until ExpirationTime.
+properties:
+expirationTime:
+format: date-time
+type: string
+name:
+type: string
+replicas:
+type: integer
+type: object
+type: array
 maxReplicas:
 description: MinReplicas is the maximum number of replicas the deployment
 is allowed to scale
@@ -64,6 +78,11 @@ spec:
 items:
 type: string
 type: array
+scaleDownAdjustment:
+description: ScaleDownAdjustment is the number of runners removed
+on scale-down. You can only specify either ScaleDownFactor or
+ScaleDownAdjustment.
+type: integer
 scaleDownFactor:
 description: ScaleDownFactor is the multiplicative factor applied
 to the current number of runners used to determine how many
@@ -73,6 +92,10 @@ spec:
 description: ScaleDownThreshold is the percentage of busy runners
 less than which will trigger the hpa to scale the runners down.
 type: string
+scaleUpAdjustment:
+description: ScaleUpAdjustment is the number of runners added
+on scale-up. You can only specify either ScaleUpFactor or ScaleUpAdjustment.
+type: integer
 scaleUpFactor:
 description: ScaleUpFactor is the multiplicative factor applied
 to the current number of runners used to determine how many
@@ -104,9 +127,79 @@ spec:
 name:
 type: string
 type: object
+scaleUpTriggers:
+description: "ScaleUpTriggers is an experimental feature to increase
+the desired replicas by 1 on each webhook requested received by the
+webhookBasedAutoscaler. \n This feature requires you to also enable
+and deploy the webhookBasedAutoscaler onto your cluster. \n Note that
+the added runners remain until the next sync period at least, and
+they may or may not be used by GitHub Actions depending on the timing.
+They are intended to be used to gain \"resource slack\" immediately
+after you receive a webhook from GitHub, so that you can loosely expect
+MinReplicas runners to be always available."
+items:
+properties:
+amount:
+type: integer
+duration:
+type: string
+githubEvent:
+properties:
+checkRun:
+description: https://docs.github.com/en/actions/reference/events-that-trigger-workflows#check_run
+properties:
+names:
+description: Names is a list of GitHub Actions glob patterns.
+Any check_run event whose name matches one of patterns
+in the list can trigger autoscaling. Note that check_run
+name seem to equal to the job name you've defined in
+your actions workflow yaml file. So it is very likely
+that you can utilize this to trigger depending on the
+job.
+items:
+type: string
+type: array
+status:
+type: string
+types:
+items:
+type: string
+type: array
+type: object
+pullRequest:
+description: https://docs.github.com/en/actions/reference/events-that-trigger-workflows#pull_request
+properties:
+branches:
+items:
+type: string
+type: array
+types:
+items:
+type: string
+type: array
+type: object
+push:
+description: PushSpec is the condition for triggering scale-up
+on push event Also see https://docs.github.com/en/actions/reference/events-that-trigger-workflows#push
+type: object
+type: object
+type: object
+type: array
 type: object
 status:
 properties:
+cacheEntries:
+items:
+properties:
+expirationTime:
+format: date-time
+type: string
+key:
+type: string
+value:
+type: integer
+type: object
+type: array
 desiredReplicas:
 description: DesiredReplicas is the total number of desired, non-terminated
 and latest pods to be set for the primary RunnerSet This doesn't include
@@ -114,6 +207,7 @@ spec:
 type: integer
 lastSuccessfulScaleOutTime:
 format: date-time
+nullable: true
 type: string
 observedGeneration:
 description: ObservedGeneration is the most recent generation observed
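To make the new webhook-driven fields concrete, here is a minimal sketch of a HorizontalRunnerAutoscaler using scaleUpTriggers; the target deployment name, amount, duration and event filter are illustrative assumptions, not values from this diff.

# Hypothetical example of webhook-based scale-up on check_run events.
apiVersion: actions.summerwind.dev/v1alpha1
kind: HorizontalRunnerAutoscaler
metadata:
  name: example-hra
spec:
  scaleTargetRef:
    name: example-runnerdeploy   # assumed RunnerDeployment name
  minReplicas: 1
  maxReplicas: 10
  scaleUpTriggers:
    - githubEvent:
        checkRun:
          types: ["created"]     # assumed event filter
          status: "queued"
      amount: 1                  # add one runner per matching webhook delivery
      duration: "5m"             # keep the extra capacity for roughly five minutes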
@@ -38,11 +38,42 @@ spec:
|
|||||||
metadata:
|
metadata:
|
||||||
type: object
|
type: object
|
||||||
spec:
|
spec:
|
||||||
description: RunnerReplicaSetSpec defines the desired state of RunnerDeployment
|
description: RunnerDeploymentSpec defines the desired state of RunnerDeployment
|
||||||
properties:
|
properties:
|
||||||
replicas:
|
replicas:
|
||||||
nullable: true
|
nullable: true
|
||||||
type: integer
|
type: integer
|
||||||
|
selector:
|
||||||
|
description: A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects.
|
||||||
|
nullable: true
|
||||||
|
properties:
|
||||||
|
matchExpressions:
|
||||||
|
description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
|
||||||
|
items:
|
||||||
|
description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
|
||||||
|
properties:
|
||||||
|
key:
|
||||||
|
description: key is the label key that the selector applies to.
|
||||||
|
type: string
|
||||||
|
operator:
|
||||||
|
description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
|
||||||
|
type: string
|
||||||
|
values:
|
||||||
|
description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
|
||||||
|
items:
|
||||||
|
type: string
|
||||||
|
type: array
|
||||||
|
required:
|
||||||
|
- key
|
||||||
|
- operator
|
||||||
|
type: object
|
||||||
|
type: array
|
||||||
|
matchLabels:
|
||||||
|
additionalProperties:
|
||||||
|
type: string
|
||||||
|
description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
|
||||||
|
type: object
|
||||||
|
type: object
|
||||||
template:
|
template:
|
||||||
properties:
|
properties:
|
||||||
metadata:
|
metadata:
|
||||||
@@ -402,6 +433,36 @@ spec:
|
|||||||
type: array
|
type: array
|
||||||
dockerEnabled:
|
dockerEnabled:
|
||||||
type: boolean
|
type: boolean
|
||||||
|
dockerMTU:
|
||||||
|
format: int64
|
||||||
|
type: integer
|
||||||
|
dockerVolumeMounts:
|
||||||
|
items:
|
||||||
|
description: VolumeMount describes a mounting of a Volume within a container.
|
||||||
|
properties:
|
||||||
|
mountPath:
|
||||||
|
description: Path within the container at which the volume should be mounted. Must not contain ':'.
|
||||||
|
type: string
|
||||||
|
mountPropagation:
|
||||||
|
description: mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10.
|
||||||
|
type: string
|
||||||
|
name:
|
||||||
|
description: This must match the Name of a Volume.
|
||||||
|
type: string
|
||||||
|
readOnly:
|
||||||
|
description: Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false.
|
||||||
|
type: boolean
|
||||||
|
subPath:
|
||||||
|
description: Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root).
|
||||||
|
type: string
|
||||||
|
subPathExpr:
|
||||||
|
description: Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive. This field is beta in 1.15.
|
||||||
|
type: string
|
||||||
|
required:
|
||||||
|
- mountPath
|
||||||
|
- name
|
||||||
|
type: object
|
||||||
|
type: array
|
||||||
dockerdContainerResources:
|
dockerdContainerResources:
|
||||||
description: ResourceRequirements describes the compute resource requirements.
|
description: ResourceRequirements describes the compute resource requirements.
|
||||||
properties:
|
properties:
|
||||||
@@ -426,6 +487,9 @@ spec:
|
|||||||
type: object
|
type: object
|
||||||
dockerdWithinRunnerContainer:
|
dockerdWithinRunnerContainer:
|
||||||
type: boolean
|
type: boolean
|
||||||
|
enterprise:
|
||||||
|
pattern: ^[^/]+$
|
||||||
|
type: string
|
||||||
env:
|
env:
|
||||||
items:
|
items:
|
||||||
description: EnvVar represents an environment variable present in a Container.
|
description: EnvVar represents an environment variable present in a Container.
|
||||||
@@ -543,6 +607,20 @@ spec:
|
|||||||
type: array
|
type: array
|
||||||
group:
|
group:
|
||||||
type: string
|
type: string
|
||||||
|
hostAliases:
|
||||||
|
items:
|
||||||
|
description: HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the pod's hosts file.
|
||||||
|
properties:
|
||||||
|
hostnames:
|
||||||
|
description: Hostnames for the above IP address.
|
||||||
|
items:
|
||||||
|
type: string
|
||||||
|
type: array
|
||||||
|
ip:
|
||||||
|
description: IP address of the host file entry.
|
||||||
|
type: string
|
||||||
|
type: object
|
||||||
|
type: array
|
||||||
image:
|
image:
|
||||||
type: string
|
type: string
|
||||||
imagePullPolicy:
|
imagePullPolicy:
|
||||||
@@ -731,6 +809,12 @@ spec:
|
|||||||
- name
|
- name
|
||||||
type: object
|
type: object
|
||||||
type: array
|
type: array
|
||||||
|
volumeSizeLimit:
|
||||||
|
anyOf:
|
||||||
|
- type: integer
|
||||||
|
- type: string
|
||||||
|
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
|
||||||
|
x-kubernetes-int-or-string: true
|
||||||
volumes:
|
volumes:
|
||||||
items:
|
items:
|
||||||
description: Volume represents a named volume in a pod that may be accessed by any container in the pod.
|
description: Volume represents a named volume in a pod that may be accessed by any container in the pod.
|
||||||
|
|||||||
@@ -43,6 +43,37 @@ spec:
|
|||||||
replicas:
|
replicas:
|
||||||
nullable: true
|
nullable: true
|
||||||
type: integer
|
type: integer
|
||||||
|
selector:
|
||||||
|
description: A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects.
|
||||||
|
nullable: true
|
||||||
|
properties:
|
||||||
|
matchExpressions:
|
||||||
|
description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
|
||||||
|
items:
|
||||||
|
description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
|
||||||
|
properties:
|
||||||
|
key:
|
||||||
|
description: key is the label key that the selector applies to.
|
||||||
|
type: string
|
||||||
|
operator:
|
||||||
|
description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
|
||||||
|
type: string
|
||||||
|
values:
|
||||||
|
description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
|
||||||
|
items:
|
||||||
|
type: string
|
||||||
|
+type: array
+required:
+- key
+- operator
+type: object
+type: array
+matchLabels:
+additionalProperties:
+type: string
+description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+type: object
+type: object
 template:
 properties:
 metadata:
@@ -402,6 +433,36 @@ spec:
 type: array
 dockerEnabled:
 type: boolean
+dockerMTU:
+format: int64
+type: integer
+dockerVolumeMounts:
+items:
+description: VolumeMount describes a mounting of a Volume within a container.
+properties:
+mountPath:
+description: Path within the container at which the volume should be mounted. Must not contain ':'.
+type: string
+mountPropagation:
+description: mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10.
+type: string
+name:
+description: This must match the Name of a Volume.
+type: string
+readOnly:
+description: Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false.
+type: boolean
+subPath:
+description: Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root).
+type: string
+subPathExpr:
+description: Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive. This field is beta in 1.15.
+type: string
+required:
+- mountPath
+- name
+type: object
+type: array
 dockerdContainerResources:
 description: ResourceRequirements describes the compute resource requirements.
 properties:
@@ -426,6 +487,9 @@ spec:
 type: object
 dockerdWithinRunnerContainer:
 type: boolean
+enterprise:
+pattern: ^[^/]+$
+type: string
 env:
 items:
 description: EnvVar represents an environment variable present in a Container.
@@ -543,6 +607,20 @@ spec:
 type: array
 group:
 type: string
+hostAliases:
+items:
+description: HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the pod's hosts file.
+properties:
+hostnames:
+description: Hostnames for the above IP address.
+items:
+type: string
+type: array
+ip:
+description: IP address of the host file entry.
+type: string
+type: object
+type: array
 image:
 type: string
 imagePullPolicy:
@@ -731,6 +809,12 @@ spec:
 - name
 type: object
 type: array
+volumeSizeLimit:
+anyOf:
+- type: integer
+- type: string
+pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+x-kubernetes-int-or-string: true
 volumes:
 items:
 description: Volume represents a named volume in a pod that may be accessed by any container in the pod.
@@ -7,6 +7,9 @@ metadata:
 name: runners.actions.summerwind.dev
 spec:
 additionalPrinterColumns:
+- JSONPath: .spec.enterprise
+name: Enterprise
+type: string
 - JSONPath: .spec.organization
 name: Organization
 type: string
@@ -395,6 +398,36 @@ spec:
 type: array
 dockerEnabled:
 type: boolean
+dockerMTU:
+format: int64
+type: integer
+dockerVolumeMounts:
+items:
+description: VolumeMount describes a mounting of a Volume within a container.
+properties:
+mountPath:
+description: Path within the container at which the volume should be mounted. Must not contain ':'.
+type: string
+mountPropagation:
+description: mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10.
+type: string
+name:
+description: This must match the Name of a Volume.
+type: string
+readOnly:
+description: Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false.
+type: boolean
+subPath:
+description: Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root).
+type: string
+subPathExpr:
+description: Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive. This field is beta in 1.15.
+type: string
+required:
+- mountPath
+- name
+type: object
+type: array
 dockerdContainerResources:
 description: ResourceRequirements describes the compute resource requirements.
 properties:
@@ -419,6 +452,9 @@ spec:
 type: object
 dockerdWithinRunnerContainer:
 type: boolean
+enterprise:
+pattern: ^[^/]+$
+type: string
 env:
 items:
 description: EnvVar represents an environment variable present in a Container.
@@ -536,6 +572,20 @@ spec:
 type: array
 group:
 type: string
+hostAliases:
+items:
+description: HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the pod's hosts file.
+properties:
+hostnames:
+description: Hostnames for the above IP address.
+items:
+type: string
+type: array
+ip:
+description: IP address of the host file entry.
+type: string
+type: object
+type: array
 image:
 type: string
 imagePullPolicy:
@@ -724,6 +774,12 @@ spec:
 - name
 type: object
 type: array
+volumeSizeLimit:
+anyOf:
+- type: integer
+- type: string
+pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+x-kubernetes-int-or-string: true
 volumes:
 items:
 description: Volume represents a named volume in a pod that may be accessed by any container in the pod.
@@ -1532,6 +1588,10 @@ spec:
 status:
 description: RunnerStatus defines the observed state of Runner
 properties:
+lastRegistrationCheckTime:
+format: date-time
+nullable: true
+type: string
 message:
 type: string
 phase:
@@ -1541,6 +1601,8 @@ spec:
 registration:
 description: RunnerStatusRegistration contains runner registration status
 properties:
+enterprise:
+type: string
 expiresAt:
 format: date-time
 type: string
@@ -1558,11 +1620,6 @@ spec:
 - expiresAt
 - token
 type: object
-required:
-- message
-- phase
-- reason
-- registration
 type: object
 type: object
 version: v1alpha1
@@ -10,7 +10,7 @@ spec:
 spec:
 containers:
 - name: kube-rbac-proxy
-image: gcr.io/kubebuilder/kube-rbac-proxy:v0.4.1
+image: quay.io/brancz/kube-rbac-proxy:v0.8.0
 args:
 - "--secure-listen-address=0.0.0.0:8443"
 - "--upstream=http://127.0.0.1:8080/"
@@ -7,8 +7,11 @@ import (
 "math"
 "strconv"
 "strings"
+"time"

 "github.com/summerwind/actions-runner-controller/api/v1alpha1"
+kerrors "k8s.io/apimachinery/pkg/api/errors"
+metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "sigs.k8s.io/controller-runtime/pkg/client"
 )

@@ -19,7 +22,48 @@ const (
 defaultScaleDownFactor = 0.7
 )

-func (r *HorizontalRunnerAutoscalerReconciler) determineDesiredReplicas(rd v1alpha1.RunnerDeployment, hra v1alpha1.HorizontalRunnerAutoscaler) (*int, error) {
+func getValueAvailableAt(now time.Time, from, to *time.Time, reservedValue int) *int {
+if to != nil && now.After(*to) {
+return nil
+}
+
+if from != nil && now.Before(*from) {
+return nil
+}
+
+return &reservedValue
+}
+
+func (r *HorizontalRunnerAutoscalerReconciler) fetchSuggestedReplicasFromCache(hra v1alpha1.HorizontalRunnerAutoscaler) *int {
+var entry *v1alpha1.CacheEntry
+
+for i := range hra.Status.CacheEntries {
+ent := hra.Status.CacheEntries[i]
+
+if ent.Key != v1alpha1.CacheEntryKeyDesiredReplicas {
+continue
+}
+
+if !time.Now().Before(ent.ExpirationTime.Time) {
+continue
+}
+
+entry = &ent
+
+break
+}
+
+if entry != nil {
+v := getValueAvailableAt(time.Now(), nil, &entry.ExpirationTime.Time, entry.Value)
+if v != nil {
+return v
+}
+}
+
+return nil
+}
+
+func (r *HorizontalRunnerAutoscalerReconciler) suggestDesiredReplicas(rd v1alpha1.RunnerDeployment, hra v1alpha1.HorizontalRunnerAutoscaler) (*int, error) {
 if hra.Spec.MinReplicas == nil {
 return nil, fmt.Errorf("horizontalrunnerautoscaler %s/%s is missing minReplicas", hra.Namespace, hra.Name)
 } else if hra.Spec.MaxReplicas == nil {
@@ -27,16 +71,22 @@ func (r *HorizontalRunnerAutoscalerReconciler) determineDesiredReplicas(rd v1alp
 }

 metrics := hra.Spec.Metrics
-if len(metrics) == 0 || metrics[0].Type == v1alpha1.AutoscalingMetricTypeTotalNumberOfQueuedAndInProgressWorkflowRuns {
-return r.calculateReplicasByQueuedAndInProgressWorkflowRuns(rd, hra)
+if len(metrics) == 0 {
+if len(hra.Spec.ScaleUpTriggers) == 0 {
+return r.suggestReplicasByQueuedAndInProgressWorkflowRuns(rd, hra)
+}
+
+return nil, nil
+} else if metrics[0].Type == v1alpha1.AutoscalingMetricTypeTotalNumberOfQueuedAndInProgressWorkflowRuns {
+return r.suggestReplicasByQueuedAndInProgressWorkflowRuns(rd, hra)
 } else if metrics[0].Type == v1alpha1.AutoscalingMetricTypePercentageRunnersBusy {
-return r.calculateReplicasByPercentageRunnersBusy(rd, hra)
+return r.suggestReplicasByPercentageRunnersBusy(rd, hra)
 } else {
 return nil, fmt.Errorf("validting autoscaling metrics: unsupported metric type %q", metrics[0].Type)
 }
 }

-func (r *HorizontalRunnerAutoscalerReconciler) calculateReplicasByQueuedAndInProgressWorkflowRuns(rd v1alpha1.RunnerDeployment, hra v1alpha1.HorizontalRunnerAutoscaler) (*int, error) {
+func (r *HorizontalRunnerAutoscalerReconciler) suggestReplicasByQueuedAndInProgressWorkflowRuns(rd v1alpha1.RunnerDeployment, hra v1alpha1.HorizontalRunnerAutoscaler) (*int, error) {

 var repos [][]string
 metrics := hra.Spec.Metrics
@@ -47,6 +97,13 @@ func (r *HorizontalRunnerAutoscalerReconciler) calculateReplicasByQueuedAndInPro
 return nil, fmt.Errorf("asserting runner deployment spec to detect bug: spec.template.organization should not be empty on this code path")
 }

+// In case it's an organizational runners deployment without any scaling metrics defined,
+// we assume that the desired replicas should always be `minReplicas + capacityReservedThroughWebhook`.
+// See https://github.com/summerwind/actions-runner-controller/issues/377#issuecomment-793372693
+if len(metrics) == 0 {
+return nil, nil
+}
+
 if len(metrics[0].RepositoryNames) == 0 {
 return nil, errors.New("validating autoscaling metrics: spec.autoscaling.metrics[].repositoryNames is required and must have one more more entries for organizational runner deployment")
 }
@@ -96,12 +153,12 @@ func (r *HorizontalRunnerAutoscalerReconciler) calculateReplicasByQueuedAndInPro

 for _, repo := range repos {
 user, repoName := repo[0], repo[1]
-list, _, err := r.GitHubClient.Actions.ListRepositoryWorkflowRuns(context.TODO(), user, repoName, nil)
+workflowRuns, err := r.GitHubClient.ListRepositoryWorkflowRuns(context.TODO(), user, repoName)
 if err != nil {
 return nil, err
 }

-for _, run := range list.WorkflowRuns {
+for _, run := range workflowRuns {
 total++

 // In May 2020, there are only 3 statuses.
@@ -121,42 +178,24 @@ func (r *HorizontalRunnerAutoscalerReconciler) calculateReplicasByQueuedAndInPro
 }
 }

-minReplicas := *hra.Spec.MinReplicas
-maxReplicas := *hra.Spec.MaxReplicas
 necessaryReplicas := queued + inProgress

-var desiredReplicas int
-
-if necessaryReplicas < minReplicas {
-desiredReplicas = minReplicas
-} else if necessaryReplicas > maxReplicas {
-desiredReplicas = maxReplicas
-} else {
-desiredReplicas = necessaryReplicas
-}
-
-rd.Status.Replicas = &desiredReplicas
-replicas := desiredReplicas
-
 r.Log.V(1).Info(
-"Calculated desired replicas",
-"computed_replicas_desired", desiredReplicas,
-"spec_replicas_min", minReplicas,
-"spec_replicas_max", maxReplicas,
+fmt.Sprintf("Suggested desired replicas of %d by TotalNumberOfQueuedAndInProgressWorkflowRuns", necessaryReplicas),
 "workflow_runs_completed", completed,
 "workflow_runs_in_progress", inProgress,
 "workflow_runs_queued", queued,
 "workflow_runs_unknown", unknown,
+"namespace", hra.Namespace,
+"runner_deployment", rd.Name,
+"horizontal_runner_autoscaler", hra.Name,
 )

-return &replicas, nil
+return &necessaryReplicas, nil
 }

-func (r *HorizontalRunnerAutoscalerReconciler) calculateReplicasByPercentageRunnersBusy(rd v1alpha1.RunnerDeployment, hra v1alpha1.HorizontalRunnerAutoscaler) (*int, error) {
+func (r *HorizontalRunnerAutoscalerReconciler) suggestReplicasByPercentageRunnersBusy(rd v1alpha1.RunnerDeployment, hra v1alpha1.HorizontalRunnerAutoscaler) (*int, error) {
 ctx := context.Background()
-orgName := rd.Spec.Template.Spec.Organization
-minReplicas := *hra.Spec.MinReplicas
-maxReplicas := *hra.Spec.MaxReplicas
 metrics := hra.Spec.Metrics[0]
 scaleUpThreshold := defaultScaleUpThreshold
 scaleDownThreshold := defaultScaleDownThreshold
@@ -178,14 +217,34 @@ func (r *HorizontalRunnerAutoscalerReconciler) calculateReplicasByPercentageRunn

 scaleDownThreshold = sdt
 }
-if metrics.ScaleUpFactor != "" {
+scaleUpAdjustment := metrics.ScaleUpAdjustment
+if scaleUpAdjustment != 0 {
+if metrics.ScaleUpAdjustment < 0 {
+return nil, errors.New("validating autoscaling metrics: spec.autoscaling.metrics[].scaleUpAdjustment cannot be lower than 0")
+}
+
+if metrics.ScaleUpFactor != "" {
+return nil, errors.New("validating autoscaling metrics: spec.autoscaling.metrics[]: scaleUpAdjustment and scaleUpFactor cannot be specified together")
+}
+} else if metrics.ScaleUpFactor != "" {
 suf, err := strconv.ParseFloat(metrics.ScaleUpFactor, 64)
 if err != nil {
 return nil, errors.New("validating autoscaling metrics: spec.autoscaling.metrics[].scaleUpFactor cannot be parsed into a float64")
 }
 scaleUpFactor = suf
 }
-if metrics.ScaleDownFactor != "" {
+scaleDownAdjustment := metrics.ScaleDownAdjustment
+if scaleDownAdjustment != 0 {
+if metrics.ScaleDownAdjustment < 0 {
+return nil, errors.New("validating autoscaling metrics: spec.autoscaling.metrics[].scaleDownAdjustment cannot be lower than 0")
+}
+
+if metrics.ScaleDownFactor != "" {
+return nil, errors.New("validating autoscaling metrics: spec.autoscaling.metrics[]: scaleDownAdjustment and scaleDownFactor cannot be specified together")
+}
+} else if metrics.ScaleDownFactor != "" {
 sdf, err := strconv.ParseFloat(metrics.ScaleDownFactor, 64)
 if err != nil {
 return nil, errors.New("validating autoscaling metrics: spec.autoscaling.metrics[].scaleDownFactor cannot be parsed into a float64")
@@ -195,55 +254,114 @@ func (r *HorizontalRunnerAutoscalerReconciler) calculateReplicasByPercentageRunn

 // return the list of runners in namespace. Horizontal Runner Autoscaler should only be responsible for scaling resources in its own ns.
 var runnerList v1alpha1.RunnerList
-if err := r.List(ctx, &runnerList, client.InNamespace(rd.Namespace)); err != nil {
+
+var opts []client.ListOption
+
+opts = append(opts, client.InNamespace(rd.Namespace))
+
+selector, err := metav1.LabelSelectorAsSelector(getSelector(&rd))
+if err != nil {
 return nil, err
 }

+opts = append(opts, client.MatchingLabelsSelector{Selector: selector})
+
+r.Log.V(2).Info("Finding runners with selector", "ns", rd.Namespace)
+
+if err := r.List(
+ctx,
+&runnerList,
+opts...,
+); err != nil {
+if !kerrors.IsNotFound(err) {
+return nil, err
+}
+}
+
 runnerMap := make(map[string]struct{})
 for _, items := range runnerList.Items {
 runnerMap[items.Name] = struct{}{}
 }

+var (
+enterprise = rd.Spec.Template.Spec.Enterprise
+organization = rd.Spec.Template.Spec.Organization
+repository = rd.Spec.Template.Spec.Repository
+)
+
 // ListRunners will return all runners managed by GitHub - not restricted to ns
-runners, err := r.GitHubClient.ListRunners(ctx, orgName, "")
+runners, err := r.GitHubClient.ListRunners(
+ctx,
+enterprise,
+organization,
+repository)
 if err != nil {
 return nil, err
 }
-numRunners := len(runnerList.Items)
-numRunnersBusy := 0
+var desiredReplicasBefore int
+
+if v := rd.Spec.Replicas; v == nil {
+desiredReplicasBefore = 1
+} else {
+desiredReplicasBefore = *v
+}
+
+var (
+numRunners int
+numRunnersRegistered int
+numRunnersBusy int
+)
+
+numRunners = len(runnerList.Items)
+
 for _, runner := range runners {
-if _, ok := runnerMap[*runner.Name]; ok && runner.GetBusy() {
-numRunnersBusy++
+if _, ok := runnerMap[*runner.Name]; ok {
+numRunnersRegistered++
+
+if runner.GetBusy() {
+numRunnersBusy++
+}
 }
 }

 var desiredReplicas int
-fractionBusy := float64(numRunnersBusy) / float64(numRunners)
+fractionBusy := float64(numRunnersBusy) / float64(desiredReplicasBefore)
 if fractionBusy >= scaleUpThreshold {
-desiredReplicas = int(math.Ceil(float64(numRunners) * scaleUpFactor))
+if scaleUpAdjustment > 0 {
+desiredReplicas = desiredReplicasBefore + scaleUpAdjustment
+} else {
+desiredReplicas = int(math.Ceil(float64(desiredReplicasBefore) * scaleUpFactor))
+}
 } else if fractionBusy < scaleDownThreshold {
-desiredReplicas = int(float64(numRunners) * scaleDownFactor)
+if scaleDownAdjustment > 0 {
+desiredReplicas = desiredReplicasBefore - scaleDownAdjustment
+} else {
+desiredReplicas = int(float64(desiredReplicasBefore) * scaleDownFactor)
+}
 } else {
 desiredReplicas = *rd.Spec.Replicas
 }

-if desiredReplicas < minReplicas {
-desiredReplicas = minReplicas
-} else if desiredReplicas > maxReplicas {
-desiredReplicas = maxReplicas
-}
+// NOTES for operators:
+//
+// - num_runners can be as twice as large as replicas_desired_before while
+// the runnerdeployment controller is replacing RunnerReplicaSet for runner update.

 r.Log.V(1).Info(
-"Calculated desired replicas",
-"computed_replicas_desired", desiredReplicas,
-"spec_replicas_min", minReplicas,
-"spec_replicas_max", maxReplicas,
-"current_replicas", rd.Spec.Replicas,
+fmt.Sprintf("Suggested desired replicas of %d by PercentageRunnersBusy", desiredReplicas),
+"replicas_desired_before", desiredReplicasBefore,
+"replicas_desired", desiredReplicas,
 "num_runners", numRunners,
+"num_runners_registered", numRunnersRegistered,
 "num_runners_busy", numRunnersBusy,
+"namespace", hra.Namespace,
+"runner_deployment", rd.Name,
+"horizontal_runner_autoscaler", hra.Name,
+"enterprise", enterprise,
+"organization", organization,
+"repository", repository,
 )

-rd.Status.Replicas = &desiredReplicas
-replicas := desiredReplicas
-
-return &replicas, nil
+return &desiredReplicas, nil
 }
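For reference, the scaling decision that the PercentageRunnersBusy changes above introduce can be sketched in isolation. This is a minimal, hypothetical stand-in, not the controller's actual function: the threshold and factor values in main are example inputs (the real reconciler reads them from the HorizontalRunnerAutoscaler spec or its defaults), and min/max clamping, caching, and the GitHub API calls happen elsewhere.

// percentage_runners_busy_sketch.go - illustrative only; names below are not from the repository.
package main

import (
	"fmt"
	"math"
)

// suggestReplicas mirrors the branch structure of suggestReplicasByPercentageRunnersBusy:
// the busy fraction is computed against the previously desired replica count, and either a
// fixed adjustment or a multiplicative factor is applied.
func suggestReplicas(desiredBefore, numBusy int, scaleUpThreshold, scaleDownThreshold, scaleUpFactor, scaleDownFactor float64, scaleUpAdjustment, scaleDownAdjustment int) int {
	if desiredBefore <= 0 {
		// The controller defaults desiredReplicasBefore to 1 when spec.replicas is nil.
		desiredBefore = 1
	}

	fractionBusy := float64(numBusy) / float64(desiredBefore)

	switch {
	case fractionBusy >= scaleUpThreshold:
		if scaleUpAdjustment > 0 {
			return desiredBefore + scaleUpAdjustment
		}
		return int(math.Ceil(float64(desiredBefore) * scaleUpFactor))
	case fractionBusy < scaleDownThreshold:
		if scaleDownAdjustment > 0 {
			return desiredBefore - scaleDownAdjustment
		}
		return int(float64(desiredBefore) * scaleDownFactor)
	default:
		return desiredBefore
	}
}

func main() {
	// 4 of 5 runners busy, example thresholds 0.75/0.3 and factors 2.0/0.7: factor path doubles.
	fmt.Println(suggestReplicas(5, 4, 0.75, 0.3, 2.0, 0.7, 0, 0)) // 10
	// Same situation with scaleUpAdjustment=1: the additive adjustment takes precedence.
	fmt.Println(suggestReplicas(5, 4, 0.75, 0.3, 2.0, 0.7, 1, 0)) // 6
}

In either branch the result is only a suggestion; clamping to minReplicas/maxReplicas is applied by the caller, as the removed clamping code above shows.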
@@ -40,14 +40,18 @@ func TestDetermineDesiredReplicas_RepositoryRunner(t *testing.T) {

 metav1Now := metav1.Now()
 testcases := []struct {
 repo string
 org string
 fixed *int
 max *int
 min *int
 sReplicas *int
 sTime *metav1.Time
-workflowRuns string
+workflowRuns string
+workflowRuns_queued string
+workflowRuns_in_progress string
+
 workflowJobs map[int]string
 want int
 err string
@@ -55,87 +59,107 @@ func TestDetermineDesiredReplicas_RepositoryRunner(t *testing.T) {
 // Legacy functionality
 // 3 demanded, max at 3
 {
 repo: "test/valid",
 min: intPtr(2),
 max: intPtr(3),
 workflowRuns: `{"total_count": 4, "workflow_runs":[{"status":"queued"}, {"status":"in_progress"}, {"status":"in_progress"}, {"status":"completed"}]}"`,
-want: 3,
+workflowRuns_queued: `{"total_count": 1, "workflow_runs":[{"status":"queued"}]}"`,
+workflowRuns_in_progress: `{"total_count": 2, "workflow_runs":[{"status":"in_progress"}, {"status":"in_progress"}]}"`,
+want: 3,
 },
 // 2 demanded, max at 3, currently 3, delay scaling down due to grace period
 {
 repo: "test/valid",
 min: intPtr(2),
 max: intPtr(3),
 sReplicas: intPtr(3),
 sTime: &metav1Now,
-workflowRuns: `{"total_count": 4, "workflow_runs":[{"status":"queued"}, {"status":"in_progress"}, {"status":"completed"}]}"`,
-want: 3,
+workflowRuns: `{"total_count": 3, "workflow_runs":[{"status":"queued"}, {"status":"in_progress"}, {"status":"completed"}]}"`,
+workflowRuns_queued: `{"total_count": 1, "workflow_runs":[{"status":"queued"}]}"`,
+workflowRuns_in_progress: `{"total_count": 1, "workflow_runs":[{"status":"in_progress"}]}"`,
+want: 3,
 },
 // 3 demanded, max at 2
 {
 repo: "test/valid",
 min: intPtr(2),
 max: intPtr(2),
 workflowRuns: `{"total_count": 4, "workflow_runs":[{"status":"queued"}, {"status":"in_progress"}, {"status":"in_progress"}, {"status":"completed"}]}"`,
-want: 2,
+workflowRuns_queued: `{"total_count": 1, "workflow_runs":[{"status":"queued"}]}"`,
+workflowRuns_in_progress: `{"total_count": 2, "workflow_runs":[{"status":"in_progress"}, {"status":"in_progress"}]}"`,
+want: 2,
 },
 // 2 demanded, min at 2
 {
 repo: "test/valid",
 min: intPtr(2),
 max: intPtr(3),
 workflowRuns: `{"total_count": 3, "workflow_runs":[{"status":"queued"}, {"status":"in_progress"}, {"status":"completed"}]}"`,
-want: 2,
+workflowRuns_queued: `{"total_count": 1, "workflow_runs":[{"status":"queued"}]}"`,
+workflowRuns_in_progress: `{"total_count": 1, "workflow_runs":[{"status":"in_progress"}]}"`,
+want: 2,
 },
 // 1 demanded, min at 2
 {
 repo: "test/valid",
 min: intPtr(2),
 max: intPtr(3),
 workflowRuns: `{"total_count": 2, "workflow_runs":[{"status":"queued"}, {"status":"completed"}]}"`,
-want: 2,
+workflowRuns_queued: `{"total_count": 1, "workflow_runs":[{"status":"queued"}]}"`,
+workflowRuns_in_progress: `{"total_count": 0, "workflow_runs":[]}"`,
+want: 2,
 },
 // 1 demanded, min at 2
 {
 repo: "test/valid",
 min: intPtr(2),
 max: intPtr(3),
 workflowRuns: `{"total_count": 2, "workflow_runs":[{"status":"in_progress"}, {"status":"completed"}]}"`,
-want: 2,
+workflowRuns_queued: `{"total_count": 0, "workflow_runs":[]}"`,
+workflowRuns_in_progress: `{"total_count": 1, "workflow_runs":[{"status":"in_progress"}]}"`,
+want: 2,
 },
 // 1 demanded, min at 1
 {
 repo: "test/valid",
 min: intPtr(1),
 max: intPtr(3),
 workflowRuns: `{"total_count": 2, "workflow_runs":[{"status":"queued"}, {"status":"completed"}]}"`,
-want: 1,
+workflowRuns_queued: `{"total_count": 1, "workflow_runs":[{"status":"queued"}]}"`,
+workflowRuns_in_progress: `{"total_count": 0, "workflow_runs":[]}"`,
+want: 1,
 },
 // 1 demanded, min at 1
 {
 repo: "test/valid",
 min: intPtr(1),
 max: intPtr(3),
 workflowRuns: `{"total_count": 2, "workflow_runs":[{"status":"in_progress"}, {"status":"completed"}]}"`,
-want: 1,
+workflowRuns_queued: `{"total_count": 0, "workflow_runs":[]}"`,
+workflowRuns_in_progress: `{"total_count": 1, "workflow_runs":[{"status":"in_progress"}]}"`,
+want: 1,
 },
 // fixed at 3
 {
 repo: "test/valid",
 min: intPtr(1),
 max: intPtr(3),
 fixed: intPtr(3),
 workflowRuns: `{"total_count": 4, "workflow_runs":[{"status":"in_progress"}, {"status":"in_progress"}, {"status":"in_progress"}, {"status":"completed"}]}"`,
-want: 3,
+workflowRuns_queued: `{"total_count": 0, "workflow_runs":[]}"`,
+workflowRuns_in_progress: `{"total_count": 3, "workflow_runs":[{"status":"in_progress"}, {"status":"in_progress"}, {"status":"in_progress"}]}"`,
+want: 3,
 },

 // Job-level autoscaling
 // 5 requested from 3 workflows
 {
 repo: "test/valid",
 min: intPtr(2),
 max: intPtr(10),
 workflowRuns: `{"total_count": 4, "workflow_runs":[{"id": 1, "status":"queued"}, {"id": 2, "status":"in_progress"}, {"id": 3, "status":"in_progress"}, {"status":"completed"}]}"`,
+workflowRuns_queued: `{"total_count": 1, "workflow_runs":[{"id": 1, "status":"queued"}]}"`,
+workflowRuns_in_progress: `{"total_count": 2, "workflow_runs":[{"id": 2, "status":"in_progress"}, {"id": 3, "status":"in_progress"}]}"`,
 workflowJobs: map[int]string{
 1: `{"jobs": [{"status":"queued"}, {"status":"queued"}]}`,
 2: `{"jobs": [{"status": "in_progress"}, {"status":"completed"}]}`,
@@ -157,7 +181,11 @@ func TestDetermineDesiredReplicas_RepositoryRunner(t *testing.T) {
 _ = v1alpha1.AddToScheme(scheme)

 t.Run(fmt.Sprintf("case %d", i), func(t *testing.T) {
-server := fake.NewServer(fake.WithListRepositoryWorkflowRunsResponse(200, tc.workflowRuns), fake.WithListWorkflowJobsResponse(200, tc.workflowJobs))
+server := fake.NewServer(
+fake.WithListRepositoryWorkflowRunsResponse(200, tc.workflowRuns, tc.workflowRuns_queued, tc.workflowRuns_in_progress),
+fake.WithListWorkflowJobsResponse(200, tc.workflowJobs),
+fake.WithListRunnersResponse(200, fake.RunnersListBody),
+)
 defer server.Close()
 client := newGithubClient(server)

@@ -196,7 +224,7 @@ func TestDetermineDesiredReplicas_RepositoryRunner(t *testing.T) {
 },
 }

-got, err := h.computeReplicas(rd, hra)
+got, _, _, err := h.computeReplicasWithCache(log, metav1Now.Time, rd, hra)
 if err != nil {
 if tc.err == "" {
 t.Fatalf("unexpected error: expected none, got %v", err)
@@ -206,12 +234,8 @@ func TestDetermineDesiredReplicas_RepositoryRunner(t *testing.T) {
 return
 }

-if got == nil {
-t.Fatalf("unexpected value of rs.Spec.Replicas: nil")
-}
-
-if *got != tc.want {
-t.Errorf("%d: incorrect desired replicas: want %d, got %d", i, tc.want, *got)
+if got != tc.want {
+t.Errorf("%d: incorrect desired replicas: want %d, got %d", i, tc.want, got)
 }
 })
 }
@@ -224,129 +248,157 @@ func TestDetermineDesiredReplicas_OrganizationalRunner(t *testing.T) {

 metav1Now := metav1.Now()
 testcases := []struct {
 repos []string
 org string
 fixed *int
 max *int
 min *int
 sReplicas *int
 sTime *metav1.Time
-workflowRuns string
+workflowRuns string
+workflowRuns_queued string
+workflowRuns_in_progress string
+
 workflowJobs map[int]string
 want int
 err string
 }{
 // 3 demanded, max at 3
 {
 org: "test",
 repos: []string{"valid"},
 min: intPtr(2),
 max: intPtr(3),
 workflowRuns: `{"total_count": 4, "workflow_runs":[{"status":"queued"}, {"status":"in_progress"}, {"status":"in_progress"}, {"status":"completed"}]}"`,
-want: 3,
+workflowRuns_queued: `{"total_count": 1, "workflow_runs":[{"status":"queued"}]}"`,
+workflowRuns_in_progress: `{"total_count": 2, "workflow_runs":[{"status":"in_progress"}, {"status":"in_progress"}]}"`,
+want: 3,
 },
 // 2 demanded, max at 3, currently 3, delay scaling down due to grace period
 {
 org: "test",
 repos: []string{"valid"},
 min: intPtr(2),
 max: intPtr(3),
 sReplicas: intPtr(3),
 sTime: &metav1Now,
 workflowRuns: `{"total_count": 4, "workflow_runs":[{"status":"queued"}, {"status":"in_progress"}, {"status":"completed"}]}"`,
-want: 3,
+workflowRuns_queued: `{"total_count": 1, "workflow_runs":[{"status":"queued"}]}"`,
+workflowRuns_in_progress: `{"total_count": 1, "workflow_runs":[{"status":"in_progress"}]}"`,
+want: 3,
 },
 // 3 demanded, max at 2
 {
 org: "test",
 repos: []string{"valid"},
 min: intPtr(2),
 max: intPtr(2),
 workflowRuns: `{"total_count": 4, "workflow_runs":[{"status":"queued"}, {"status":"in_progress"}, {"status":"in_progress"}, {"status":"completed"}]}"`,
-want: 2,
+workflowRuns_queued: `{"total_count": 1, "workflow_runs":[{"status":"queued"}]}"`,
+workflowRuns_in_progress: `{"total_count": 2, "workflow_runs":[{"status":"in_progress"}, {"status":"in_progress"}]}"`,
+want: 2,
 },
 // 2 demanded, min at 2
 {
 org: "test",
 repos: []string{"valid"},
 min: intPtr(2),
 max: intPtr(3),
 workflowRuns: `{"total_count": 3, "workflow_runs":[{"status":"queued"}, {"status":"in_progress"}, {"status":"completed"}]}"`,
-want: 2,
+workflowRuns_queued: `{"total_count": 1, "workflow_runs":[{"status":"queued"}]}"`,
+workflowRuns_in_progress: `{"total_count": 1, "workflow_runs":[{"status":"in_progress"}]}"`,
+want: 2,
 },
 // 1 demanded, min at 2
 {
 org: "test",
 repos: []string{"valid"},
 min: intPtr(2),
 max: intPtr(3),
 workflowRuns: `{"total_count": 2, "workflow_runs":[{"status":"queued"}, {"status":"completed"}]}"`,
-want: 2,
+workflowRuns_queued: `{"total_count": 1, "workflow_runs":[{"status":"queued"}]}"`,
+workflowRuns_in_progress: `{"total_count": 0, "workflow_runs":[]}"`,
+want: 2,
 },
 // 1 demanded, min at 2
 {
 org: "test",
 repos: []string{"valid"},
 min: intPtr(2),
 max: intPtr(3),
 workflowRuns: `{"total_count": 2, "workflow_runs":[{"status":"in_progress"}, {"status":"completed"}]}"`,
-want: 2,
+workflowRuns_queued: `{"total_count": 0, "workflow_runs":[]}"`,
+workflowRuns_in_progress: `{"total_count": 1, "workflow_runs":[{"status":"in_progress"}]}"`,
+want: 2,
 },
 // 1 demanded, min at 1
 {
 org: "test",
 repos: []string{"valid"},
 min: intPtr(1),
 max: intPtr(3),
 workflowRuns: `{"total_count": 2, "workflow_runs":[{"status":"queued"}, {"status":"completed"}]}"`,
-want: 1,
+workflowRuns_queued: `{"total_count": 1, "workflow_runs":[{"status":"queued"}]}"`,
+workflowRuns_in_progress: `{"total_count": 0, "workflow_runs":[]}"`,
+want: 1,
 },
 // 1 demanded, min at 1
 {
 org: "test",
 repos: []string{"valid"},
 min: intPtr(1),
 max: intPtr(3),
 workflowRuns: `{"total_count": 2, "workflow_runs":[{"status":"in_progress"}, {"status":"completed"}]}"`,
-want: 1,
+workflowRuns_queued: `{"total_count": 0, "workflow_runs":[]}"`,
+workflowRuns_in_progress: `{"total_count": 1, "workflow_runs":[{"status":"in_progress"}]}"`,
+want: 1,
 },
 // fixed at 3
 {
 org: "test",
 repos: []string{"valid"},
 fixed: intPtr(1),
 min: intPtr(1),
 max: intPtr(3),
-workflowRuns: `{"total_count": 2, "workflow_runs":[{"status":"in_progress"}, {"status":"in_progress"}, {"status":"in_progress"}, {"status":"completed"}]}"`,
-want: 3,
+workflowRuns: `{"total_count": 4, "workflow_runs":[{"status":"in_progress"}, {"status":"in_progress"}, {"status":"in_progress"}, {"status":"completed"}]}"`,
+workflowRuns_queued: `{"total_count": 0, "workflow_runs":[]}"`,
+workflowRuns_in_progress: `{"total_count": 3, "workflow_runs":[{"status":"in_progress"},{"status":"in_progress"},{"status":"in_progress"}]}"`,
+want: 3,
 },
 // org runner, fixed at 3
 {
 org: "test",
 repos: []string{"valid"},
 fixed: intPtr(1),
 min: intPtr(1),
 max: intPtr(3),
-workflowRuns: `{"total_count": 2, "workflow_runs":[{"status":"in_progress"}, {"status":"in_progress"}, {"status":"in_progress"}, {"status":"completed"}]}"`,
-want: 3,
+workflowRuns: `{"total_count": 4, "workflow_runs":[{"status":"in_progress"}, {"status":"in_progress"}, {"status":"in_progress"}, {"status":"completed"}]}"`,
+workflowRuns_queued: `{"total_count": 0, "workflow_runs":[]}"`,
+workflowRuns_in_progress: `{"total_count": 3, "workflow_runs":[{"status":"in_progress"},{"status":"in_progress"},{"status":"in_progress"}]}"`,
+want: 3,
 },
 // org runner, 1 demanded, min at 1, no repos
 {
 org: "test",
 min: intPtr(1),
 max: intPtr(3),
 workflowRuns: `{"total_count": 2, "workflow_runs":[{"status":"in_progress"}, {"status":"completed"}]}"`,
-err: "validating autoscaling metrics: spec.autoscaling.metrics[].repositoryNames is required and must have one more more entries for organizational runner deployment",
+workflowRuns_queued: `{"total_count": 0, "workflow_runs":[]}"`,
+workflowRuns_in_progress: `{"total_count": 1, "workflow_runs":[{"status":"in_progress"}]}"`,
+err: "validating autoscaling metrics: spec.autoscaling.metrics[].repositoryNames is required and must have one more more entries for organizational runner deployment",
 },

 // Job-level autoscaling
 // 5 requested from 3 workflows
 {
 org: "test",
 repos: []string{"valid"},
 min: intPtr(2),
 max: intPtr(10),
 workflowRuns: `{"total_count": 4, "workflow_runs":[{"id": 1, "status":"queued"}, {"id": 2, "status":"in_progress"}, {"id": 3, "status":"in_progress"}, {"status":"completed"}]}"`,
+workflowRuns_queued: `{"total_count": 1, "workflow_runs":[{"id": 1, "status":"queued"}]}"`,
+workflowRuns_in_progress: `{"total_count": 2, "workflow_runs":[{"id": 2, "status":"in_progress"}, {"id": 3, "status":"in_progress"}, {"status":"completed"}]}"`,
 workflowJobs: map[int]string{
 1: `{"jobs": [{"status":"queued"}, {"status":"queued"}]}`,
 2: `{"jobs": [{"status": "in_progress"}, {"status":"completed"}]}`,
@@ -368,7 +420,13 @@ func TestDetermineDesiredReplicas_OrganizationalRunner(t *testing.T) {
 _ = v1alpha1.AddToScheme(scheme)

 t.Run(fmt.Sprintf("case %d", i), func(t *testing.T) {
-server := fake.NewServer(fake.WithListRepositoryWorkflowRunsResponse(200, tc.workflowRuns), fake.WithListWorkflowJobsResponse(200, tc.workflowJobs))
+t.Helper()
+
+server := fake.NewServer(
+fake.WithListRepositoryWorkflowRunsResponse(200, tc.workflowRuns, tc.workflowRuns_queued, tc.workflowRuns_in_progress),
+fake.WithListWorkflowJobsResponse(200, tc.workflowJobs),
+fake.WithListRunnersResponse(200, fake.RunnersListBody),
+)
 defer server.Close()
 client := newGithubClient(server)

@@ -383,7 +441,17 @@ func TestDetermineDesiredReplicas_OrganizationalRunner(t *testing.T) {
 Name: "testrd",
 },
 Spec: v1alpha1.RunnerDeploymentSpec{
+Selector: &metav1.LabelSelector{
+MatchLabels: map[string]string{
+"foo": "bar",
+},
+},
 Template: v1alpha1.RunnerTemplate{
+ObjectMeta: metav1.ObjectMeta{
+Labels: map[string]string{
+"foo": "bar",
+},
+},
 Spec: v1alpha1.RunnerSpec{
 Organization: tc.org,
 },
@@ -415,7 +483,7 @@ func TestDetermineDesiredReplicas_OrganizationalRunner(t *testing.T) {
 },
 }

-got, err := h.computeReplicas(rd, hra)
+got, _, _, err := h.computeReplicasWithCache(log, metav1Now.Time, rd, hra)
 if err != nil {
 if tc.err == "" {
 t.Fatalf("unexpected error: expected none, got %v", err)
@@ -425,12 +493,8 @@ func TestDetermineDesiredReplicas_OrganizationalRunner(t *testing.T) {
 return
 }

-if got == nil {
-t.Fatalf("unexpected value of rs.Spec.Replicas: nil, wanted %v", tc.want)
-}
-
-if *got != tc.want {
-t.Errorf("%d: incorrect desired replicas: want %d, got %d", i, tc.want, *got)
+if got != tc.want {
+t.Errorf("%d: incorrect desired replicas: want %d, got %d", i, tc.want, got)
 }
 })
 }
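The test changes above split the single workflow-runs fixture into separate queued and in_progress listings, and the suggested replica count for TotalNumberOfQueuedAndInProgressWorkflowRuns is simply the sum of the two. The following is a small, self-contained sketch of that tally; the JSON shape mirrors the test fixtures, and the fake server options and controller wiring are deliberately not reproduced.

// workflow_runs_tally_sketch.go - illustrative only; the struct and function names are made up.
package main

import (
	"encoding/json"
	"fmt"
)

// workflowRunsPage matches the subset of the GitHub "list workflow runs" payload that the fixtures use.
type workflowRunsPage struct {
	TotalCount   int `json:"total_count"`
	WorkflowRuns []struct {
		Status string `json:"status"`
	} `json:"workflow_runs"`
}

// countRuns decodes one list response and returns how many runs it contains.
func countRuns(body string) (int, error) {
	var page workflowRunsPage
	if err := json.Unmarshal([]byte(body), &page); err != nil {
		return 0, err
	}
	return len(page.WorkflowRuns), nil
}

func main() {
	queued := `{"total_count": 1, "workflow_runs":[{"status":"queued"}]}`
	inProgress := `{"total_count": 2, "workflow_runs":[{"status":"in_progress"}, {"status":"in_progress"}]}`

	q, err := countRuns(queued)
	if err != nil {
		panic(err)
	}
	p, err := countRuns(inProgress)
	if err != nil {
		panic(err)
	}

	// necessaryReplicas in the controller is queued + inProgress; clamping to
	// minReplicas/maxReplicas happens later, outside the suggestion function.
	fmt.Println(q + p) // 3
}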
controllers/horizontal_runner_autoscaler_webhook.go (new file, 458 lines)
@@ -0,0 +1,458 @@
+/*
+Copyright 2020 The actions-runner-controller authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package controllers
+
+import (
+"context"
+"fmt"
+"io/ioutil"
+metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+"k8s.io/apimachinery/pkg/types"
+"net/http"
+"sigs.k8s.io/controller-runtime/pkg/reconcile"
+"strings"
+"time"
+
+"github.com/go-logr/logr"
+gogithub "github.com/google/go-github/v33/github"
+"k8s.io/apimachinery/pkg/runtime"
+"k8s.io/client-go/tools/record"
+ctrl "sigs.k8s.io/controller-runtime"
+"sigs.k8s.io/controller-runtime/pkg/client"
+
+"github.com/summerwind/actions-runner-controller/api/v1alpha1"
+)
+
+const (
+scaleTargetKey = "scaleTarget"
+)
+
+// HorizontalRunnerAutoscalerGitHubWebhook autoscales a HorizontalRunnerAutoscaler and the RunnerDeployment on each
+// GitHub Webhook received
+type HorizontalRunnerAutoscalerGitHubWebhook struct {
+client.Client
+Log logr.Logger
+Recorder record.EventRecorder
+Scheme *runtime.Scheme
+
+// SecretKeyBytes is the byte representation of the Webhook secret token
+// the administrator is generated and specified in GitHub Web UI.
+SecretKeyBytes []byte
+
+// Namespace is the namespace to watch for HorizontalRunnerAutoscaler's to be
+// scaled on Webhook.
+// Set to empty for letting it watch for all namespaces.
+Namespace string
+Name string
+}
+
+func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) Reconcile(request reconcile.Request) (reconcile.Result, error) {
+return ctrl.Result{}, nil
+}
+
+// +kubebuilder:rbac:groups=actions.summerwind.dev,resources=horizontalrunnerautoscalers,verbs=get;list;watch;create;update;patch;delete
+// +kubebuilder:rbac:groups=actions.summerwind.dev,resources=horizontalrunnerautoscalers/finalizers,verbs=get;list;watch;create;update;patch;delete
+// +kubebuilder:rbac:groups=actions.summerwind.dev,resources=horizontalrunnerautoscalers/status,verbs=get;update;patch
+// +kubebuilder:rbac:groups=core,resources=events,verbs=create;patch
+
+func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) Handle(w http.ResponseWriter, r *http.Request) {
+var (
+ok bool
+
+err error
+)
+
+defer func() {
+if !ok {
+w.WriteHeader(http.StatusInternalServerError)
+
+if err != nil {
+msg := err.Error()
+if written, err := w.Write([]byte(msg)); err != nil {
+autoscaler.Log.Error(err, "failed writing http error response", "msg", msg, "written", written)
+}
+}
+}
+}()
+
+defer func() {
+if r.Body != nil {
+r.Body.Close()
+}
+}()
+
+// respond ok to GET / e.g. for health check
+if r.Method == http.MethodGet {
+fmt.Fprintln(w, "webhook server is running")
+return
+}
+
+var payload []byte
+
+if len(autoscaler.SecretKeyBytes) > 0 {
+payload, err = gogithub.ValidatePayload(r, autoscaler.SecretKeyBytes)
+if err != nil {
+autoscaler.Log.Error(err, "error validating request body")
+
+return
+}
+} else {
+payload, err = ioutil.ReadAll(r.Body)
+if err != nil {
+autoscaler.Log.Error(err, "error reading request body")
+
+return
+}
+}
+
+webhookType := gogithub.WebHookType(r)
+event, err := gogithub.ParseWebHook(webhookType, payload)
+if err != nil {
+var s string
+if payload != nil {
+s = string(payload)
+}
+
+autoscaler.Log.Error(err, "could not parse webhook", "webhookType", webhookType, "payload", s)
+
+return
+}
+
+var target *ScaleTarget
+
+log := autoscaler.Log.WithValues(
+"event", webhookType,
+"hookID", r.Header.Get("X-GitHub-Hook-ID"),
+"delivery", r.Header.Get("X-GitHub-Delivery"),
+)
+
+switch e := event.(type) {
+case *gogithub.PushEvent:
+target, err = autoscaler.getScaleUpTarget(
+context.TODO(),
+log,
+e.Repo.GetName(),
+e.Repo.Owner.GetLogin(),
+e.Repo.Owner.GetType(),
+autoscaler.MatchPushEvent(e),
+)
+case *gogithub.PullRequestEvent:
+target, err = autoscaler.getScaleUpTarget(
+context.TODO(),
+log,
+e.Repo.GetName(),
+e.Repo.Owner.GetLogin(),
+e.Repo.Owner.GetType(),
+autoscaler.MatchPullRequestEvent(e),
+)
+
+if pullRequest := e.PullRequest; pullRequest != nil {
+log = log.WithValues(
+"pullRequest.base.ref", e.PullRequest.Base.GetRef(),
+"action", e.GetAction(),
+)
+}
+case *gogithub.CheckRunEvent:
+target, err = autoscaler.getScaleUpTarget(
+context.TODO(),
+log,
+e.Repo.GetName(),
+e.Repo.Owner.GetLogin(),
+e.Repo.Owner.GetType(),
+autoscaler.MatchCheckRunEvent(e),
+)
+
+if checkRun := e.GetCheckRun(); checkRun != nil {
+log = log.WithValues(
+"checkRun.status", checkRun.GetStatus(),
+"action", e.GetAction(),
+)
+}
+case *gogithub.PingEvent:
+ok = true
+
+w.WriteHeader(http.StatusOK)
+
+msg := "pong"
+
+if written, err := w.Write([]byte(msg)); err != nil {
+log.Error(err, "failed writing http response", "msg", msg, "written", written)
+}
+
+log.Info("received ping event")
+
+return
+default:
+log.Info("unknown event type", "eventType", webhookType)
+
+return
+}
+
+if err != nil {
+log.Error(err, "handling check_run event")
+
+return
+}
+
+if target == nil {
+log.Info(
+"Scale target not found. If this is unexpected, ensure that there is exactly one repository-wide or organizational runner deployment that matches this webhook event",
+)
+
+msg := "no horizontalrunnerautoscaler to scale for this github event"
+
+ok = true
+
+w.WriteHeader(http.StatusOK)
+
+if written, err := w.Write([]byte(msg)); err != nil {
+log.Error(err, "failed writing http response", "msg", msg, "written", written)
+}
+
+return
+}
+
+if err := autoscaler.tryScaleUp(context.TODO(), target); err != nil {
+log.Error(err, "could not scale up")
+
+return
+}
+
+ok = true
+
+w.WriteHeader(http.StatusOK)
+
+msg := fmt.Sprintf("scaled %s by 1", target.Name)
+
+autoscaler.Log.Info(msg)
+
+if written, err := w.Write([]byte(msg)); err != nil {
+log.Error(err, "failed writing http response", "msg", msg, "written", written)
+}
+}
+
+func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) findHRAsByKey(ctx context.Context, value string) ([]v1alpha1.HorizontalRunnerAutoscaler, error) {
+ns := autoscaler.Namespace
+
+var defaultListOpts []client.ListOption
+
+if ns != "" {
+defaultListOpts = append(defaultListOpts, client.InNamespace(ns))
+}
+
+var hras []v1alpha1.HorizontalRunnerAutoscaler
+
+if value != "" {
+opts := append([]client.ListOption{}, defaultListOpts...)
+opts = append(opts, client.MatchingFields{scaleTargetKey: value})
+
+if autoscaler.Namespace != "" {
+opts = append(opts, client.InNamespace(autoscaler.Namespace))
+}
+
+var hraList v1alpha1.HorizontalRunnerAutoscalerList
+
+if err := autoscaler.List(ctx, &hraList, opts...); err != nil {
+return nil, err
+}
+
+for _, d := range hraList.Items {
+hras = append(hras, d)
+}
+}
+
+return hras, nil
+}
+
+func matchTriggerConditionAgainstEvent(types []string, eventAction *string) bool {
+if len(types) == 0 {
+return true
+}
+
+if eventAction == nil {
+return false
+}
+
+for _, tpe := range types {
+if tpe == *eventAction {
+return true
+}
+}
+
+return false
+}
+
+type ScaleTarget struct {
+v1alpha1.HorizontalRunnerAutoscaler
+v1alpha1.ScaleUpTrigger
+}
+
+func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) searchScaleTargets(hras []v1alpha1.HorizontalRunnerAutoscaler, f func(v1alpha1.ScaleUpTrigger) bool) []ScaleTarget {
+var matched []ScaleTarget
+
+for _, hra := range hras {
+if !hra.ObjectMeta.DeletionTimestamp.IsZero() {
+continue
+}
+
+for _, scaleUpTrigger := range hra.Spec.ScaleUpTriggers {
+if !f(scaleUpTrigger) {
+continue
+}
+
+matched = append(matched, ScaleTarget{
+HorizontalRunnerAutoscaler: hra,
+ScaleUpTrigger: scaleUpTrigger,
+})
+}
+}
+
+return matched
+}
+
+func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) getScaleTarget(ctx context.Context, name string, f func(v1alpha1.ScaleUpTrigger) bool) (*ScaleTarget, error) {
func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) getScaleTarget(ctx context.Context, name string, f func(v1alpha1.ScaleUpTrigger) bool) (*ScaleTarget, error) {
|
||||||
|
hras, err := autoscaler.findHRAsByKey(ctx, name)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
targets := autoscaler.searchScaleTargets(hras, f)
|
||||||
|
|
||||||
|
n := len(targets)
|
||||||
|
|
||||||
|
if n == 0 {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if n > 1 {
|
||||||
|
var scaleTargetIDs []string
|
||||||
|
|
||||||
|
for _, t := range targets {
|
||||||
|
scaleTargetIDs = append(scaleTargetIDs, t.HorizontalRunnerAutoscaler.Name)
|
||||||
|
}
|
||||||
|
|
||||||
|
autoscaler.Log.Info(
|
||||||
|
"Found too many scale targets: "+
|
||||||
|
"It must be exactly one to avoid ambiguity. "+
|
||||||
|
"Either set Namespace for the webhook-based autoscaler to let it only find HRAs in the namespace, "+
|
||||||
|
"or update Repository or Organization fields in your RunnerDeployment resources to fix the ambiguity.",
|
||||||
|
"scaleTargets", strings.Join(scaleTargetIDs, ","))
|
||||||
|
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return &targets[0], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) getScaleUpTarget(ctx context.Context, log logr.Logger, repo, owner, ownerType string, f func(v1alpha1.ScaleUpTrigger) bool) (*ScaleTarget, error) {
|
||||||
|
repositoryRunnerKey := owner + "/" + repo
|
||||||
|
|
||||||
|
if target, err := autoscaler.getScaleTarget(ctx, repositoryRunnerKey, f); err != nil {
|
||||||
|
autoscaler.Log.Info("finding repository-wide runner", "repository", repositoryRunnerKey)
|
||||||
|
return nil, err
|
||||||
|
} else if target != nil {
|
||||||
|
autoscaler.Log.Info("scale up target is repository-wide runners", "repository", repo)
|
||||||
|
return target, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if ownerType == "User" {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if target, err := autoscaler.getScaleTarget(ctx, owner, f); err != nil {
|
||||||
|
log.Info("finding organizational runner", "organization", owner)
|
||||||
|
return nil, err
|
||||||
|
} else if target != nil {
|
||||||
|
log.Info("scale up target is organizational runners", "organization", owner)
|
||||||
|
return target, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) tryScaleUp(ctx context.Context, target *ScaleTarget) error {
|
||||||
|
if target == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
copy := target.HorizontalRunnerAutoscaler.DeepCopy()
|
||||||
|
|
||||||
|
amount := 1
|
||||||
|
|
||||||
|
if target.ScaleUpTrigger.Amount > 0 {
|
||||||
|
amount = target.ScaleUpTrigger.Amount
|
||||||
|
}
|
||||||
|
|
||||||
|
capacityReservations := getValidCapacityReservations(copy)
|
||||||
|
|
||||||
|
copy.Spec.CapacityReservations = append(capacityReservations, v1alpha1.CapacityReservation{
|
||||||
|
ExpirationTime: metav1.Time{Time: time.Now().Add(target.ScaleUpTrigger.Duration.Duration)},
|
||||||
|
Replicas: amount,
|
||||||
|
})
|
||||||
|
|
||||||
|
if err := autoscaler.Client.Patch(ctx, copy, client.MergeFrom(&target.HorizontalRunnerAutoscaler)); err != nil {
|
||||||
|
return fmt.Errorf("patching horizontalrunnerautoscaler to add capacity reservation: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func getValidCapacityReservations(autoscaler *v1alpha1.HorizontalRunnerAutoscaler) []v1alpha1.CapacityReservation {
|
||||||
|
var capacityReservations []v1alpha1.CapacityReservation
|
||||||
|
|
||||||
|
now := time.Now()
|
||||||
|
|
||||||
|
for _, reservation := range autoscaler.Spec.CapacityReservations {
|
||||||
|
if reservation.ExpirationTime.Time.After(now) {
|
||||||
|
capacityReservations = append(capacityReservations, reservation)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return capacityReservations
|
||||||
|
}
|
||||||
|
|
||||||
|
func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) SetupWithManager(mgr ctrl.Manager) error {
|
||||||
|
name := "webhookbasedautoscaler"
|
||||||
|
if autoscaler.Name != "" {
|
||||||
|
name = autoscaler.Name
|
||||||
|
}
|
||||||
|
|
||||||
|
autoscaler.Recorder = mgr.GetEventRecorderFor(name)
|
||||||
|
|
||||||
|
if err := mgr.GetFieldIndexer().IndexField(&v1alpha1.HorizontalRunnerAutoscaler{}, scaleTargetKey, func(rawObj runtime.Object) []string {
|
||||||
|
hra := rawObj.(*v1alpha1.HorizontalRunnerAutoscaler)
|
||||||
|
|
||||||
|
if hra.Spec.ScaleTargetRef.Name == "" {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var rd v1alpha1.RunnerDeployment
|
||||||
|
|
||||||
|
if err := autoscaler.Client.Get(context.Background(), types.NamespacedName{Namespace: hra.Namespace, Name: hra.Spec.ScaleTargetRef.Name}, &rd); err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return []string{rd.Spec.Template.Spec.Repository, rd.Spec.Template.Spec.Organization}
|
||||||
|
}); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return ctrl.NewControllerManagedBy(mgr).
|
||||||
|
For(&v1alpha1.HorizontalRunnerAutoscaler{}).
|
||||||
|
Named(name).
|
||||||
|
Complete(autoscaler)
|
||||||
|
}
|
||||||
@@ -0,0 +1,43 @@
package controllers

import (
	"github.com/google/go-github/v33/github"
	"github.com/summerwind/actions-runner-controller/api/v1alpha1"
	"github.com/summerwind/actions-runner-controller/pkg/actionsglob"
)

func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) MatchCheckRunEvent(event *github.CheckRunEvent) func(scaleUpTrigger v1alpha1.ScaleUpTrigger) bool {
	return func(scaleUpTrigger v1alpha1.ScaleUpTrigger) bool {
		g := scaleUpTrigger.GitHubEvent

		if g == nil {
			return false
		}

		cr := g.CheckRun

		if cr == nil {
			return false
		}

		if !matchTriggerConditionAgainstEvent(cr.Types, event.Action) {
			return false
		}

		if cr.Status != "" && (event.CheckRun == nil || event.CheckRun.Status == nil || *event.CheckRun.Status != cr.Status) {
			return false
		}

		if checkRun := event.CheckRun; checkRun != nil && len(cr.Names) > 0 {
			for _, pat := range cr.Names {
				if r := actionsglob.Match(pat, checkRun.GetName()); r {
					return true
				}
			}

			return false
		}

		return true
	}
}
@@ -0,0 +1,32 @@
package controllers

import (
	"github.com/google/go-github/v33/github"
	"github.com/summerwind/actions-runner-controller/api/v1alpha1"
)

func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) MatchPullRequestEvent(event *github.PullRequestEvent) func(scaleUpTrigger v1alpha1.ScaleUpTrigger) bool {
	return func(scaleUpTrigger v1alpha1.ScaleUpTrigger) bool {
		g := scaleUpTrigger.GitHubEvent

		if g == nil {
			return false
		}

		pr := g.PullRequest

		if pr == nil {
			return false
		}

		if !matchTriggerConditionAgainstEvent(pr.Types, event.Action) {
			return false
		}

		if !matchTriggerConditionAgainstEvent(pr.Branches, event.PullRequest.Base.Ref) {
			return false
		}

		return true
	}
}
controllers/horizontal_runner_autoscaler_webhook_on_push.go (new file, 24 lines)
@@ -0,0 +1,24 @@
package controllers

import (
	"github.com/google/go-github/v33/github"
	"github.com/summerwind/actions-runner-controller/api/v1alpha1"
)

func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) MatchPushEvent(event *github.PushEvent) func(scaleUpTrigger v1alpha1.ScaleUpTrigger) bool {
	return func(scaleUpTrigger v1alpha1.ScaleUpTrigger) bool {
		g := scaleUpTrigger.GitHubEvent

		if g == nil {
			return false
		}

		push := g.Push

		if push == nil {
			return false
		}

		return true
	}
}
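The three matchers above all reduce to matchTriggerConditionAgainstEvent from the webhook controller earlier in this diff: an empty type list matches every action, a nil action matches nothing, and otherwise the event's action must be listed. The following is a minimal, self-contained Go sketch of just that rule; the helper is copied here only so the example runs on its own, and it is not part of the controller's exported API.

package main

import "fmt"

// matchTriggerCondition mirrors matchTriggerConditionAgainstEvent from the
// webhook controller: an empty list of types matches every action, a nil
// action matches nothing, otherwise the action must be listed.
func matchTriggerCondition(types []string, eventAction *string) bool {
	if len(types) == 0 {
		return true
	}
	if eventAction == nil {
		return false
	}
	for _, t := range types {
		if t == *eventAction {
			return true
		}
	}
	return false
}

func main() {
	created := "created"

	fmt.Println(matchTriggerCondition(nil, &created))                   // true: no filter configured
	fmt.Println(matchTriggerCondition([]string{"created"}, &created))   // true: action is listed
	fmt.Println(matchTriggerCondition([]string{"completed"}, &created)) // false: action not listed
	fmt.Println(matchTriggerCondition([]string{"completed"}, nil))      // false: event carries no action
}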
controllers/horizontal_runner_autoscaler_webhook_test.go (new file, 314 lines)
@@ -0,0 +1,314 @@
package controllers

import (
	"bytes"
	"encoding/json"
	"fmt"
	"github.com/go-logr/logr"
	"github.com/google/go-github/v33/github"
	actionsv1alpha1 "github.com/summerwind/actions-runner-controller/api/v1alpha1"
	"io"
	"io/ioutil"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
	"net/http"
	"net/http/httptest"
	"net/url"
	"os"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"
	"testing"
	"time"
)

var (
	sc = runtime.NewScheme()
)

func init() {
	_ = clientgoscheme.AddToScheme(sc)
	_ = actionsv1alpha1.AddToScheme(sc)
}

func TestOrgWebhookCheckRun(t *testing.T) {
	f, err := os.Open("testdata/org_webhook_check_run_payload.json")
	if err != nil {
		t.Fatalf("could not open the fixture: %s", err)
	}
	defer f.Close()
	var e github.CheckRunEvent
	if err := json.NewDecoder(f).Decode(&e); err != nil {
		t.Fatalf("invalid json: %s", err)
	}
	testServer(t,
		"check_run",
		&e,
		200,
		"no horizontalrunnerautoscaler to scale for this github event",
	)
}

func TestRepoWebhookCheckRun(t *testing.T) {
	f, err := os.Open("testdata/repo_webhook_check_run_payload.json")
	if err != nil {
		t.Fatalf("could not open the fixture: %s", err)
	}
	defer f.Close()
	var e github.CheckRunEvent
	if err := json.NewDecoder(f).Decode(&e); err != nil {
		t.Fatalf("invalid json: %s", err)
	}
	testServer(t,
		"check_run",
		&e,
		200,
		"no horizontalrunnerautoscaler to scale for this github event",
	)
}

func TestWebhookPullRequest(t *testing.T) {
	testServer(t,
		"pull_request",
		&github.PullRequestEvent{
			PullRequest: &github.PullRequest{
				Base: &github.PullRequestBranch{
					Ref: github.String("main"),
				},
			},
			Repo: &github.Repository{
				Name: github.String("myorg/myrepo"),
				Organization: &github.Organization{
					Name: github.String("myorg"),
				},
			},
			Action: github.String("created"),
		},
		200,
		"no horizontalrunnerautoscaler to scale for this github event",
	)
}

func TestWebhookPush(t *testing.T) {
	testServer(t,
		"push",
		&github.PushEvent{
			Repo: &github.PushEventRepository{
				Name:         github.String("myrepo"),
				Organization: github.String("myorg"),
			},
		},
		200,
		"no horizontalrunnerautoscaler to scale for this github event",
	)
}

func TestWebhookPing(t *testing.T) {
	testServer(t,
		"ping",
		&github.PingEvent{
			Zen: github.String("zen"),
		},
		200,
		"pong",
	)
}

func TestGetRequest(t *testing.T) {
	hra := HorizontalRunnerAutoscalerGitHubWebhook{}
	request, _ := http.NewRequest(http.MethodGet, "/", nil)
	recorder := httptest.ResponseRecorder{}

	hra.Handle(&recorder, request)
	response := recorder.Result()

	if response.StatusCode != http.StatusOK {
		t.Errorf("want %d, got %d", http.StatusOK, response.StatusCode)
	}
}

func TestGetValidCapacityReservations(t *testing.T) {
	now := time.Now()

	hra := &actionsv1alpha1.HorizontalRunnerAutoscaler{
		Spec: actionsv1alpha1.HorizontalRunnerAutoscalerSpec{
			CapacityReservations: []actionsv1alpha1.CapacityReservation{
				{
					ExpirationTime: metav1.Time{Time: now.Add(-time.Second)},
					Replicas:       1,
				},
				{
					ExpirationTime: metav1.Time{Time: now},
					Replicas:       2,
				},
				{
					ExpirationTime: metav1.Time{Time: now.Add(time.Second)},
					Replicas:       3,
				},
			},
		},
	}

	revs := getValidCapacityReservations(hra)

	var count int

	for _, r := range revs {
		count += r.Replicas
	}

	want := 3

	if count != want {
		t.Errorf("want %d, got %d", want, count)
	}
}

func installTestLogger(webhook *HorizontalRunnerAutoscalerGitHubWebhook) *bytes.Buffer {
	logs := &bytes.Buffer{}

	log := testLogger{
		name:   "testlog",
		writer: logs,
	}

	webhook.Log = &log

	return logs
}

func testServer(t *testing.T, eventType string, event interface{}, wantCode int, wantBody string) {
	t.Helper()

	hraWebhook := &HorizontalRunnerAutoscalerGitHubWebhook{}

	var initObjs []runtime.Object

	client := fake.NewFakeClientWithScheme(sc, initObjs...)

	logs := installTestLogger(hraWebhook)

	defer func() {
		if t.Failed() {
			t.Logf("diagnostics: %s", logs.String())
		}
	}()

	hraWebhook.Client = client

	mux := http.NewServeMux()
	mux.HandleFunc("/", hraWebhook.Handle)

	server := httptest.NewServer(mux)
	defer server.Close()

	resp, err := sendWebhook(server, eventType, event)
	if err != nil {
		t.Fatal(err)
	}

	defer func() {
		if resp != nil {
			resp.Body.Close()
		}
	}()

	if resp.StatusCode != wantCode {
		t.Error("status:", resp.StatusCode)
	}

	respBody, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		t.Fatal(err)
	}

	if string(respBody) != wantBody {
		t.Fatal("body:", string(respBody))
	}
}

func sendWebhook(server *httptest.Server, eventType string, event interface{}) (*http.Response, error) {
	jsonBuf := &bytes.Buffer{}
	enc := json.NewEncoder(jsonBuf)
	enc.SetIndent(" ", "")
	err := enc.Encode(event)
	if err != nil {
		return nil, fmt.Errorf("[bug in test] encoding event to json: %+v", err)
	}

	reqBody := jsonBuf.Bytes()

	u, err := url.Parse(server.URL)
	if err != nil {
		return nil, fmt.Errorf("parsing server url: %v", err)
	}

	req := &http.Request{
		Method: http.MethodPost,
		URL:    u,
		Header: map[string][]string{
			"X-GitHub-Event": {eventType},
			"Content-Type":   {"application/json"},
		},
		Body: ioutil.NopCloser(bytes.NewBuffer(reqBody)),
	}

	return http.DefaultClient.Do(req)
}

// testLogger is a sample logr.Logger that logs in-memory.
// It's only for testing log outputs.
type testLogger struct {
	name      string
	keyValues map[string]interface{}

	writer io.Writer
}

var _ logr.Logger = &testLogger{}

func (l *testLogger) Info(msg string, kvs ...interface{}) {
	fmt.Fprintf(l.writer, "%s] %s\t", l.name, msg)
	for k, v := range l.keyValues {
		fmt.Fprintf(l.writer, "%s=%+v ", k, v)
	}
	for i := 0; i < len(kvs); i += 2 {
		fmt.Fprintf(l.writer, "%s=%+v ", kvs[i], kvs[i+1])
	}
	fmt.Fprintf(l.writer, "\n")
}

func (_ *testLogger) Enabled() bool {
	return true
}

func (l *testLogger) Error(err error, msg string, kvs ...interface{}) {
	kvs = append(kvs, "error", err)
	l.Info(msg, kvs...)
}

func (l *testLogger) V(_ int) logr.InfoLogger {
	return l
}

func (l *testLogger) WithName(name string) logr.Logger {
	return &testLogger{
		name:      l.name + "." + name,
		keyValues: l.keyValues,
		writer:    l.writer,
	}
}

func (l *testLogger) WithValues(kvs ...interface{}) logr.Logger {
	newMap := make(map[string]interface{}, len(l.keyValues)+len(kvs)/2)
	for k, v := range l.keyValues {
		newMap[k] = v
	}
	for i := 0; i < len(kvs); i += 2 {
		newMap[kvs[i].(string)] = kvs[i+1]
	}
	return &testLogger{
		name:      l.name,
		keyValues: newMap,
		writer:    l.writer,
	}
}
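Note that testServer above sends unsigned payloads, which Handle accepts only because SecretKeyBytes is left empty; when a webhook secret is configured, go-github's ValidatePayload verifies GitHub's signature header instead. The following is a hedged, standard-library-only sketch of how a client could compute a compatible X-Hub-Signature-256 header; the secret value and URL are placeholders, not values taken from this repository.

package main

import (
	"bytes"
	"crypto/hmac"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"net/http"
)

// sign computes the hex-encoded HMAC-SHA256 of body with the webhook secret,
// in the "sha256=<hex>" form GitHub uses for the X-Hub-Signature-256 header.
func sign(secret, body []byte) string {
	mac := hmac.New(sha256.New, secret)
	mac.Write(body)
	return "sha256=" + hex.EncodeToString(mac.Sum(nil))
}

func main() {
	secret := []byte("my-webhook-secret") // placeholder; must match the server's configured secret
	body := []byte(`{"zen":"zen"}`)       // a minimal ping-like payload

	req, err := http.NewRequest(http.MethodPost, "http://localhost:8000/", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("X-GitHub-Event", "ping")
	req.Header.Set("X-Hub-Signature-256", sign(secret, body))

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status) // expect 200 with a "pong" body if the signature matches
}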
@@ -18,6 +18,8 @@ package controllers
 
 import (
 	"context"
+	"fmt"
+	corev1 "k8s.io/api/core/v1"
 	"time"
 
 	"github.com/summerwind/actions-runner-controller/github"
@@ -29,10 +31,10 @@ import (
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 
-	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 
 	"github.com/summerwind/actions-runner-controller/api/v1alpha1"
+	"github.com/summerwind/actions-runner-controller/controllers/metrics"
 )
 
 const (
@@ -46,8 +48,13 @@ type HorizontalRunnerAutoscalerReconciler struct {
 	Log      logr.Logger
 	Recorder record.EventRecorder
 	Scheme   *runtime.Scheme
+
+	CacheDuration time.Duration
+	Name          string
 }
 
+const defaultReplicas = 1
+
 // +kubebuilder:rbac:groups=actions.summerwind.dev,resources=runnerdeployments,verbs=get;list;watch;update;patch
 // +kubebuilder:rbac:groups=actions.summerwind.dev,resources=horizontalrunnerautoscalers,verbs=get;list;watch;create;update;patch;delete
 // +kubebuilder:rbac:groups=actions.summerwind.dev,resources=horizontalrunnerautoscalers/finalizers,verbs=get;list;watch;create;update;patch;delete
@@ -67,6 +74,8 @@ func (r *HorizontalRunnerAutoscalerReconciler) Reconcile(req ctrl.Request) (ctrl
 		return ctrl.Result{}, nil
 	}
 
+	metrics.SetHorizontalRunnerAutoscalerSpec(hra.ObjectMeta, hra.Spec)
+
 	var rd v1alpha1.RunnerDeployment
 	if err := r.Get(ctx, types.NamespacedName{
 		Namespace: req.Namespace,
@@ -79,7 +88,9 @@ func (r *HorizontalRunnerAutoscalerReconciler) Reconcile(req ctrl.Request) (ctrl
 		return ctrl.Result{}, nil
 	}
 
-	replicas, err := r.computeReplicas(rd, hra)
+	now := time.Now()
+
+	newDesiredReplicas, computedReplicas, computedReplicasFromCache, err := r.computeReplicasWithCache(log, now, rd, hra)
 	if err != nil {
 		r.Recorder.Event(&hra, corev1.EventTypeNormal, "RunnerAutoscalingFailure", err.Error())
 
@@ -88,62 +99,144 @@ func (r *HorizontalRunnerAutoscalerReconciler) Reconcile(req ctrl.Request) (ctrl
 		return ctrl.Result{}, err
 	}
 
-	const defaultReplicas = 1
-
 	currentDesiredReplicas := getIntOrDefault(rd.Spec.Replicas, defaultReplicas)
-	newDesiredReplicas := getIntOrDefault(replicas, defaultReplicas)
 
 	// Please add more conditions that we can in-place update the newest runnerreplicaset without disruption
 	if currentDesiredReplicas != newDesiredReplicas {
 		copy := rd.DeepCopy()
 		copy.Spec.Replicas = &newDesiredReplicas
 
-		if err := r.Client.Update(ctx, copy); err != nil {
-			log.Error(err, "Failed to update runnerderployment resource")
-
-			return ctrl.Result{}, err
-		}
-
-		return ctrl.Result{}, err
+		if err := r.Client.Patch(ctx, copy, client.MergeFrom(&rd)); err != nil {
+			return ctrl.Result{}, fmt.Errorf("patching runnerdeployment to have %d replicas: %w", newDesiredReplicas, err)
+		}
 	}
 
-	if hra.Status.DesiredReplicas == nil || *hra.Status.DesiredReplicas != *replicas {
-		updated := hra.DeepCopy()
-
-		if (hra.Status.DesiredReplicas == nil && *replicas > 1) ||
-			(hra.Status.DesiredReplicas != nil && *replicas > *hra.Status.DesiredReplicas) {
-			updated.Status.LastSuccessfulScaleOutTime = &metav1.Time{Time: time.Now()}
-		}
-
-		updated.Status.DesiredReplicas = replicas
-
-		if err := r.Status().Update(ctx, updated); err != nil {
-			log.Error(err, "Failed to update horizontalrunnerautoscaler status")
-
-			return ctrl.Result{}, err
-		}
-	}
+	var updated *v1alpha1.HorizontalRunnerAutoscaler
+
+	if hra.Status.DesiredReplicas == nil || *hra.Status.DesiredReplicas != newDesiredReplicas {
+		updated = hra.DeepCopy()
+
+		if (hra.Status.DesiredReplicas == nil && newDesiredReplicas > 1) ||
+			(hra.Status.DesiredReplicas != nil && newDesiredReplicas > *hra.Status.DesiredReplicas) {
+
+			updated.Status.LastSuccessfulScaleOutTime = &metav1.Time{Time: time.Now()}
+		}
+
+		updated.Status.DesiredReplicas = &newDesiredReplicas
+	}
+
+	if computedReplicasFromCache == nil {
+		if updated == nil {
+			updated = hra.DeepCopy()
+		}
+
+		cacheEntries := getValidCacheEntries(updated, now)
+
+		var cacheDuration time.Duration
+
+		if r.CacheDuration > 0 {
+			cacheDuration = r.CacheDuration
+		} else {
+			cacheDuration = 10 * time.Minute
+		}
+
+		updated.Status.CacheEntries = append(cacheEntries, v1alpha1.CacheEntry{
+			Key:            v1alpha1.CacheEntryKeyDesiredReplicas,
+			Value:          computedReplicas,
+			ExpirationTime: metav1.Time{Time: time.Now().Add(cacheDuration)},
+		})
+	}
+
+	if updated != nil {
+		metrics.SetHorizontalRunnerAutoscalerStatus(updated.ObjectMeta, updated.Status)
+
+		if err := r.Status().Patch(ctx, updated, client.MergeFrom(&hra)); err != nil {
+			return ctrl.Result{}, fmt.Errorf("patching horizontalrunnerautoscaler status to add cache entry: %w", err)
+		}
+	}
 
 	return ctrl.Result{}, nil
 }
 
+func getValidCacheEntries(hra *v1alpha1.HorizontalRunnerAutoscaler, now time.Time) []v1alpha1.CacheEntry {
+	var cacheEntries []v1alpha1.CacheEntry
+
+	for _, ent := range hra.Status.CacheEntries {
+		if ent.ExpirationTime.After(now) {
+			cacheEntries = append(cacheEntries, ent)
+		}
+	}
+
+	return cacheEntries
+}
+
 func (r *HorizontalRunnerAutoscalerReconciler) SetupWithManager(mgr ctrl.Manager) error {
-	r.Recorder = mgr.GetEventRecorderFor("horizontalrunnerautoscaler-controller")
+	name := "horizontalrunnerautoscaler-controller"
+	if r.Name != "" {
+		name = r.Name
+	}
+
+	r.Recorder = mgr.GetEventRecorderFor(name)
 
 	return ctrl.NewControllerManagedBy(mgr).
 		For(&v1alpha1.HorizontalRunnerAutoscaler{}).
+		Named(name).
 		Complete(r)
 }
 
-func (r *HorizontalRunnerAutoscalerReconciler) computeReplicas(rd v1alpha1.RunnerDeployment, hra v1alpha1.HorizontalRunnerAutoscaler) (*int, error) {
-	var computedReplicas *int
-	replicas, err := r.determineDesiredReplicas(rd, hra)
-	if err != nil {
-		return nil, err
+func (r *HorizontalRunnerAutoscalerReconciler) computeReplicasWithCache(log logr.Logger, now time.Time, rd v1alpha1.RunnerDeployment, hra v1alpha1.HorizontalRunnerAutoscaler) (int, int, *int, error) {
+	minReplicas := defaultReplicas
+	if hra.Spec.MinReplicas != nil && *hra.Spec.MinReplicas > 0 {
+		minReplicas = *hra.Spec.MinReplicas
 	}
 
+	var suggestedReplicas int
+
+	suggestedReplicasFromCache := r.fetchSuggestedReplicasFromCache(hra)
+
+	var cached *int
+
+	if suggestedReplicasFromCache != nil {
+		cached = suggestedReplicasFromCache
+
+		if cached == nil {
+			suggestedReplicas = minReplicas
+		} else {
+			suggestedReplicas = *cached
+		}
+	} else {
+		v, err := r.suggestDesiredReplicas(rd, hra)
+		if err != nil {
+			return 0, 0, nil, err
+		}
+
+		if v == nil {
+			suggestedReplicas = minReplicas
+		} else {
+			suggestedReplicas = *v
+		}
+	}
+
+	var reserved int
+
+	for _, reservation := range hra.Spec.CapacityReservations {
+		if reservation.ExpirationTime.Time.After(now) {
+			reserved += reservation.Replicas
+		}
+	}
+
+	newDesiredReplicas := suggestedReplicas + reserved
+
+	if newDesiredReplicas < minReplicas {
+		newDesiredReplicas = minReplicas
+	} else if hra.Spec.MaxReplicas != nil && newDesiredReplicas > *hra.Spec.MaxReplicas {
+		newDesiredReplicas = *hra.Spec.MaxReplicas
+	}
+
+	//
+	// Delay scaling-down for ScaleDownDelaySecondsAfterScaleUp or DefaultScaleDownDelay
+	//
+
 	var scaleDownDelay time.Duration
 
 	if hra.Spec.ScaleDownDelaySecondsAfterScaleUp != nil {
@@ -152,17 +245,50 @@ func (r *HorizontalRunnerAutoscalerReconciler) computeReplicas(rd v1alpha1.Runne
 		scaleDownDelay = DefaultScaleDownDelay
 	}
 
-	now := time.Now()
+	var scaleDownDelayUntil *time.Time
 
 	if hra.Status.DesiredReplicas == nil ||
-		*hra.Status.DesiredReplicas < *replicas ||
-		hra.Status.LastSuccessfulScaleOutTime == nil ||
-		hra.Status.LastSuccessfulScaleOutTime.Add(scaleDownDelay).Before(now) {
-		computedReplicas = replicas
+		*hra.Status.DesiredReplicas < newDesiredReplicas ||
+		hra.Status.LastSuccessfulScaleOutTime == nil {
+
+	} else if hra.Status.LastSuccessfulScaleOutTime != nil {
+		t := hra.Status.LastSuccessfulScaleOutTime.Add(scaleDownDelay)
+
+		// ScaleDownDelay is not passed
+		if t.After(now) {
+			scaleDownDelayUntil = &t
+			newDesiredReplicas = *hra.Status.DesiredReplicas
+		}
 	} else {
-		computedReplicas = hra.Status.DesiredReplicas
+		newDesiredReplicas = *hra.Status.DesiredReplicas
 	}
 
-	return computedReplicas, nil
+	//
+	// Logs various numbers for monitoring and debugging purpose
+	//
+
+	kvs := []interface{}{
+		"suggested", suggestedReplicas,
+		"reserved", reserved,
+		"min", minReplicas,
+	}
+
+	if cached != nil {
+		kvs = append(kvs, "cached", *cached)
+	}
+
+	if scaleDownDelayUntil != nil {
+		kvs = append(kvs, "last_scale_up_time", *hra.Status.LastSuccessfulScaleOutTime)
+		kvs = append(kvs, "scale_down_delay_until", scaleDownDelayUntil)
+	}
+
+	if maxReplicas := hra.Spec.MaxReplicas; maxReplicas != nil {
+		kvs = append(kvs, "max", *maxReplicas)
+	}
+
+	log.V(1).Info(fmt.Sprintf("Calculated desired replicas of %d", newDesiredReplicas),
+		kvs...,
+	)
+
+	return newDesiredReplicas, suggestedReplicas, suggestedReplicasFromCache, nil
 }
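Stripped of the caching and scale-down-delay bookkeeping, computeReplicasWithCache boils down to: suggested (or cached) replicas plus unexpired capacity reservations, clamped to the [minReplicas, maxReplicas] range. Below is a standalone sketch of just that arithmetic; the names are simplified for illustration and are not part of the controller's API.

package main

import "fmt"

// desiredReplicas mirrors the core arithmetic of computeReplicasWithCache:
// suggested (or cached) replicas plus unexpired reservations, clamped to the
// configured bounds. Scale-down delay handling is omitted for brevity.
func desiredReplicas(suggested, reserved, min int, max *int) int {
	n := suggested + reserved
	if n < min {
		n = min
	}
	if max != nil && n > *max {
		n = *max
	}
	return n
}

func main() {
	max := 10
	fmt.Println(desiredReplicas(3, 2, 1, &max)) // 5: within bounds
	fmt.Println(desiredReplicas(0, 0, 1, &max)) // 1: clamped up to minReplicas
	fmt.Println(desiredReplicas(8, 5, 1, &max)) // 10: clamped down to maxReplicas
}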
controllers/horizontalrunnerautoscaler_controller_test.go (new file, 49 lines)
@@ -0,0 +1,49 @@
package controllers

import (
	"github.com/google/go-cmp/cmp"
	actionsv1alpha1 "github.com/summerwind/actions-runner-controller/api/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"testing"
	"time"
)

func TestGetValidCacheEntries(t *testing.T) {
	now := time.Now()

	hra := &actionsv1alpha1.HorizontalRunnerAutoscaler{
		Status: actionsv1alpha1.HorizontalRunnerAutoscalerStatus{
			CacheEntries: []actionsv1alpha1.CacheEntry{
				{
					Key:            "foo",
					Value:          1,
					ExpirationTime: metav1.Time{Time: now.Add(-time.Second)},
				},
				{
					Key:            "foo",
					Value:          2,
					ExpirationTime: metav1.Time{Time: now},
				},
				{
					Key:            "foo",
					Value:          3,
					ExpirationTime: metav1.Time{Time: now.Add(time.Second)},
				},
			},
		},
	}

	revs := getValidCacheEntries(hra, now)

	counts := map[string]int{}

	for _, r := range revs {
		counts[r.Key] += r.Value
	}

	want := map[string]int{"foo": 3}

	if d := cmp.Diff(want, counts); d != "" {
		t.Errorf("%s", d)
	}
}
(One file's diff is suppressed here because it is too large.)
controllers/metrics/horizontalrunnerautoscaler.go (new file, 67 lines)
@@ -0,0 +1,67 @@
package metrics

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/summerwind/actions-runner-controller/api/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

const (
	hraName      = "horizontalrunnerautoscaler"
	hraNamespace = "namespace"
)

var (
	horizontalRunnerAutoscalerMetrics = []prometheus.Collector{
		horizontalRunnerAutoscalerMinReplicas,
		horizontalRunnerAutoscalerMaxReplicas,
		horizontalRunnerAutoscalerDesiredReplicas,
	}
)

var (
	horizontalRunnerAutoscalerMinReplicas = prometheus.NewGaugeVec(
		prometheus.GaugeOpts{
			Name: "horizontalrunnerautoscaler_spec_min_replicas",
			Help: "minReplicas of HorizontalRunnerAutoscaler",
		},
		[]string{hraName, hraNamespace},
	)
	horizontalRunnerAutoscalerMaxReplicas = prometheus.NewGaugeVec(
		prometheus.GaugeOpts{
			Name: "horizontalrunnerautoscaler_spec_max_replicas",
			Help: "maxReplicas of HorizontalRunnerAutoscaler",
		},
		[]string{hraName, hraNamespace},
	)
	horizontalRunnerAutoscalerDesiredReplicas = prometheus.NewGaugeVec(
		prometheus.GaugeOpts{
			Name: "horizontalrunnerautoscaler_status_desired_replicas",
			Help: "desiredReplicas of HorizontalRunnerAutoscaler",
		},
		[]string{hraName, hraNamespace},
	)
)

func SetHorizontalRunnerAutoscalerSpec(o metav1.ObjectMeta, spec v1alpha1.HorizontalRunnerAutoscalerSpec) {
	labels := prometheus.Labels{
		hraName:      o.Name,
		hraNamespace: o.Namespace,
	}
	if spec.MaxReplicas != nil {
		horizontalRunnerAutoscalerMaxReplicas.With(labels).Set(float64(*spec.MaxReplicas))
	}
	if spec.MinReplicas != nil {
		horizontalRunnerAutoscalerMinReplicas.With(labels).Set(float64(*spec.MinReplicas))
	}
}

func SetHorizontalRunnerAutoscalerStatus(o metav1.ObjectMeta, status v1alpha1.HorizontalRunnerAutoscalerStatus) {
	labels := prometheus.Labels{
		hraName:      o.Name,
		hraNamespace: o.Namespace,
	}
	if status.DesiredReplicas != nil {
		horizontalRunnerAutoscalerDesiredReplicas.With(labels).Set(float64(*status.DesiredReplicas))
	}
}
controllers/metrics/metrics.go (new file, 14 lines)
@@ -0,0 +1,14 @@
// Package metrics provides the metrics of custom resources such as HRA.
//
// This depends on the metrics exporter of kubebuilder.
// See https://book.kubebuilder.io/reference/metrics.html for details.
package metrics

import (
	"sigs.k8s.io/controller-runtime/pkg/metrics"
)

func init() {
	metrics.Registry.MustRegister(runnerDeploymentMetrics...)
	metrics.Registry.MustRegister(horizontalRunnerAutoscalerMetrics...)
}
controllers/metrics/runnerdeployment.go (new file, 37 lines)
@@ -0,0 +1,37 @@
package metrics

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/summerwind/actions-runner-controller/api/v1alpha1"
)

const (
	rdName      = "runnerdeployment"
	rdNamespace = "namespace"
)

var (
	runnerDeploymentMetrics = []prometheus.Collector{
		runnerDeploymentReplicas,
	}
)

var (
	runnerDeploymentReplicas = prometheus.NewGaugeVec(
		prometheus.GaugeOpts{
			Name: "runnerdeployment_spec_replicas",
			Help: "replicas of RunnerDeployment",
		},
		[]string{rdName, rdNamespace},
	)
)

func SetRunnerDeployment(rd v1alpha1.RunnerDeployment) {
	labels := prometheus.Labels{
		rdName:      rd.Name,
		rdNamespace: rd.Namespace,
	}
	if rd.Spec.Replicas != nil {
		runnerDeploymentReplicas.With(labels).Set(float64(*rd.Spec.Replicas))
	}
}
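The metrics package above only defines gauges and sets them from specs and statuses; they are exposed through the controller-runtime registry wired up in metrics.go. The following hedged sketch reproduces the same GaugeVec pattern against a private Prometheus registry so it can run standalone; the label values are made-up examples, not data from this repository.

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

// A self-contained variant of the pattern used above: a GaugeVec labelled by
// resource name and namespace, set from a spec value. The metric name mirrors
// runnerdeployment_spec_replicas, but this registers into a private registry
// rather than the controller-runtime one.
var runnerDeploymentReplicas = prometheus.NewGaugeVec(
	prometheus.GaugeOpts{
		Name: "runnerdeployment_spec_replicas",
		Help: "replicas of RunnerDeployment",
	},
	[]string{"runnerdeployment", "namespace"},
)

func main() {
	reg := prometheus.NewRegistry()
	reg.MustRegister(runnerDeploymentReplicas)

	// Pretend a RunnerDeployment named "example-runnerdeploy" in "default" has 3 replicas.
	runnerDeploymentReplicas.With(prometheus.Labels{
		"runnerdeployment": "example-runnerdeploy",
		"namespace":        "default",
	}).Set(3)

	mfs, err := reg.Gather()
	if err != nil {
		panic(err)
	}
	for _, mf := range mfs {
		fmt.Println(mf.GetName(), mf.GetMetric()[0].GetGauge().GetValue())
	}
}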
@@ -18,12 +18,17 @@ package controllers
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"github.com/summerwind/actions-runner-controller/hash"
|
|
||||||
"strings"
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
gogithub "github.com/google/go-github/v33/github"
|
||||||
|
"github.com/summerwind/actions-runner-controller/hash"
|
||||||
|
"k8s.io/apimachinery/pkg/util/wait"
|
||||||
|
|
||||||
"github.com/go-logr/logr"
|
"github.com/go-logr/logr"
|
||||||
"k8s.io/apimachinery/pkg/api/errors"
|
kerrors "k8s.io/apimachinery/pkg/api/errors"
|
||||||
"k8s.io/apimachinery/pkg/runtime"
|
"k8s.io/apimachinery/pkg/runtime"
|
||||||
"k8s.io/client-go/tools/record"
|
"k8s.io/client-go/tools/record"
|
||||||
ctrl "sigs.k8s.io/controller-runtime"
|
ctrl "sigs.k8s.io/controller-runtime"
|
||||||
@@ -41,17 +46,22 @@ const (
|
|||||||
finalizerName = "runner.actions.summerwind.dev"
|
finalizerName = "runner.actions.summerwind.dev"
|
||||||
|
|
||||||
LabelKeyPodTemplateHash = "pod-template-hash"
|
LabelKeyPodTemplateHash = "pod-template-hash"
|
||||||
|
|
||||||
|
retryDelayOnGitHubAPIRateLimitError = 30 * time.Second
|
||||||
)
|
)
|
||||||
|
|
||||||
// RunnerReconciler reconciles a Runner object
|
// RunnerReconciler reconciles a Runner object
|
||||||
type RunnerReconciler struct {
|
type RunnerReconciler struct {
|
||||||
client.Client
|
client.Client
|
||||||
Log logr.Logger
|
Log logr.Logger
|
||||||
Recorder record.EventRecorder
|
Recorder record.EventRecorder
|
||||||
Scheme *runtime.Scheme
|
Scheme *runtime.Scheme
|
||||||
GitHubClient *github.Client
|
GitHubClient *github.Client
|
||||||
RunnerImage string
|
RunnerImage string
|
||||||
DockerImage string
|
DockerImage string
|
||||||
|
Name string
|
||||||
|
RegistrationRecheckInterval time.Duration
|
||||||
|
RegistrationRecheckJitter time.Duration
|
||||||
}
|
}
|
||||||
|
|
||||||
// +kubebuilder:rbac:groups=actions.summerwind.dev,resources=runners,verbs=get;list;watch;create;update;patch;delete
|
// +kubebuilder:rbac:groups=actions.summerwind.dev,resources=runners,verbs=get;list;watch;create;update;patch;delete
|
||||||
@@ -95,9 +105,22 @@ func (r *RunnerReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
|
|||||||
|
|
||||||
if removed {
|
if removed {
|
||||||
if len(runner.Status.Registration.Token) > 0 {
|
if len(runner.Status.Registration.Token) > 0 {
|
||||||
ok, err := r.unregisterRunner(ctx, runner.Spec.Organization, runner.Spec.Repository, runner.Name)
|
ok, err := r.unregisterRunner(ctx, runner.Spec.Enterprise, runner.Spec.Organization, runner.Spec.Repository, runner.Name)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Error(err, "Failed to unregister runner")
|
if errors.Is(err, &gogithub.RateLimitError{}) {
|
||||||
|
// We log the underlying error when we failed calling GitHub API to list or unregisters,
|
||||||
|
// or the runner is still busy.
|
||||||
|
log.Error(
|
||||||
|
err,
|
||||||
|
fmt.Sprintf(
|
||||||
|
"Failed to unregister runner due to GitHub API rate limits. Delaying retry for %s to avoid excessive GitHub API calls",
|
||||||
|
retryDelayOnGitHubAPIRateLimitError,
|
||||||
|
),
|
||||||
|
)
|
||||||
|
|
||||||
|
return ctrl.Result{RequeueAfter: retryDelayOnGitHubAPIRateLimitError}, err
|
||||||
|
}
|
||||||
|
|
||||||
return ctrl.Result{}, err
|
return ctrl.Result{}, err
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -111,8 +134,8 @@ func (r *RunnerReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
|
|||||||
newRunner := runner.DeepCopy()
|
newRunner := runner.DeepCopy()
|
||||||
newRunner.ObjectMeta.Finalizers = finalizers
|
newRunner.ObjectMeta.Finalizers = finalizers
|
||||||
|
|
||||||
if err := r.Update(ctx, newRunner); err != nil {
|
if err := r.Patch(ctx, newRunner, client.MergeFrom(&runner)); err != nil {
|
||||||
log.Error(err, "Failed to update runner")
|
log.Error(err, "Failed to update runner for finalizer removal")
|
||||||
return ctrl.Result{}, err
|
return ctrl.Result{}, err
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -124,7 +147,7 @@ func (r *RunnerReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
|
|||||||
|
|
||||||
var pod corev1.Pod
|
var pod corev1.Pod
|
||||||
if err := r.Get(ctx, req.NamespacedName, &pod); err != nil {
|
if err := r.Get(ctx, req.NamespacedName, &pod); err != nil {
|
||||||
if !errors.IsNotFound(err) {
|
if !kerrors.IsNotFound(err) {
|
||||||
return ctrl.Result{}, err
|
return ctrl.Result{}, err
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -141,35 +164,65 @@ func (r *RunnerReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if err := r.Create(ctx, &newPod); err != nil {
|
if err := r.Create(ctx, &newPod); err != nil {
|
||||||
|
if kerrors.IsAlreadyExists(err) {
|
||||||
|
// Gracefully handle pod-already-exists errors due to informer cache delay.
|
||||||
|
// Without this we got a few errors like the below on new runner pod:
|
||||||
|
// 2021-03-16T00:23:10.116Z ERROR controller-runtime.controller Reconciler error {"controller": "runner-controller", "request": "default/example-runnerdeploy-b2g2g-j4mcp", "error": "pods \"example-runnerdeploy-b2g2g-j4mcp\" already exists"}
|
||||||
|
log.Info(
|
||||||
|
"Failed to create pod due to AlreadyExists error. Probably this pod has been already created in previous reconcilation but is still not in the informer cache. Will retry on pod created. If it doesn't repeat, there's no problem",
|
||||||
|
)
|
||||||
|
|
||||||
|
return ctrl.Result{}, nil
|
||||||
|
}
|
||||||
|
|
||||||
log.Error(err, "Failed to create pod resource")
|
log.Error(err, "Failed to create pod resource")
|
||||||
|
|
||||||
return ctrl.Result{}, err
|
return ctrl.Result{}, err
|
||||||
}
|
}
|
||||||
|
|
||||||
r.Recorder.Event(&runner, corev1.EventTypeNormal, "PodCreated", fmt.Sprintf("Created pod '%s'", newPod.Name))
|
r.Recorder.Event(&runner, corev1.EventTypeNormal, "PodCreated", fmt.Sprintf("Created pod '%s'", newPod.Name))
|
||||||
log.Info("Created runner pod", "repository", runner.Spec.Repository)
|
log.Info("Created runner pod", "repository", runner.Spec.Repository)
|
||||||
} else {
|
} else {
|
||||||
|
if !pod.ObjectMeta.DeletionTimestamp.IsZero() {
|
||||||
|
deletionTimeout := 1 * time.Minute
|
||||||
|
currentTime := time.Now()
|
||||||
|
deletionDidTimeout := currentTime.Sub(pod.DeletionTimestamp.Add(deletionTimeout)) > 0
|
||||||
|
|
||||||
|
if deletionDidTimeout {
|
||||||
|
log.Info(
|
||||||
|
fmt.Sprintf("Failed to delete pod within %s. ", deletionTimeout)+
|
||||||
|
"This is typically the case when a Kubernetes node became unreachable "+
|
||||||
|
"and the kube controller started evicting nodes. Forcefully deleting the pod to not get stuck.",
|
||||||
|
"podDeletionTimestamp", pod.DeletionTimestamp,
|
||||||
|
"currentTime", currentTime,
|
||||||
|
"configuredDeletionTimeout", deletionTimeout,
|
||||||
|
)
|
||||||
|
|
||||||
|
var force int64 = 0
|
||||||
|
// forcefully delete runner as we would otherwise get stuck if the node stays unreachable
|
||||||
|
if err := r.Delete(ctx, &pod, &client.DeleteOptions{GracePeriodSeconds: &force}); err != nil {
|
||||||
|
// probably
|
||||||
|
if !kerrors.IsNotFound(err) {
|
||||||
|
log.Error(err, "Failed to forcefully delete pod resource ...")
|
||||||
|
return ctrl.Result{}, err
|
||||||
|
}
|
||||||
|
// forceful deletion finally succeeded
|
||||||
|
return ctrl.Result{Requeue: true}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
r.Recorder.Event(&runner, corev1.EventTypeNormal, "PodDeleted", fmt.Sprintf("Forcefully deleted pod '%s'", pod.Name))
|
||||||
|
log.Info("Forcefully deleted runner pod", "repository", runner.Spec.Repository)
|
||||||
|
// give kube manager a little time to forcefully delete the stuck pod
|
||||||
|
return ctrl.Result{RequeueAfter: 3 * time.Second}, err
|
||||||
|
} else {
|
||||||
|
return ctrl.Result{}, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// If pod has ended up succeeded we need to restart it
|
// If pod has ended up succeeded we need to restart it
|
||||||
// Happens e.g. when dind is in runner and run completes
|
// Happens e.g. when dind is in runner and run completes
|
||||||
restart := pod.Status.Phase == corev1.PodSucceeded
|
restart := pod.Status.Phase == corev1.PodSucceeded
|
||||||
|
|
||||||
if !restart && runner.Status.Phase != string(pod.Status.Phase) {
|
|
||||||
updated := runner.DeepCopy()
|
|
||||||
updated.Status.Phase = string(pod.Status.Phase)
|
|
||||||
updated.Status.Reason = pod.Status.Reason
|
|
||||||
updated.Status.Message = pod.Status.Message
|
|
||||||
|
|
||||||
if err := r.Status().Update(ctx, updated); err != nil {
|
|
||||||
log.Error(err, "Failed to update runner status")
|
|
||||||
return ctrl.Result{}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return ctrl.Result{}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if !pod.ObjectMeta.DeletionTimestamp.IsZero() {
|
|
||||||
return ctrl.Result{}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if pod.Status.Phase == corev1.PodRunning {
|
if pod.Status.Phase == corev1.PodRunning {
|
||||||
for _, status := range pod.Status.ContainerStatuses {
|
for _, status := range pod.Status.ContainerStatuses {
|
||||||
if status.Name != containerName {
|
if status.Name != containerName {
|
||||||
@@ -194,24 +247,183 @@ func (r *RunnerReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
|
|||||||
return ctrl.Result{}, err
|
return ctrl.Result{}, err
|
||||||
}
|
}
|
||||||
|
|
||||||
runnerBusy, err := r.isRunnerBusy(ctx, runner.Spec.Organization, runner.Spec.Repository, runner.Name)
|
var registrationRecheckDelay time.Duration
|
||||||
if err != nil {
|
|
||||||
log.Error(err, "Failed to check if runner is busy")
|
|
||||||
return ctrl.Result{}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// See the `newPod` function called above for more information
|
// all checks done below only decide whether a restart is needed
|
||||||
// about when this hash changes.
|
// if a restart was already decided before, there is no need for the checks
|
||||||
curHash := pod.Labels[LabelKeyPodTemplateHash]
|
// saving API calls and scary{ log messages
|
||||||
newHash := newPod.Labels[LabelKeyPodTemplateHash]
|
if !restart {
|
||||||
|
registrationCheckInterval := time.Minute
|
||||||
|
if r.RegistrationRecheckInterval > 0 {
|
||||||
|
registrationCheckInterval = r.RegistrationRecheckInterval
|
||||||
|
}
|
||||||
|
|
||||||
if !runnerBusy && curHash != newHash {
|
// We want to call ListRunners GitHub Actions API only once per runner per minute.
|
||||||
restart = true
|
// This if block, in conjunction with:
|
||||||
|
// return ctrl.Result{RequeueAfter: registrationRecheckDelay}, nil
|
||||||
|
// achieves that.
|
||||||
|
if lastCheckTime := runner.Status.LastRegistrationCheckTime; lastCheckTime != nil {
|
||||||
|
nextCheckTime := lastCheckTime.Add(registrationCheckInterval)
|
||||||
|
now := time.Now()
|
||||||
|
|
||||||
|
// Requeue scheduled by RequeueAfter can happen a bit earlier (like dozens of milliseconds)
|
||||||
|
// so to avoid excessive, in-effective retry, we heuristically ignore the remaining delay in case it is
|
||||||
|
// shorter than 1s
|
||||||
|
requeueAfter := nextCheckTime.Sub(now) - time.Second
|
||||||
|
if requeueAfter > 0 {
|
||||||
|
log.Info(
|
||||||
|
fmt.Sprintf("Skipped registration check because it's deferred until %s. Retrying in %s at latest", nextCheckTime, requeueAfter),
|
||||||
|
"lastRegistrationCheckTime", lastCheckTime,
|
||||||
|
"registrationCheckInterval", registrationCheckInterval,
|
||||||
|
)
|
||||||
|
|
||||||
|
// Without RequeueAfter, the controller may not retry on scheduled. Instead, it must wait until the
|
||||||
|
// next sync period passes, which can be too much later than nextCheckTime.
|
||||||
|
//
|
||||||
|
+				// We need to requeue on this reconcilation even though we have already scheduled the initial
+				// requeue previously with `return ctrl.Result{RequeueAfter: registrationRecheckDelay}, nil`.
+				// Apparently, the workqueue used by controller-runtime seems to deduplicate and resets the delay on
+				// other requeues- so the initial scheduled requeue may have been reset due to requeue on
+				// spec/status change.
+				return ctrl.Result{RequeueAfter: requeueAfter}, nil
+			}
+		}
+
+		notFound := false
+		offline := false
+
+		runnerBusy, err := r.GitHubClient.IsRunnerBusy(ctx, runner.Spec.Enterprise, runner.Spec.Organization, runner.Spec.Repository, runner.Name)
+
+		currentTime := time.Now()
+
+		if err != nil {
+			var notFoundException *github.RunnerNotFound
+			var offlineException *github.RunnerOffline
+			if errors.As(err, &notFoundException) {
+				notFound = true
+			} else if errors.As(err, &offlineException) {
+				offline = true
+			} else {
+				var e *gogithub.RateLimitError
+				if errors.As(err, &e) {
+					// We log the underlying error when we failed calling GitHub API to list or unregisters,
+					// or the runner is still busy.
+					log.Error(
+						err,
+						fmt.Sprintf(
+							"Failed to check if runner is busy due to Github API rate limit. Retrying in %s to avoid excessive GitHub API calls",
+							retryDelayOnGitHubAPIRateLimitError,
+						),
+					)
+
+					return ctrl.Result{RequeueAfter: retryDelayOnGitHubAPIRateLimitError}, err
+				}
+
+				return ctrl.Result{}, err
+			}
+		}
+
+		// See the `newPod` function called above for more information
+		// about when this hash changes.
+		curHash := pod.Labels[LabelKeyPodTemplateHash]
+		newHash := newPod.Labels[LabelKeyPodTemplateHash]
+
+		if !runnerBusy && curHash != newHash {
+			restart = true
+		}
+
+		registrationTimeout := 10 * time.Minute
+		durationAfterRegistrationTimeout := currentTime.Sub(pod.CreationTimestamp.Add(registrationTimeout))
+		registrationDidTimeout := durationAfterRegistrationTimeout > 0
+
+		if notFound {
+			if registrationDidTimeout {
+				log.Info(
+					"Runner failed to register itself to GitHub in timely manner. "+
+						"Recreating the pod to see if it resolves the issue. "+
+						"CAUTION: If you see this a lot, you should investigate the root cause. "+
+						"See https://github.com/summerwind/actions-runner-controller/issues/288",
+					"podCreationTimestamp", pod.CreationTimestamp,
+					"currentTime", currentTime,
+					"configuredRegistrationTimeout", registrationTimeout,
+				)
+
+				restart = true
+			} else {
+				log.V(1).Info(
+					"Runner pod exists but we failed to check if runner is busy. Apparently it still needs more time.",
+					"runnerName", runner.Name,
+				)
+			}
+		} else if offline {
+			if registrationDidTimeout {
+				log.Info(
+					"Already existing GitHub runner still appears offline . "+
+						"Recreating the pod to see if it resolves the issue. "+
+						"CAUTION: If you see this a lot, you should investigate the root cause. ",
+					"podCreationTimestamp", pod.CreationTimestamp,
+					"currentTime", currentTime,
+					"configuredRegistrationTimeout", registrationTimeout,
+				)
+
+				restart = true
+			} else {
+				log.V(1).Info(
+					"Runner pod exists but the GitHub runner appears to be still offline. Waiting for runner to get online ...",
+					"runnerName", runner.Name,
+				)
+			}
+		}
+
+		if (notFound || offline) && !registrationDidTimeout {
+			registrationRecheckJitter := 10 * time.Second
+			if r.RegistrationRecheckJitter > 0 {
+				registrationRecheckJitter = r.RegistrationRecheckJitter
+			}
+
+			registrationRecheckDelay = registrationCheckInterval + wait.Jitter(registrationRecheckJitter, 0.1)
+		}
 	}

 	// Don't do anything if there's no need to restart the runner
 	if !restart {
-		return ctrl.Result{}, err
+		// This guard enables us to update runner.Status.Phase to `Running` only after
+		// the runner is registered to GitHub.
+		if registrationRecheckDelay > 0 {
+			log.V(1).Info(fmt.Sprintf("Rechecking the runner registration in %s", registrationRecheckDelay))
+
+			updated := runner.DeepCopy()
+			updated.Status.LastRegistrationCheckTime = &metav1.Time{Time: time.Now()}
+
+			if err := r.Status().Patch(ctx, updated, client.MergeFrom(&runner)); err != nil {
+				log.Error(err, "Failed to update runner status for LastRegistrationCheckTime")
+				return ctrl.Result{}, err
+			}
+
+			return ctrl.Result{RequeueAfter: registrationRecheckDelay}, nil
+		}
+
+		if runner.Status.Phase != string(pod.Status.Phase) {
+			if pod.Status.Phase == corev1.PodRunning {
+				// Seeing this message, you can expect the runner to become `Running` soon.
+				log.Info(
+					"Runner appears to have registered and running.",
+					"podCreationTimestamp", pod.CreationTimestamp,
+				)
+			}
+
+			updated := runner.DeepCopy()
+			updated.Status.Phase = string(pod.Status.Phase)
+			updated.Status.Reason = pod.Status.Reason
+			updated.Status.Message = pod.Status.Message
+
+			if err := r.Status().Patch(ctx, updated, client.MergeFrom(&runner)); err != nil {
+				log.Error(err, "Failed to update runner status for Phase/Reason/Message")
+				return ctrl.Result{}, err
+			}
+		}
+
+		return ctrl.Result{}, nil
 	}

 	// Delete current pod if recreation is needed
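The recheck delay computed above mixes a base interval with a small random jitter so that many runners created at the same time do not all hit the GitHub API again at the same moment. A minimal sketch of that arithmetic, assuming a one-minute registrationCheckInterval (the actual value is defined outside this hunk and is only an assumption here) and the 10-second default jitter shown above:

	// wait.Jitter(d, 0.1) returns a duration between d and d + 0.1*d,
	// so with these (assumed) inputs the requeue fires roughly 70-71s later.
	registrationCheckInterval := time.Minute      // assumption for illustration only
	registrationRecheckJitter := 10 * time.Second // default taken from the hunk above
	delay := registrationCheckInterval + wait.Jitter(registrationRecheckJitter, 0.1)
	fmt.Println(delay)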
@@ -227,23 +439,8 @@ func (r *RunnerReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
 	return ctrl.Result{}, nil
 }

-func (r *RunnerReconciler) isRunnerBusy(ctx context.Context, org, repo, name string) (bool, error) {
-	runners, err := r.GitHubClient.ListRunners(ctx, org, repo)
-	if err != nil {
-		return false, err
-	}
-
-	for _, runner := range runners {
-		if runner.GetName() == name {
-			return runner.GetBusy(), nil
-		}
-	}
-
-	return false, fmt.Errorf("runner not found")
-}
-
-func (r *RunnerReconciler) unregisterRunner(ctx context.Context, org, repo, name string) (bool, error) {
-	runners, err := r.GitHubClient.ListRunners(ctx, org, repo)
+func (r *RunnerReconciler) unregisterRunner(ctx context.Context, enterprise, org, repo, name string) (bool, error) {
+	runners, err := r.GitHubClient.ListRunners(ctx, enterprise, org, repo)
 	if err != nil {
 		return false, err
 	}
@@ -263,7 +460,7 @@ func (r *RunnerReconciler) unregisterRunner(ctx context.Context, org, repo, name
 		return false, nil
 	}

-	if err := r.GitHubClient.RemoveRunner(ctx, org, repo, id); err != nil {
+	if err := r.GitHubClient.RemoveRunner(ctx, enterprise, org, repo, id); err != nil {
 		return false, err
 	}

@@ -277,7 +474,7 @@ func (r *RunnerReconciler) updateRegistrationToken(ctx context.Context, runner v

 	log := r.Log.WithValues("runner", runner.Name)

-	rt, err := r.GitHubClient.GetRegistrationToken(ctx, runner.Spec.Organization, runner.Spec.Repository, runner.Name)
+	rt, err := r.GitHubClient.GetRegistrationToken(ctx, runner.Spec.Enterprise, runner.Spec.Organization, runner.Spec.Repository, runner.Name)
 	if err != nil {
 		r.Recorder.Event(&runner, corev1.EventTypeWarning, "FailedUpdateRegistrationToken", "Updating registration token failed")
 		log.Error(err, "Failed to get new registration token")
@@ -293,8 +490,8 @@ func (r *RunnerReconciler) updateRegistrationToken(ctx context.Context, runner v
 		ExpiresAt: metav1.NewTime(rt.GetExpiresAt().Time),
 	}

-	if err := r.Status().Update(ctx, updated); err != nil {
-		log.Error(err, "Failed to update runner status")
+	if err := r.Status().Patch(ctx, updated, client.MergeFrom(&runner)); err != nil {
+		log.Error(err, "Failed to update runner status for Registration")
 		return false, err
 	}

@@ -339,6 +536,10 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) {
 			Name: "RUNNER_REPO",
 			Value: runner.Spec.Repository,
 		},
+		{
+			Name: "RUNNER_ENTERPRISE",
+			Value: runner.Spec.Enterprise,
+		},
 		{
 			Name: "RUNNER_LABELS",
 			Value: strings.Join(runner.Spec.Labels, ","),
@@ -425,45 +626,72 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) {
 		},
 	}

-	if !dockerdInRunner && dockerEnabled {
-		runnerVolumeName := "runner"
-		runnerVolumeMountPath := "/runner"
-
-		pod.Spec.Volumes = []corev1.Volume{
+	if mtu := runner.Spec.DockerMTU; mtu != nil && dockerdInRunner {
+		pod.Spec.Containers[0].Env = append(pod.Spec.Containers[0].Env, []corev1.EnvVar{
 			{
+				Name: "MTU",
+				Value: fmt.Sprintf("%d", *runner.Spec.DockerMTU),
+			},
+		}...)
+	}
+
+	//
+	// /runner must be generated on runtime from /runnertmp embedded in the container image.
+	//
+	// When you're NOT using dindWithinRunner=true,
+	// it must also be shared with the dind container as it seems like required to run docker steps.
+	//
+
+	runnerVolumeName := "runner"
+	runnerVolumeMountPath := "/runner"
+	runnerVolumeEmptyDir := &corev1.EmptyDirVolumeSource{}
+
+	if runner.Spec.VolumeSizeLimit != nil {
+		runnerVolumeEmptyDir.SizeLimit = runner.Spec.VolumeSizeLimit
+	}
+
+	pod.Spec.Volumes = append(pod.Spec.Volumes,
+		corev1.Volume{
+			Name: runnerVolumeName,
+			VolumeSource: corev1.VolumeSource{
+				EmptyDir: runnerVolumeEmptyDir,
+			},
+		},
+	)
+
+	pod.Spec.Containers[0].VolumeMounts = append(pod.Spec.Containers[0].VolumeMounts,
+		corev1.VolumeMount{
+			Name: runnerVolumeName,
+			MountPath: runnerVolumeMountPath,
+		},
+	)
+
+	if !dockerdInRunner && dockerEnabled {
+		pod.Spec.Volumes = append(pod.Spec.Volumes,
+			corev1.Volume{
 				Name: "work",
 				VolumeSource: corev1.VolumeSource{
 					EmptyDir: &corev1.EmptyDirVolumeSource{},
 				},
 			},
-			{
-				Name: runnerVolumeName,
-				VolumeSource: corev1.VolumeSource{
-					EmptyDir: &corev1.EmptyDirVolumeSource{},
-				},
-			},
-			{
+			corev1.Volume{
 				Name: "certs-client",
 				VolumeSource: corev1.VolumeSource{
 					EmptyDir: &corev1.EmptyDirVolumeSource{},
 				},
 			},
-		}
-		pod.Spec.Containers[0].VolumeMounts = []corev1.VolumeMount{
-			{
+		)
+		pod.Spec.Containers[0].VolumeMounts = append(pod.Spec.Containers[0].VolumeMounts,
+			corev1.VolumeMount{
 				Name: "work",
 				MountPath: workDir,
 			},
-			{
-				Name: runnerVolumeName,
-				MountPath: runnerVolumeMountPath,
-			},
-			{
+			corev1.VolumeMount{
 				Name: "certs-client",
 				MountPath: "/certs/client",
 				ReadOnly: true,
 			},
-		}
+		)
 		pod.Spec.Containers[0].Env = append(pod.Spec.Containers[0].Env, []corev1.EnvVar{
 			{
 				Name: "DOCKER_HOST",
@@ -478,23 +706,31 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) {
 				Value: "/certs/client",
 			},
 		}...)
-		pod.Spec.Containers = append(pod.Spec.Containers, corev1.Container{
-			Name: "docker",
-			Image: r.DockerImage,
-			VolumeMounts: []corev1.VolumeMount{
+		// Determine the volume mounts assigned to the docker sidecar. In case extra mounts are included in the RunnerSpec, append them to the standard
+		// set of mounts. See https://github.com/summerwind/actions-runner-controller/issues/435 for context.
+		dockerVolumeMounts := []corev1.VolumeMount{
 			{
 				Name: "work",
 				MountPath: workDir,
 			},
-			{
-				Name: runnerVolumeName,
-				MountPath: runnerVolumeMountPath,
-			},
-			{
-				Name: "certs-client",
-				MountPath: "/certs/client",
-			},
 			},
+			{
+				Name: runnerVolumeName,
+				MountPath: runnerVolumeMountPath,
+			},
+			{
+				Name: "certs-client",
+				MountPath: "/certs/client",
+			},
+		}
+		if extraDockerVolumeMounts := runner.Spec.DockerVolumeMounts; extraDockerVolumeMounts != nil {
+			dockerVolumeMounts = append(dockerVolumeMounts, extraDockerVolumeMounts...)
+		}
+
+		pod.Spec.Containers = append(pod.Spec.Containers, corev1.Container{
+			Name: "docker",
+			Image: r.DockerImage,
+			VolumeMounts: dockerVolumeMounts,
 			Env: []corev1.EnvVar{
 				{
 					Name: "DOCKER_TLS_CERTDIR",
@@ -507,6 +743,21 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) {
 			Resources: runner.Spec.DockerdContainerResources,
 		})

+		if mtu := runner.Spec.DockerMTU; mtu != nil {
+			pod.Spec.Containers[1].Env = append(pod.Spec.Containers[1].Env, []corev1.EnvVar{
+				// See https://docs.docker.com/engine/security/rootless/
+				{
+					Name: "DOCKERD_ROOTLESS_ROOTLESSKIT_MTU",
+					Value: fmt.Sprintf("%d", *runner.Spec.DockerMTU),
+				},
+			}...)
+
+			pod.Spec.Containers[1].Args = append(pod.Spec.Containers[1].Args,
+				"--mtu",
+				fmt.Sprintf("%d", *runner.Spec.DockerMTU),
+			)
+		}
+
 	}

 	if len(runner.Spec.Containers) != 0 {
@@ -567,6 +818,10 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) {
 		pod.Spec.TerminationGracePeriodSeconds = runner.Spec.TerminationGracePeriodSeconds
 	}

+	if len(runner.Spec.HostAliases) != 0 {
+		pod.Spec.HostAliases = runner.Spec.HostAliases
+	}
+
 	if err := ctrl.SetControllerReference(&runner, &pod, r.Scheme); err != nil {
 		return pod, err
 	}
@@ -575,11 +830,17 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) {
 }

 func (r *RunnerReconciler) SetupWithManager(mgr ctrl.Manager) error {
-	r.Recorder = mgr.GetEventRecorderFor("runner-controller")
+	name := "runner-controller"
+	if r.Name != "" {
+		name = r.Name
+	}
+
+	r.Recorder = mgr.GetEventRecorderFor(name)
+
 	return ctrl.NewControllerManagedBy(mgr).
 		For(&v1alpha1.Runner{}).
 		Owns(&corev1.Pod{}).
+		Named(name).
 		Complete(r)
 }

@@ -20,6 +20,7 @@ import (
 	"context"
 	"fmt"
 	"hash/fnv"
+	"reflect"
 	"sort"
 	"time"

@@ -37,10 +38,12 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

 	"github.com/summerwind/actions-runner-controller/api/v1alpha1"
+	"github.com/summerwind/actions-runner-controller/controllers/metrics"
 )

 const (
 	LabelKeyRunnerTemplateHash = "runner-template-hash"
+	LabelKeyRunnerDeploymentName = "runner-deployment-name"

 	runnerSetOwnerKey = ".metadata.controller"
 )
@@ -48,9 +51,11 @@
 // RunnerDeploymentReconciler reconciles a Runner object
 type RunnerDeploymentReconciler struct {
 	client.Client
 	Log logr.Logger
 	Recorder record.EventRecorder
 	Scheme *runtime.Scheme
+	CommonRunnerLabels []string
+	Name string
 }

 // +kubebuilder:rbac:groups=actions.summerwind.dev,resources=runnerdeployments,verbs=get;list;watch;create;update;patch;delete
@@ -73,6 +78,8 @@ func (r *RunnerDeploymentReconciler) Reconcile(req ctrl.Request) (ctrl.Result, e
 		return ctrl.Result{}, nil
 	}

+	metrics.SetRunnerDeployment(rd)
+
 	var myRunnerReplicaSetList v1alpha1.RunnerReplicaSetList
 	if err := r.List(ctx, &myRunnerReplicaSetList, client.InNamespace(req.Namespace), client.MatchingFields{runnerSetOwnerKey: req.Name}); err != nil {
 		return ctrl.Result{}, err
@@ -141,6 +148,28 @@ func (r *RunnerDeploymentReconciler) Reconcile(req ctrl.Request) (ctrl.Result, e
 		return ctrl.Result{RequeueAfter: 5 * time.Second}, nil
 	}

+	if !reflect.DeepEqual(newestSet.Spec.Selector, desiredRS.Spec.Selector) {
+		updateSet := newestSet.DeepCopy()
+		updateSet.Spec = *desiredRS.Spec.DeepCopy()
+
+		// A selector update change doesn't trigger replicaset replacement,
+		// but we still need to update the existing replicaset with it.
+		// Otherwise selector-based runner query will never work on replicasets created before the controller v0.17.0
+		// See https://github.com/summerwind/actions-runner-controller/pull/355#discussion_r585379259
+		if err := r.Client.Update(ctx, updateSet); err != nil {
+			log.Error(err, "Failed to update runnerreplicaset resource")
+
+			return ctrl.Result{}, err
+		}
+
+		// At this point, we are already sure that there's no need to create a new replicaset
+		// as the runner template hash is not changed.
+		//
+		// But we still need to requeue for the (possibly rare) cases that there are still old replicasets that needs
+		// to be cleaned up.
+		return ctrl.Result{RequeueAfter: 5 * time.Second}, nil
+	}
+
 	const defaultReplicas = 1

 	currentDesiredReplicas := getIntOrDefault(newestSet.Spec.Replicas, defaultReplicas)
@@ -163,21 +192,35 @@ func (r *RunnerDeploymentReconciler) Reconcile(req ctrl.Request) (ctrl.Result, e
 	if len(oldSets) > 0 {
 		readyReplicas := newestSet.Status.ReadyReplicas

-		if readyReplicas < currentDesiredReplicas {
-			log.WithValues("runnerreplicaset", types.NamespacedName{
+		oldSetsCount := len(oldSets)
+
+		logWithDebugInfo := log.WithValues(
+			"newest_runnerreplicaset", types.NamespacedName{
 				Namespace: newestSet.Namespace,
 				Name: newestSet.Name,
-			}).
-				Info("Waiting until the newest runner replica set to be 100% available")
+			},
+			"newest_runnerreplicaset_replicas_ready", readyReplicas,
+			"newest_runnerreplicaset_replicas_desired", currentDesiredReplicas,
+			"old_runnerreplicasets_count", oldSetsCount,
+		)

-			return ctrl.Result{RequeueAfter: 10 * time.Second}, nil
+		if readyReplicas < currentDesiredReplicas {
+			logWithDebugInfo.
+				Info("Waiting until the newest runnerreplicaset to be 100% available")
+
+			return ctrl.Result{}, nil
+		}
+
+		if oldSetsCount > 0 {
+			logWithDebugInfo.
+				Info("The newest runnerreplicaset is 100% available. Deleting old runnerreplicasets")
 		}

 		for i := range oldSets {
 			rs := oldSets[i]

 			if err := r.Client.Delete(ctx, &rs); err != nil {
-				log.Error(err, "Failed to delete runner resource")
+				log.Error(err, "Failed to delete runnerreplicaset resource")

 				return ctrl.Result{}, err
 			}
@@ -256,28 +299,94 @@ func CloneAndAddLabel(labels map[string]string, labelKey, labelValue string) map
 	return newLabels
 }

-func (r *RunnerDeploymentReconciler) newRunnerReplicaSet(rd v1alpha1.RunnerDeployment) (*v1alpha1.RunnerReplicaSet, error) {
-	newRSTemplate := *rd.Spec.Template.DeepCopy()
-	templateHash := ComputeHash(&newRSTemplate)
-	// Add template hash label to selector.
-	labels := CloneAndAddLabel(rd.Spec.Template.Labels, LabelKeyRunnerTemplateHash, templateHash)
-
-	newRSTemplate.Labels = labels
+// Clones the given selector and returns a new selector with the given key and value added.
+// Returns the given selector, if labelKey is empty.
+//
+// Proudly copied from k8s.io/kubernetes/pkg/util/labels.CloneSelectorAndAddLabel
+func CloneSelectorAndAddLabel(selector *metav1.LabelSelector, labelKey, labelValue string) *metav1.LabelSelector {
+	if labelKey == "" {
+		// Don't need to add a label.
+		return selector
+	}
+
+	// Clone.
+	newSelector := new(metav1.LabelSelector)
+
+	newSelector.MatchLabels = make(map[string]string)
+	if selector.MatchLabels != nil {
+		for key, val := range selector.MatchLabels {
+			newSelector.MatchLabels[key] = val
+		}
+	}
+	newSelector.MatchLabels[labelKey] = labelValue
+
+	if selector.MatchExpressions != nil {
+		newMExps := make([]metav1.LabelSelectorRequirement, len(selector.MatchExpressions))
+		for i, me := range selector.MatchExpressions {
+			newMExps[i].Key = me.Key
+			newMExps[i].Operator = me.Operator
+			if me.Values != nil {
+				newMExps[i].Values = make([]string, len(me.Values))
+				copy(newMExps[i].Values, me.Values)
+			} else {
+				newMExps[i].Values = nil
+			}
+		}
+		newSelector.MatchExpressions = newMExps
+	} else {
+		newSelector.MatchExpressions = nil
+	}
+
+	return newSelector
+}
+
+func (r *RunnerDeploymentReconciler) newRunnerReplicaSet(rd v1alpha1.RunnerDeployment) (*v1alpha1.RunnerReplicaSet, error) {
+	return newRunnerReplicaSet(&rd, r.CommonRunnerLabels, r.Scheme)
+}
+
+func getSelector(rd *v1alpha1.RunnerDeployment) *metav1.LabelSelector {
+	selector := rd.Spec.Selector
+	if selector == nil {
+		selector = &metav1.LabelSelector{MatchLabels: map[string]string{LabelKeyRunnerDeploymentName: rd.Name}}
+	}
+
+	return selector
+}
+
+func newRunnerReplicaSet(rd *v1alpha1.RunnerDeployment, commonRunnerLabels []string, scheme *runtime.Scheme) (*v1alpha1.RunnerReplicaSet, error) {
+	newRSTemplate := *rd.Spec.Template.DeepCopy()
+
+	for _, l := range commonRunnerLabels {
+		newRSTemplate.Spec.Labels = append(newRSTemplate.Spec.Labels, l)
+	}
+
+	templateHash := ComputeHash(&newRSTemplate)
+
+	// Add template hash label to selector.
+	newRSTemplate.ObjectMeta.Labels = CloneAndAddLabel(newRSTemplate.ObjectMeta.Labels, LabelKeyRunnerTemplateHash, templateHash)
+
+	// This label selector is used by default when rd.Spec.Selector is empty.
+	newRSTemplate.ObjectMeta.Labels = CloneAndAddLabel(newRSTemplate.ObjectMeta.Labels, LabelKeyRunnerDeploymentName, rd.Name)
+
+	selector := getSelector(rd)
+
+	newRSSelector := CloneSelectorAndAddLabel(selector, LabelKeyRunnerTemplateHash, templateHash)

 	rs := v1alpha1.RunnerReplicaSet{
 		TypeMeta: metav1.TypeMeta{},
 		ObjectMeta: metav1.ObjectMeta{
 			GenerateName: rd.ObjectMeta.Name + "-",
 			Namespace: rd.ObjectMeta.Namespace,
-			Labels: labels,
+			Labels: newRSTemplate.ObjectMeta.Labels,
 		},
 		Spec: v1alpha1.RunnerReplicaSetSpec{
 			Replicas: rd.Spec.Replicas,
+			Selector: newRSSelector,
 			Template: newRSTemplate,
 		},
 	}

-	if err := ctrl.SetControllerReference(&rd, &rs, r.Scheme); err != nil {
+	if err := ctrl.SetControllerReference(rd, &rs, scheme); err != nil {
 		return &rs, err
 	}

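A small usage sketch (not part of the diff) of the helper above: CloneSelectorAndAddLabel leaves the input selector untouched and returns a copy with the extra label requirement, which is what lets the controller pin a RunnerReplicaSet's selector to a single template hash. The hash value below is made up purely for illustration.

	base := &metav1.LabelSelector{MatchLabels: map[string]string{LabelKeyRunnerDeploymentName: "example"}}
	pinned := CloneSelectorAndAddLabel(base, LabelKeyRunnerTemplateHash, "5b7f9d4c6d") // hypothetical hash
	// base   still matches only runner-deployment-name=example
	// pinned matches runner-deployment-name=example AND runner-template-hash=5b7f9d4c6d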
@@ -285,7 +394,12 @@ func (r *RunnerDeploymentReconciler) newRunnerReplicaSet(rd v1alpha1.RunnerDeplo
 }

 func (r *RunnerDeploymentReconciler) SetupWithManager(mgr ctrl.Manager) error {
-	r.Recorder = mgr.GetEventRecorderFor("runnerdeployment-controller")
+	name := "runnerdeployment-controller"
+	if r.Name != "" {
+		name = r.Name
+	}
+
+	r.Recorder = mgr.GetEventRecorderFor(name)
+
 	if err := mgr.GetFieldIndexer().IndexField(&v1alpha1.RunnerReplicaSet{}, runnerSetOwnerKey, func(rawObj runtime.Object) []string {
 		runnerSet := rawObj.(*v1alpha1.RunnerReplicaSet)
@@ -306,5 +420,6 @@ func (r *RunnerDeploymentReconciler) SetupWithManager(mgr ctrl.Manager) error {
 	return ctrl.NewControllerManagedBy(mgr).
 		For(&v1alpha1.RunnerDeployment{}).
 		Owns(&v1alpha1.RunnerReplicaSet{}).
+		Named(name).
 		Complete(r)
 }

@@ -2,8 +2,13 @@ package controllers
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/google/go-cmp/cmp"
|
||||||
|
"k8s.io/apimachinery/pkg/runtime"
|
||||||
|
|
||||||
corev1 "k8s.io/api/core/v1"
|
corev1 "k8s.io/api/core/v1"
|
||||||
"k8s.io/apimachinery/pkg/types"
|
"k8s.io/apimachinery/pkg/types"
|
||||||
"k8s.io/client-go/kubernetes/scheme"
|
"k8s.io/client-go/kubernetes/scheme"
|
||||||
@@ -18,6 +23,103 @@ import (
|
|||||||
actionsv1alpha1 "github.com/summerwind/actions-runner-controller/api/v1alpha1"
|
actionsv1alpha1 "github.com/summerwind/actions-runner-controller/api/v1alpha1"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
func TestNewRunnerReplicaSet(t *testing.T) {
|
||||||
|
scheme := runtime.NewScheme()
|
||||||
|
if err := actionsv1alpha1.AddToScheme(scheme); err != nil {
|
||||||
|
t.Fatalf("%v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
r := &RunnerDeploymentReconciler{
|
||||||
|
CommonRunnerLabels: []string{"dev"},
|
||||||
|
Scheme: scheme,
|
||||||
|
}
|
||||||
|
rd := actionsv1alpha1.RunnerDeployment{
|
||||||
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
|
Name: "example",
|
||||||
|
},
|
||||||
|
Spec: actionsv1alpha1.RunnerDeploymentSpec{
|
||||||
|
Selector: &metav1.LabelSelector{
|
||||||
|
MatchLabels: map[string]string{
|
||||||
|
"foo": "bar",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Template: actionsv1alpha1.RunnerTemplate{
|
||||||
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
|
Labels: map[string]string{
|
||||||
|
"foo": "bar",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Spec: actionsv1alpha1.RunnerSpec{
|
||||||
|
Labels: []string{"project1"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
rs, err := r.newRunnerReplicaSet(rd)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("%v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if val, ok := rs.Labels["foo"]; ok {
|
||||||
|
if val != "bar" {
|
||||||
|
t.Errorf("foo label does not have bar but %v", val)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
t.Errorf("foo label does not exist")
|
||||||
|
}
|
||||||
|
|
||||||
|
hash1, ok := rs.Labels[LabelKeyRunnerTemplateHash]
|
||||||
|
if !ok {
|
||||||
|
t.Errorf("missing runner-template-hash label")
|
||||||
|
}
|
||||||
|
|
||||||
|
runnerLabel := []string{"project1", "dev"}
|
||||||
|
if d := cmp.Diff(runnerLabel, rs.Spec.Template.Spec.Labels); d != "" {
|
||||||
|
t.Errorf("%s", d)
|
||||||
|
}
|
||||||
|
|
||||||
|
rd2 := rd.DeepCopy()
|
||||||
|
rd2.Spec.Template.Spec.Labels = []string{"project2"}
|
||||||
|
|
||||||
|
rs2, err := r.newRunnerReplicaSet(*rd2)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("%v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
hash2, ok := rs2.Labels[LabelKeyRunnerTemplateHash]
|
||||||
|
if !ok {
|
||||||
|
t.Errorf("missing runner-template-hash label")
|
||||||
|
}
|
||||||
|
|
||||||
|
if hash1 == hash2 {
|
||||||
|
t.Errorf(
|
||||||
|
"runner replica sets from runner deployments with varying labels must have different template hash, but got %s and %s",
|
||||||
|
hash1, hash2,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
rd3 := rd.DeepCopy()
|
||||||
|
rd3.Spec.Template.Labels["foo"] = "baz"
|
||||||
|
|
||||||
|
rs3, err := r.newRunnerReplicaSet(*rd3)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("%v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
hash3, ok := rs3.Labels[LabelKeyRunnerTemplateHash]
|
||||||
|
if !ok {
|
||||||
|
t.Errorf("missing runner-template-hash label")
|
||||||
|
}
|
||||||
|
|
||||||
|
if hash1 == hash3 {
|
||||||
|
t.Errorf(
|
||||||
|
"runner replica sets from runner deployments with varying meta labels must have different template hash, but got %s and %s",
|
||||||
|
hash1, hash3,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// SetupDeploymentTest will set up a testing environment.
|
// SetupDeploymentTest will set up a testing environment.
|
||||||
// This includes:
|
// This includes:
|
||||||
// * creating a Namespace to be used during the test
|
// * creating a Namespace to be used during the test
|
||||||
@@ -37,7 +139,9 @@ func SetupDeploymentTest(ctx context.Context) *corev1.Namespace {
|
|||||||
err := k8sClient.Create(ctx, ns)
|
err := k8sClient.Create(ctx, ns)
|
||||||
Expect(err).NotTo(HaveOccurred(), "failed to create test namespace")
|
Expect(err).NotTo(HaveOccurred(), "failed to create test namespace")
|
||||||
|
|
||||||
mgr, err := ctrl.NewManager(cfg, ctrl.Options{})
|
mgr, err := ctrl.NewManager(cfg, ctrl.Options{
|
||||||
|
Namespace: ns.Name,
|
||||||
|
})
|
||||||
Expect(err).NotTo(HaveOccurred(), "failed to create manager")
|
Expect(err).NotTo(HaveOccurred(), "failed to create manager")
|
||||||
|
|
||||||
controller := &RunnerDeploymentReconciler{
|
controller := &RunnerDeploymentReconciler{
|
||||||
@@ -45,6 +149,7 @@ func SetupDeploymentTest(ctx context.Context) *corev1.Namespace {
|
|||||||
Scheme: scheme.Scheme,
|
Scheme: scheme.Scheme,
|
||||||
Log: logf.Log,
|
Log: logf.Log,
|
||||||
Recorder: mgr.GetEventRecorderFor("runnerreplicaset-controller"),
|
Recorder: mgr.GetEventRecorderFor("runnerreplicaset-controller"),
|
||||||
|
Name: "runnerdeployment-" + ns.Name,
|
||||||
}
|
}
|
||||||
err = controller.SetupWithManager(mgr)
|
err = controller.SetupWithManager(mgr)
|
||||||
Expect(err).NotTo(HaveOccurred(), "failed to setup controller")
|
Expect(err).NotTo(HaveOccurred(), "failed to setup controller")
|
||||||
@@ -74,7 +179,7 @@ var _ = Context("Inside of a new namespace", func() {
|
|||||||
Describe("when no existing resources exist", func() {
|
Describe("when no existing resources exist", func() {
|
||||||
|
|
||||||
It("should create a new RunnerReplicaSet resource from the specified template, add a another RunnerReplicaSet on template modification, and eventually removes old runnerreplicasets", func() {
|
It("should create a new RunnerReplicaSet resource from the specified template, add a another RunnerReplicaSet on template modification, and eventually removes old runnerreplicasets", func() {
|
||||||
name := "example-runnerdeploy"
|
name := "example-runnerdeploy-1"
|
||||||
|
|
||||||
{
|
{
|
||||||
rs := &actionsv1alpha1.RunnerDeployment{
|
rs := &actionsv1alpha1.RunnerDeployment{
|
||||||
@@ -84,9 +189,19 @@ var _ = Context("Inside of a new namespace", func() {
|
|||||||
},
|
},
|
||||||
Spec: actionsv1alpha1.RunnerDeploymentSpec{
|
Spec: actionsv1alpha1.RunnerDeploymentSpec{
|
||||||
Replicas: intPtr(1),
|
Replicas: intPtr(1),
|
||||||
|
Selector: &metav1.LabelSelector{
|
||||||
|
MatchLabels: map[string]string{
|
||||||
|
"foo": "bar",
|
||||||
|
},
|
||||||
|
},
|
||||||
Template: actionsv1alpha1.RunnerTemplate{
|
Template: actionsv1alpha1.RunnerTemplate{
|
||||||
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
|
Labels: map[string]string{
|
||||||
|
"foo": "bar",
|
||||||
|
},
|
||||||
|
},
|
||||||
Spec: actionsv1alpha1.RunnerSpec{
|
Spec: actionsv1alpha1.RunnerSpec{
|
||||||
Repository: "foo/bar",
|
Repository: "test/valid",
|
||||||
Image: "bar",
|
Image: "bar",
|
||||||
Env: []corev1.EnvVar{
|
Env: []corev1.EnvVar{
|
||||||
{Name: "FOO", Value: "FOOVALUE"},
|
{Name: "FOO", Value: "FOOVALUE"},
|
||||||
@@ -103,29 +218,25 @@ var _ = Context("Inside of a new namespace", func() {
|
|||||||
runnerSets := actionsv1alpha1.RunnerReplicaSetList{Items: []actionsv1alpha1.RunnerReplicaSet{}}
|
runnerSets := actionsv1alpha1.RunnerReplicaSetList{Items: []actionsv1alpha1.RunnerReplicaSet{}}
|
||||||
|
|
||||||
Eventually(
|
Eventually(
|
||||||
func() int {
|
func() (int, error) {
|
||||||
err := k8sClient.List(ctx, &runnerSets, client.InNamespace(ns.Name))
|
selector, err := metav1.LabelSelectorAsSelector(rs.Spec.Selector)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logf.Log.Error(err, "list runner sets")
|
return 0, err
|
||||||
}
|
}
|
||||||
|
err = k8sClient.List(
|
||||||
return len(runnerSets.Items)
|
ctx,
|
||||||
},
|
&runnerSets,
|
||||||
time.Second*5, time.Millisecond*500).Should(BeEquivalentTo(1))
|
client.InNamespace(ns.Name),
|
||||||
|
client.MatchingLabelsSelector{Selector: selector},
|
||||||
Eventually(
|
)
|
||||||
func() int {
|
|
||||||
err := k8sClient.List(ctx, &runnerSets, client.InNamespace(ns.Name))
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logf.Log.Error(err, "list runner sets")
|
return 0, err
|
||||||
|
}
|
||||||
|
if len(runnerSets.Items) != 1 {
|
||||||
|
return 0, fmt.Errorf("runnerreplicasets is not 1 but %d", len(runnerSets.Items))
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(runnerSets.Items) == 0 {
|
return *runnerSets.Items[0].Spec.Replicas, nil
|
||||||
logf.Log.Info("No runnerreplicasets exist yet")
|
|
||||||
return -1
|
|
||||||
}
|
|
||||||
|
|
||||||
return *runnerSets.Items[0].Spec.Replicas
|
|
||||||
},
|
},
|
||||||
time.Second*5, time.Millisecond*500).Should(BeEquivalentTo(1))
|
time.Second*5, time.Millisecond*500).Should(BeEquivalentTo(1))
|
||||||
}
|
}
|
||||||
@@ -134,13 +245,12 @@ var _ = Context("Inside of a new namespace", func() {
|
|||||||
// We wrap the update in the Eventually block to avoid the below error that occurs due to concurrent modification
|
// We wrap the update in the Eventually block to avoid the below error that occurs due to concurrent modification
|
||||||
// made by the controller to update .Status.AvailableReplicas and .Status.ReadyReplicas
|
// made by the controller to update .Status.AvailableReplicas and .Status.ReadyReplicas
|
||||||
// Operation cannot be fulfilled on runnersets.actions.summerwind.dev "example-runnerset": the object has been modified; please apply your changes to the latest version and try again
|
// Operation cannot be fulfilled on runnersets.actions.summerwind.dev "example-runnerset": the object has been modified; please apply your changes to the latest version and try again
|
||||||
|
var rd actionsv1alpha1.RunnerDeployment
|
||||||
Eventually(func() error {
|
Eventually(func() error {
|
||||||
var rd actionsv1alpha1.RunnerDeployment
|
|
||||||
|
|
||||||
err := k8sClient.Get(ctx, types.NamespacedName{Namespace: ns.Name, Name: name}, &rd)
|
err := k8sClient.Get(ctx, types.NamespacedName{Namespace: ns.Name, Name: name}, &rd)
|
||||||
|
if err != nil {
|
||||||
Expect(err).NotTo(HaveOccurred(), "failed to get test RunnerReplicaSet resource")
|
return fmt.Errorf("failed to get test RunnerReplicaSet resource: %v\n", err)
|
||||||
|
}
|
||||||
rd.Spec.Replicas = intPtr(2)
|
rd.Spec.Replicas = intPtr(2)
|
||||||
|
|
||||||
return k8sClient.Update(ctx, &rd)
|
return k8sClient.Update(ctx, &rd)
|
||||||
@@ -150,27 +260,222 @@ var _ = Context("Inside of a new namespace", func() {
|
|||||||
runnerSets := actionsv1alpha1.RunnerReplicaSetList{Items: []actionsv1alpha1.RunnerReplicaSet{}}
|
runnerSets := actionsv1alpha1.RunnerReplicaSetList{Items: []actionsv1alpha1.RunnerReplicaSet{}}
|
||||||
|
|
||||||
Eventually(
|
Eventually(
|
||||||
func() int {
|
func() (int, error) {
|
||||||
err := k8sClient.List(ctx, &runnerSets, client.InNamespace(ns.Name))
|
selector, err := metav1.LabelSelectorAsSelector(rd.Spec.Selector)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logf.Log.Error(err, "list runner sets")
|
return 0, err
|
||||||
|
}
|
||||||
|
err = k8sClient.List(
|
||||||
|
ctx,
|
||||||
|
&runnerSets,
|
||||||
|
client.InNamespace(ns.Name),
|
||||||
|
client.MatchingLabelsSelector{Selector: selector},
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
if len(runnerSets.Items) != 1 {
|
||||||
|
return 0, fmt.Errorf("runnerreplicasets is not 1 but %d", len(runnerSets.Items))
|
||||||
}
|
}
|
||||||
|
|
||||||
return len(runnerSets.Items)
|
return *runnerSets.Items[0].Spec.Replicas, nil
|
||||||
},
|
|
||||||
time.Second*5, time.Millisecond*500).Should(BeEquivalentTo(1))
|
|
||||||
|
|
||||||
Eventually(
|
|
||||||
func() int {
|
|
||||||
err := k8sClient.List(ctx, &runnerSets, client.InNamespace(ns.Name))
|
|
||||||
if err != nil {
|
|
||||||
logf.Log.Error(err, "list runner sets")
|
|
||||||
}
|
|
||||||
|
|
||||||
return *runnerSets.Items[0].Spec.Replicas
|
|
||||||
},
|
},
|
||||||
time.Second*5, time.Millisecond*500).Should(BeEquivalentTo(2))
|
time.Second*5, time.Millisecond*500).Should(BeEquivalentTo(2))
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
|
||||||
|
It("should create a new RunnerReplicaSet resource from the specified template without labels and selector, add a another RunnerReplicaSet on template modification, and eventually removes old runnerreplicasets", func() {
|
||||||
|
name := "example-runnerdeploy-2"
|
||||||
|
|
||||||
|
{
|
||||||
|
rs := &actionsv1alpha1.RunnerDeployment{
|
||||||
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
|
Name: name,
|
||||||
|
Namespace: ns.Name,
|
||||||
|
},
|
||||||
|
Spec: actionsv1alpha1.RunnerDeploymentSpec{
|
||||||
|
Replicas: intPtr(1),
|
||||||
|
Template: actionsv1alpha1.RunnerTemplate{
|
||||||
|
Spec: actionsv1alpha1.RunnerSpec{
|
||||||
|
Repository: "test/valid",
|
||||||
|
Image: "bar",
|
||||||
|
Env: []corev1.EnvVar{
|
||||||
|
{Name: "FOO", Value: "FOOVALUE"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
err := k8sClient.Create(ctx, rs)
|
||||||
|
|
||||||
|
Expect(err).NotTo(HaveOccurred(), "failed to create test RunnerReplicaSet resource")
|
||||||
|
|
||||||
|
runnerSets := actionsv1alpha1.RunnerReplicaSetList{Items: []actionsv1alpha1.RunnerReplicaSet{}}
|
||||||
|
|
||||||
|
Eventually(
|
||||||
|
func() (int, error) {
|
||||||
|
selector, err := metav1.LabelSelectorAsSelector(rs.Spec.Selector)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
err = k8sClient.List(
|
||||||
|
ctx,
|
||||||
|
&runnerSets,
|
||||||
|
client.InNamespace(ns.Name),
|
||||||
|
client.MatchingLabelsSelector{Selector: selector},
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
if len(runnerSets.Items) != 1 {
|
||||||
|
return 0, fmt.Errorf("runnerreplicasets is not 1 but %d", len(runnerSets.Items))
|
||||||
|
}
|
||||||
|
|
||||||
|
return *runnerSets.Items[0].Spec.Replicas, nil
|
||||||
|
},
|
||||||
|
time.Second*5, time.Millisecond*500).Should(BeEquivalentTo(1))
|
||||||
|
}
|
||||||
|
|
||||||
|
{
|
||||||
|
// We wrap the update in the Eventually block to avoid the below error that occurs due to concurrent modification
|
||||||
|
// made by the controller to update .Status.AvailableReplicas and .Status.ReadyReplicas
|
||||||
|
// Operation cannot be fulfilled on runnersets.actions.summerwind.dev "example-runnerset": the object has been modified; please apply your changes to the latest version and try again
|
||||||
|
var rd actionsv1alpha1.RunnerDeployment
|
||||||
|
Eventually(func() error {
|
||||||
|
err := k8sClient.Get(ctx, types.NamespacedName{Namespace: ns.Name, Name: name}, &rd)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to get test RunnerReplicaSet resource: %v\n", err)
|
||||||
|
}
|
||||||
|
rd.Spec.Replicas = intPtr(2)
|
||||||
|
|
||||||
|
return k8sClient.Update(ctx, &rd)
|
||||||
|
},
|
||||||
|
time.Second*1, time.Millisecond*500).Should(BeNil())
|
||||||
|
|
||||||
|
runnerSets := actionsv1alpha1.RunnerReplicaSetList{Items: []actionsv1alpha1.RunnerReplicaSet{}}
|
||||||
|
|
||||||
|
Eventually(
|
||||||
|
func() (int, error) {
|
||||||
|
selector, err := metav1.LabelSelectorAsSelector(rd.Spec.Selector)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
err = k8sClient.List(
|
||||||
|
ctx,
|
||||||
|
&runnerSets,
|
||||||
|
client.InNamespace(ns.Name),
|
||||||
|
client.MatchingLabelsSelector{Selector: selector},
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
if len(runnerSets.Items) != 1 {
|
||||||
|
return 0, fmt.Errorf("runnerreplicasets is not 1 but %d", len(runnerSets.Items))
|
||||||
|
}
|
||||||
|
|
||||||
|
return *runnerSets.Items[0].Spec.Replicas, nil
|
||||||
|
},
|
||||||
|
time.Second*5, time.Millisecond*500).Should(BeEquivalentTo(2))
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
It("should adopt RunnerReplicaSet created before 0.18.0 to have Spec.Selector", func() {
|
||||||
|
name := "example-runnerdeploy-2"
|
||||||
|
|
||||||
|
{
|
||||||
|
rd := &actionsv1alpha1.RunnerDeployment{
|
||||||
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
|
Name: name,
|
||||||
|
Namespace: ns.Name,
|
||||||
|
},
|
||||||
|
Spec: actionsv1alpha1.RunnerDeploymentSpec{
|
||||||
|
Replicas: intPtr(1),
|
||||||
|
Template: actionsv1alpha1.RunnerTemplate{
|
||||||
|
Spec: actionsv1alpha1.RunnerSpec{
|
||||||
|
Repository: "test/valid",
|
||||||
|
Image: "bar",
|
||||||
|
Env: []corev1.EnvVar{
|
||||||
|
{Name: "FOO", Value: "FOOVALUE"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
createRDErr := k8sClient.Create(ctx, rd)
|
||||||
|
Expect(createRDErr).NotTo(HaveOccurred(), "failed to create test RunnerReplicaSet resource")
|
||||||
|
|
||||||
|
Eventually(
|
||||||
|
func() (int, error) {
|
||||||
|
runnerSets := actionsv1alpha1.RunnerReplicaSetList{Items: []actionsv1alpha1.RunnerReplicaSet{}}
|
||||||
|
|
||||||
|
err := k8sClient.List(
|
||||||
|
ctx,
|
||||||
|
&runnerSets,
|
||||||
|
client.InNamespace(ns.Name),
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return len(runnerSets.Items), nil
|
||||||
|
},
|
||||||
|
time.Second*1, time.Millisecond*500).Should(BeEquivalentTo(1))
|
||||||
|
|
||||||
|
var rs17 *actionsv1alpha1.RunnerReplicaSet
|
||||||
|
|
||||||
|
Consistently(
|
||||||
|
func() (*metav1.LabelSelector, error) {
|
||||||
|
runnerSets := actionsv1alpha1.RunnerReplicaSetList{Items: []actionsv1alpha1.RunnerReplicaSet{}}
|
||||||
|
|
||||||
|
err := k8sClient.List(
|
||||||
|
ctx,
|
||||||
|
&runnerSets,
|
||||||
|
client.InNamespace(ns.Name),
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if len(runnerSets.Items) != 1 {
|
||||||
|
return nil, fmt.Errorf("runnerreplicasets is not 1 but %d", len(runnerSets.Items))
|
||||||
|
}
|
||||||
|
|
||||||
|
rs17 = &runnerSets.Items[0]
|
||||||
|
|
||||||
|
return runnerSets.Items[0].Spec.Selector, nil
|
||||||
|
},
|
||||||
|
time.Second*1, time.Millisecond*500).Should(Not(BeNil()))
|
||||||
|
|
||||||
|
// We simulate the old, pre 0.18.0 RunnerReplicaSet by updating it.
|
||||||
|
// I've tried to use controllerutil.Set{Owner,Controller}Reference and k8sClient.Create(rs17)
|
||||||
|
// but it didn't work due to missing RD UID, where UID is generated on K8s API server on k8sCLient.Create(rd)
|
||||||
|
rs17.Spec.Selector = nil
|
||||||
|
|
||||||
|
updateRSErr := k8sClient.Update(ctx, rs17)
|
||||||
|
Expect(updateRSErr).NotTo(HaveOccurred())
|
||||||
|
|
||||||
|
Eventually(
|
||||||
|
func() (*metav1.LabelSelector, error) {
|
||||||
|
runnerSets := actionsv1alpha1.RunnerReplicaSetList{Items: []actionsv1alpha1.RunnerReplicaSet{}}
|
||||||
|
|
||||||
|
err := k8sClient.List(
|
||||||
|
ctx,
|
||||||
|
&runnerSets,
|
||||||
|
client.InNamespace(ns.Name),
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if len(runnerSets.Items) != 1 {
|
||||||
|
return nil, fmt.Errorf("runnerreplicasets is not 1 but %d", len(runnerSets.Items))
|
||||||
|
}
|
||||||
|
|
||||||
|
return runnerSets.Items[0].Spec.Selector, nil
|
||||||
|
},
|
||||||
|
time.Second*1, time.Millisecond*500).Should(Not(BeNil()))
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
})
|
})
|
||||||
})
|
})
|
||||||
@@ -18,10 +18,14 @@ package controllers

 import (
 	"context"
+	"errors"
 	"fmt"
+	"time"
+
+	gogithub "github.com/google/go-github/v33/github"
+
 	"github.com/go-logr/logr"
-	"k8s.io/apimachinery/pkg/api/errors"
+	kerrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/client-go/tools/record"
 	ctrl "sigs.k8s.io/controller-runtime"
@@ -41,6 +45,7 @@ type RunnerReplicaSetReconciler struct {
 	Recorder record.EventRecorder
 	Scheme *runtime.Scheme
 	GitHubClient *github.Client
+	Name string
 }

 // +kubebuilder:rbac:groups=actions.summerwind.dev,resources=runnerreplicasets,verbs=get;list;watch;create;update;patch;delete
@@ -63,18 +68,33 @@ func (r *RunnerReplicaSetReconciler) Reconcile(req ctrl.Request) (ctrl.Result, e
 		return ctrl.Result{}, nil
 	}

+	selector, err := metav1.LabelSelectorAsSelector(rs.Spec.Selector)
+	if err != nil {
+		return ctrl.Result{}, err
+	}
+	// Get the Runners managed by the target RunnerReplicaSet
 	var allRunners v1alpha1.RunnerList
-	if err := r.List(ctx, &allRunners, client.InNamespace(req.Namespace)); err != nil {
-		if !errors.IsNotFound(err) {
+	if err := r.List(
+		ctx,
+		&allRunners,
+		client.InNamespace(req.Namespace),
+		client.MatchingLabelsSelector{Selector: selector},
+	); err != nil {
+		if !kerrors.IsNotFound(err) {
 			return ctrl.Result{}, err
 		}
 	}

 	var myRunners []v1alpha1.Runner

-	var available, ready int
+	var (
+		available int
+		ready int
+	)
+
 	for _, r := range allRunners.Items {
+		// This guard is required to avoid the RunnerReplicaSet created by the controller v0.17.0 or before
+		// to not treat all the runners in the namespace as its children.
 		if metav1.IsControlledBy(&r, &rs) {
 			myRunners = append(myRunners, r)

@@ -94,41 +114,92 @@ func (r *RunnerReplicaSetReconciler) Reconcile(req ctrl.Request) (ctrl.Result, e
 		desired = 1
 	}

-	log.V(0).Info("debug", "desired", desired, "available", available)

 	if available > desired {
 		n := available - desired

-		// get runners that are currently not busy
-		var notBusy []v1alpha1.Runner
-		for _, runner := range myRunners {
-			busy, err := r.isRunnerBusy(ctx, runner.Spec.Organization, runner.Spec.Repository, runner.Name)
+		log.V(0).Info(fmt.Sprintf("Deleting %d runners", n), "desired", desired, "available", available, "ready", ready)
+
+		// get runners that are currently offline/not busy/timed-out to register
+		var deletionCandidates []v1alpha1.Runner
+
+		for _, runner := range allRunners.Items {
+			busy, err := r.GitHubClient.IsRunnerBusy(ctx, runner.Spec.Enterprise, runner.Spec.Organization, runner.Spec.Repository, runner.Name)
 			if err != nil {
-				log.Error(err, "Failed to check if runner is busy")
-				return ctrl.Result{}, err
-			}
-			if !busy {
-				notBusy = append(notBusy, runner)
+				notRegistered := false
+				offline := false
+
+				var notFoundException *github.RunnerNotFound
+				var offlineException *github.RunnerOffline
+				if errors.As(err, &notFoundException) {
+					log.V(1).Info("Failed to check if runner is busy. Either this runner has never been successfully registered to GitHub or it still needs more time.", "runnerName", runner.Name)
+					notRegistered = true
+				} else if errors.As(err, &offlineException) {
+					offline = true
+				} else {
+					var e *gogithub.RateLimitError
+					if errors.As(err, &e) {
+						// We log the underlying error when we failed calling GitHub API to list or unregisters,
+						// or the runner is still busy.
+						log.Error(
+							err,
+							fmt.Sprintf(
+								"Failed to check if runner is busy due to GitHub API rate limit. Retrying in %s to avoid excessive GitHub API calls",
+								retryDelayOnGitHubAPIRateLimitError,
+							),
+						)
+
+						return ctrl.Result{RequeueAfter: retryDelayOnGitHubAPIRateLimitError}, err
+					}
+
+					return ctrl.Result{}, err
+				}
+
+				registrationTimeout := 15 * time.Minute
+				currentTime := time.Now()
+				registrationDidTimeout := currentTime.Sub(runner.CreationTimestamp.Add(registrationTimeout)) > 0
+
+				if notRegistered && registrationDidTimeout {
+					log.Info(
+						"Runner failed to register itself to GitHub in timely manner. "+
+							"Marking the runner for scale down. "+
+							"CAUTION: If you see this a lot, you should investigate the root cause. "+
+							"See https://github.com/summerwind/actions-runner-controller/issues/288",
+						"runnerCreationTimestamp", runner.CreationTimestamp,
+						"currentTime", currentTime,
+						"configuredRegistrationTimeout", registrationTimeout,
+					)
+
+					deletionCandidates = append(deletionCandidates, runner)
+				}
+
+				// offline runners should always be a great target for scale down
+				if offline {
+					deletionCandidates = append(deletionCandidates, runner)
+				}
+			} else if !busy {
+				deletionCandidates = append(deletionCandidates, runner)
 			}
 		}

-		if len(notBusy) < n {
-			n = len(notBusy)
+		if len(deletionCandidates) < n {
+			n = len(deletionCandidates)
 		}

 		for i := 0; i < n; i++ {
-			if err := r.Client.Delete(ctx, &notBusy[i]); err != nil {
+			if err := r.Client.Delete(ctx, &deletionCandidates[i]); client.IgnoreNotFound(err) != nil {
 				log.Error(err, "Failed to delete runner resource")

 				return ctrl.Result{}, err
 			}

-			r.Recorder.Event(&rs, corev1.EventTypeNormal, "RunnerDeleted", fmt.Sprintf("Deleted runner '%s'", myRunners[i].Name))
-			log.Info("Deleted runner", "runnerreplicaset", rs.ObjectMeta.Name)
+			r.Recorder.Event(&rs, corev1.EventTypeNormal, "RunnerDeleted", fmt.Sprintf("Deleted runner '%s'", deletionCandidates[i].Name))
+			log.Info("Deleted runner")
 		}
 	} else if desired > available {
 		n := desired - available

+		log.V(0).Info(fmt.Sprintf("Creating %d runner(s)", n), "desired", desired, "available", available, "ready", ready)
+
 		for i := 0; i < n; i++ {
 			newRunner, err := r.newRunner(rs)
 			if err != nil {
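The github.RunnerNotFound and github.RunnerOffline types matched with errors.As above are defined in this repository's github package, outside the hunks shown here. As a rough illustration only, an error type participates in this kind of matching simply by implementing error; the shape below is assumed, not taken from the diff:

	// Hypothetical shape, for illustration; the real definitions live in the github package.
	type RunnerOffline struct {
		runnerName string
	}

	func (e *RunnerOffline) Error() string {
		return fmt.Sprintf("runner %q is offline", e.runnerName)
	}

	var offlineException *RunnerOffline
	if errors.As(err, &offlineException) {
		// scale the runner down instead of failing the reconciliation
	}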
@@ -151,8 +222,10 @@ func (r *RunnerReplicaSetReconciler) Reconcile(req ctrl.Request) (ctrl.Result, e
 	updated.Status.ReadyReplicas = ready

 	if err := r.Status().Update(ctx, updated); err != nil {
-		log.Error(err, "Failed to update runner status")
-		return ctrl.Result{}, err
+		log.Info("Failed to update status. Retrying immediately", "error", err.Error())
+		return ctrl.Result{
+			Requeue: true,
+		}, nil
 	}
 }

@@ -179,26 +252,16 @@ func (r *RunnerReplicaSetReconciler) newRunner(rs v1alpha1.RunnerReplicaSet) (v1
 }

 func (r *RunnerReplicaSetReconciler) SetupWithManager(mgr ctrl.Manager) error {
-	r.Recorder = mgr.GetEventRecorderFor("runnerreplicaset-controller")
+	name := "runnerreplicaset-controller"
+	if r.Name != "" {
+		name = r.Name
+	}
+
+	r.Recorder = mgr.GetEventRecorderFor(name)
+
 	return ctrl.NewControllerManagedBy(mgr).
 		For(&v1alpha1.RunnerReplicaSet{}).
 		Owns(&v1alpha1.Runner{}).
+		Named(name).
 		Complete(r)
 }
-
-func (r *RunnerReplicaSetReconciler) isRunnerBusy(ctx context.Context, org, repo, name string) (bool, error) {
-	runners, err := r.GitHubClient.ListRunners(ctx, org, repo)
-	r.Log.Info("runners", "github", runners)
-	if err != nil {
-		return false, err
-	}
-
-	for _, runner := range runners {
-		if runner.GetName() == name {
-			return runner.GetBusy(), nil
-		}
-	}
-
-	return false, fmt.Errorf("runner not found")
-}
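client.IgnoreNotFound, used in the deletion loop above, simply filters out the not-found case so that deleting an already-deleted runner is not treated as a failure. A rough equivalent, shown only to illustrate the semantics rather than the library's actual implementation:

	// Approximate behaviour of sigs.k8s.io/controller-runtime/pkg/client.IgnoreNotFound.
	func ignoreNotFound(err error) error {
		if kerrors.IsNotFound(err) {
			return nil
		}
		return err
	}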
@@ -47,7 +47,9 @@ func SetupTest(ctx context.Context) *corev1.Namespace {
 	err := k8sClient.Create(ctx, ns)
 	Expect(err).NotTo(HaveOccurred(), "failed to create test namespace")
 
-	mgr, err := ctrl.NewManager(cfg, ctrl.Options{})
+	mgr, err := ctrl.NewManager(cfg, ctrl.Options{
+		Namespace: ns.Name,
+	})
 	Expect(err).NotTo(HaveOccurred(), "failed to create manager")
 
 	runnersList = fake.NewRunnersList()
@@ -60,6 +62,7 @@ func SetupTest(ctx context.Context) *corev1.Namespace {
 		Log:          logf.Log,
 		Recorder:     mgr.GetEventRecorderFor("runnerreplicaset-controller"),
 		GitHubClient: ghClient,
+		Name:         "runnerreplicaset-" + ns.Name,
 	}
 	err = controller.SetupWithManager(mgr)
 	Expect(err).NotTo(HaveOccurred(), "failed to setup controller")
@@ -114,9 +117,19 @@ var _ = Context("Inside of a new namespace", func() {
 				},
 				Spec: actionsv1alpha1.RunnerReplicaSetSpec{
 					Replicas: intPtr(1),
+					Selector: &metav1.LabelSelector{
+						MatchLabels: map[string]string{
+							"foo": "bar",
+						},
+					},
 					Template: actionsv1alpha1.RunnerTemplate{
+						ObjectMeta: metav1.ObjectMeta{
+							Labels: map[string]string{
+								"foo": "bar",
+							},
+						},
 						Spec: actionsv1alpha1.RunnerSpec{
-							Repository: "foo/bar",
+							Repository: "test/valid",
 							Image:      "bar",
 							Env: []corev1.EnvVar{
 								{Name: "FOO", Value: "FOOVALUE"},
@@ -134,9 +147,26 @@ var _ = Context("Inside of a new namespace", func() {
 
 			Eventually(
 				func() int {
-					err := k8sClient.List(ctx, &runners, client.InNamespace(ns.Name))
+					selector, err := metav1.LabelSelectorAsSelector(
+						&metav1.LabelSelector{
+							MatchLabels: map[string]string{
+								"foo": "bar",
+							},
+						},
+					)
+					if err != nil {
+						logf.Log.Error(err, "failed to create labelselector")
+						return -1
+					}
+					err = k8sClient.List(
+						ctx,
+						&runners,
+						client.InNamespace(ns.Name),
+						client.MatchingLabelsSelector{Selector: selector},
+					)
 					if err != nil {
 						logf.Log.Error(err, "list runners")
+						return -1
 					}
 
 					for i, runner := range runners.Items {
@@ -175,7 +205,23 @@ var _ = Context("Inside of a new namespace", func() {
 
 			Eventually(
 				func() int {
-					err := k8sClient.List(ctx, &runners, client.InNamespace(ns.Name))
+					selector, err := metav1.LabelSelectorAsSelector(
+						&metav1.LabelSelector{
+							MatchLabels: map[string]string{
+								"foo": "bar",
+							},
+						},
+					)
+					if err != nil {
+						logf.Log.Error(err, "failed to create labelselector")
+						return -1
+					}
+					err = k8sClient.List(
+						ctx,
+						&runners,
+						client.InNamespace(ns.Name),
+						client.MatchingLabelsSelector{Selector: selector},
+					)
 					if err != nil {
 						logf.Log.Error(err, "list runners")
 					}
@@ -219,6 +265,7 @@ var _ = Context("Inside of a new namespace", func() {
 				err := k8sClient.List(ctx, &runners, client.InNamespace(ns.Name))
 				if err != nil {
 					logf.Log.Error(err, "list runners")
+					return -1
 				}
 
 				for i, runner := range runners.Items {
@@ -17,6 +17,8 @@ limitations under the License.
 package controllers
 
 import (
+	"github.com/onsi/ginkgo/config"
+	"os"
 	"path/filepath"
 	"testing"
 
@@ -43,6 +45,8 @@ var testEnv *envtest.Environment
 func TestAPIs(t *testing.T) {
 	RegisterFailHandler(Fail)
 
+	config.GinkgoConfig.FocusString = os.Getenv("GINKGO_FOCUS")
+
 	RunSpecsWithDefaultAndCustomReporters(t,
 		"Controller Suite",
 		[]Reporter{envtest.NewlineReporter{}})
@@ -51,9 +55,17 @@ func TestAPIs(t *testing.T) {
 var _ = BeforeSuite(func(done Done) {
 	logf.SetLogger(zap.LoggerTo(GinkgoWriter, true))
 
+	var apiServerFlags []string
+
+	apiServerFlags = append(apiServerFlags, envtest.DefaultKubeAPIServerFlags...)
+	// Avoids the following error:
+	// 2021-03-19T15:14:11.673+0900 ERROR controller-runtime.controller Reconciler error {"controller": "testns-tvjzjrunner", "request": "testns-gdnyx/example-runnerdeploy-zps4z-j5562", "error": "Pod \"example-runnerdeploy-zps4z-j5562\" is invalid: [spec.containers[1].image: Required value, spec.containers[1].securityContext.privileged: Forbidden: disallowed by cluster policy]"}
+	apiServerFlags = append(apiServerFlags, "--allow-privileged=true")
+
 	By("bootstrapping test environment")
 	testEnv = &envtest.Environment{
 		CRDDirectoryPaths: []string{filepath.Join("..", "config", "crd", "bases")},
+		KubeAPIServerFlags: apiServerFlags,
 	}
 
 	var err error
373  controllers/testdata/org_webhook_check_run_payload.json  (vendored, new file)
@@ -0,0 +1,373 @@
{
  "action": "created",
  "check_run": {
    "id": 1234567890,
    "node_id": "ABCDEFGHIJKLMNOPQRSTUVWXYZ",
    "head_sha": "1234567890123456789012345678901234567890",
    "external_id": "92058b04-f16a-5035-546c-cae3ad5e2f5f",
    "url": "https://api.github.com/repos/MYORG/MYREPO/check-runs/123467890",
    "html_url": "https://github.com/MYORG/MYREPO/runs/123467890",
    "details_url": "https://github.com/MYORG/MYREPO/runs/123467890",
    "status": "queued",
    "conclusion": null,
    "started_at": "2021-02-18T06:16:31Z",
    "completed_at": null,
    "output": { "title": null, "summary": null, "text": null, "annotations_count": 0, "annotations_url": "https://api.github.com/repos/MYORG/MYREPO/check-runs/123467890/annotations" },
    "name": "validate",
    "check_suite": {
      "id": 1234567890,
      "node_id": "ABCDEFGHIJKLMNOPQRSTUVWXYZ",
      "head_branch": "MYNAME/actions-runner-controller-webhook",
      "head_sha": "1234567890123456789012345678901234567890",
      "status": "queued",
      "conclusion": null,
      "url": "https://api.github.com/repos/MYORG/MYREPO/check-suites/1234567890",
      "before": "1234567890123456789012345678901234567890",
      "after": "1234567890123456789012345678901234567890",
      "pull_requests": [
        {
          "url": "https://api.github.com/repos/MYORG/MYREPO/pulls/2033",
          "id": 1234567890,
          "number": 1234567890,
          "head": { "ref": "feature", "sha": "1234567890123456789012345678901234567890", "repo": { "id": 1234567890, "url": "https://api.github.com/repos/MYORG/MYREPO", "name": "MYREPO" } },
          "base": { "ref": "master", "sha": "1234567890123456789012345678901234567890", "repo": { "id": 1234567890, "url": "https://api.github.com/repos/MYORG/MYREPO", "name": "MYREPO" } }
        }
      ],
      "app": {
        "id": 1234567890,
        "slug": "github-actions",
        "node_id": "ABCDEFGHIJKLMNOPQRSTUVWXYZ",
        "owner": { "login": "github", "id": 1234567890, "node_id": "ABCDEFGHIJKLMNOPQRSTUVWXYZ", "avatar_url": "https://avatars.githubusercontent.com/u/123467890?v=4", "gravatar_id": "", "url": "https://api.github.com/users/github", "html_url": "https://github.com/github", "followers_url": "https://api.github.com/users/github/followers", "following_url": "https://api.github.com/users/github/following{/other_user}", "gists_url": "https://api.github.com/users/github/gists{/gist_id}", "starred_url": "https://api.github.com/users/github/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/github/subscriptions", "organizations_url": "https://api.github.com/users/github/orgs", "repos_url": "https://api.github.com/users/github/repos", "events_url": "https://api.github.com/users/github/events{/privacy}", "received_events_url": "https://api.github.com/users/github/received_events", "type": "Organization", "site_admin": false },
        "name": "GitHub Actions",
        "description": "Automate your workflow from idea to production",
        "external_url": "https://help.github.com/en/actions",
        "html_url": "https://github.com/apps/github-actions",
        "created_at": "2018-07-30T09:30:17Z",
        "updated_at": "2019-12-10T19:04:12Z",
        "permissions": { "actions": "write", "checks": "write", "contents": "write", "deployments": "write", "issues": "write", "metadata": "read", "organization_packages": "write", "packages": "write", "pages": "write", "pull_requests": "write", "repository_hooks": "write", "repository_projects": "write", "security_events": "write", "statuses": "write", "vulnerability_alerts": "read" },
        "events": [ "check_run", "check_suite", "create", "delete", "deployment", "deployment_status", "fork", "gollum", "issues", "issue_comment", "label", "milestone", "page_build", "project", "project_card", "project_column", "public", "pull_request", "pull_request_review", "pull_request_review_comment", "push", "registry_package", "release", "repository", "repository_dispatch", "status", "watch", "workflow_dispatch", "workflow_run" ]
      },
      "created_at": "2021-02-18T06:15:32Z",
      "updated_at": "2021-02-18T06:16:31Z"
    },
    "app": {
      "id": 1234567890,
      "slug": "github-actions",
      "node_id": "ABCDEFGHIJKLMNOPQRSTUVWXYZ",
      "owner": { "login": "github", "id": 1234567890, "node_id": "ABCDEFGHIJKLMNOPQRSTUVWXYZ", "avatar_url": "https://avatars.githubusercontent.com/u/1234567890?v=4", "gravatar_id": "", "url": "https://api.github.com/users/github", "html_url": "https://github.com/github", "followers_url": "https://api.github.com/users/github/followers", "following_url": "https://api.github.com/users/github/following{/other_user}", "gists_url": "https://api.github.com/users/github/gists{/gist_id}", "starred_url": "https://api.github.com/users/github/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/github/subscriptions", "organizations_url": "https://api.github.com/users/github/orgs", "repos_url": "https://api.github.com/users/github/repos", "events_url": "https://api.github.com/users/github/events{/privacy}", "received_events_url": "https://api.github.com/users/github/received_events", "type": "Organization", "site_admin": false },
      "name": "GitHub Actions",
      "description": "Automate your workflow from idea to production",
      "external_url": "https://help.github.com/en/actions",
      "html_url": "https://github.com/apps/github-actions",
      "created_at": "2018-07-30T09:30:17Z",
      "updated_at": "2019-12-10T19:04:12Z",
      "permissions": { "actions": "write", "checks": "write", "contents": "write", "deployments": "write", "issues": "write", "metadata": "read", "organization_packages": "write", "packages": "write", "pages": "write", "pull_requests": "write", "repository_hooks": "write", "repository_projects": "write", "security_events": "write", "statuses": "write", "vulnerability_alerts": "read" },
      "events": [ "check_run", "check_suite", "create", "delete", "deployment", "deployment_status", "fork", "gollum", "issues", "issue_comment", "label", "milestone", "page_build", "project", "project_card", "project_column", "public", "pull_request", "pull_request_review", "pull_request_review_comment", "push", "registry_package", "release", "repository", "repository_dispatch", "status", "watch", "workflow_dispatch", "workflow_run" ]
    },
    "pull_requests": [
      {
        "url": "https://api.github.com/repos/MYORG/MYREPO/pulls/1234567890",
        "id": 1234567890,
        "number": 1234567890,
        "head": { "ref": "feature", "sha": "1234567890123456789012345678901234567890", "repo": { "id": 1234567890, "url": "https://api.github.com/repos/MYORG/MYREPO", "name": "MYREPO" } },
        "base": { "ref": "master", "sha": "1234567890123456789012345678901234567890", "repo": { "id": 1234567890, "url": "https://api.github.com/repos/MYORG/MYREPO", "name": "MYREPO" } }
      }
    ]
  },
  "repository": {
    "id": 1234567890,
    "node_id": "ABCDEFGHIJKLMNOPQRSTUVWXYZ",
    "name": "MYREPO",
    "full_name": "MYORG/MYREPO",
    "private": true,
    "owner": { "login": "MYORG", "id": 1234567890, "node_id": "ABCDEFGHIJKLMNOPQRSTUVWXYZ", "avatar_url": "https://avatars.githubusercontent.com/u/1234567890?v=4", "gravatar_id": "", "url": "https://api.github.com/users/MYORG", "html_url": "https://github.com/MYORG", "followers_url": "https://api.github.com/users/MYORG/followers", "following_url": "https://api.github.com/users/MYORG/following{/other_user}", "gists_url": "https://api.github.com/users/MYORG/gists{/gist_id}", "starred_url": "https://api.github.com/users/MYORG/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/MYORG/subscriptions", "organizations_url": "https://api.github.com/users/MYORG/orgs", "repos_url": "https://api.github.com/users/MYORG/repos", "events_url": "https://api.github.com/users/MYORG/events{/privacy}", "received_events_url": "https://api.github.com/users/MYORG/received_events", "type": "Organization", "site_admin": false },
    "html_url": "https://github.com/MYORG/MYREPO",
    "description": "MYREPO",
    "fork": false,
    "url": "https://api.github.com/repos/MYORG/MYREPO",
    "forks_url": "https://api.github.com/repos/MYORG/MYREPO/forks",
    "keys_url": "https://api.github.com/repos/MYORG/MYREPO/keys{/key_id}",
    "collaborators_url": "https://api.github.com/repos/MYORG/MYREPO/collaborators{/collaborator}",
    "teams_url": "https://api.github.com/repos/MYORG/MYREPO/teams",
    "hooks_url": "https://api.github.com/repos/MYORG/MYREPO/hooks",
    "issue_events_url": "https://api.github.com/repos/MYORG/MYREPO/issues/events{/number}",
    "events_url": "https://api.github.com/repos/MYORG/MYREPO/events",
    "assignees_url": "https://api.github.com/repos/MYORG/MYREPO/assignees{/user}",
    "branches_url": "https://api.github.com/repos/MYORG/MYREPO/branches{/branch}",
    "tags_url": "https://api.github.com/repos/MYORG/MYREPO/tags",
    "blobs_url": "https://api.github.com/repos/MYORG/MYREPO/git/blobs{/sha}",
    "git_tags_url": "https://api.github.com/repos/MYORG/MYREPO/git/tags{/sha}",
    "git_refs_url": "https://api.github.com/repos/MYORG/MYREPO/git/refs{/sha}",
    "trees_url": "https://api.github.com/repos/MYORG/MYREPO/git/trees{/sha}",
    "statuses_url": "https://api.github.com/repos/MYORG/MYREPO/statuses/{sha}",
    "languages_url": "https://api.github.com/repos/MYORG/MYREPO/languages",
    "stargazers_url": "https://api.github.com/repos/MYORG/MYREPO/stargazers",
    "contributors_url": "https://api.github.com/repos/MYORG/MYREPO/contributors",
    "subscribers_url": "https://api.github.com/repos/MYORG/MYREPO/subscribers",
    "subscription_url": "https://api.github.com/repos/MYORG/MYREPO/subscription",
    "commits_url": "https://api.github.com/repos/MYORG/MYREPO/commits{/sha}",
    "git_commits_url": "https://api.github.com/repos/MYORG/MYREPO/git/commits{/sha}",
    "comments_url": "https://api.github.com/repos/MYORG/MYREPO/comments{/number}",
    "issue_comment_url": "https://api.github.com/repos/MYORG/MYREPO/issues/comments{/number}",
    "contents_url": "https://api.github.com/repos/MYORG/MYREPO/contents/{+path}",
    "compare_url": "https://api.github.com/repos/MYORG/MYREPO/compare/{base}...{head}",
    "merges_url": "https://api.github.com/repos/MYORG/MYREPO/merges",
    "archive_url": "https://api.github.com/repos/MYORG/MYREPO/{archive_format}{/ref}",
    "downloads_url": "https://api.github.com/repos/MYORG/MYREPO/downloads",
    "issues_url": "https://api.github.com/repos/MYORG/MYREPO/issues{/number}",
    "pulls_url": "https://api.github.com/repos/MYORG/MYREPO/pulls{/number}",
    "milestones_url": "https://api.github.com/repos/MYORG/MYREPO/milestones{/number}",
    "notifications_url": "https://api.github.com/repos/MYORG/MYREPO/notifications{?since,all,participating}",
    "labels_url": "https://api.github.com/repos/MYORG/MYREPO/labels{/name}",
    "releases_url": "https://api.github.com/repos/MYORG/MYREPO/releases{/id}",
    "deployments_url": "https://api.github.com/repos/MYORG/MYREPO/deployments",
    "created_at": "2017-08-10T02:21:10Z",
    "updated_at": "2021-02-18T04:40:55Z",
    "pushed_at": "2021-02-18T06:15:30Z",
    "git_url": "git://github.com/MYORG/MYREPO.git",
    "ssh_url": "git@github.com:MYORG/MYREPO.git",
    "clone_url": "https://github.com/MYORG/MYREPO.git",
    "svn_url": "https://github.com/MYORG/MYREPO",
    "homepage": null,
    "size": 30782,
    "stargazers_count": 2,
    "watchers_count": 2,
    "language": "Shell",
    "has_issues": false,
    "has_projects": true,
    "has_downloads": true,
    "has_wiki": false,
    "has_pages": false,
    "forks_count": 0,
    "mirror_url": null,
    "archived": false,
    "disabled": false,
    "open_issues_count": 6,
    "license": null,
    "forks": 0,
    "open_issues": 6,
    "watchers": 2,
    "default_branch": "master"
  },
  "organization": { "login": "MYORG", "id": 1234567890, "node_id": "ABCDEFGHIJKLMNOPQRSTUVWXYZ", "url": "https://api.github.com/orgs/MYORG", "repos_url": "https://api.github.com/orgs/MYORG/repos", "events_url": "https://api.github.com/orgs/MYORG/events", "hooks_url": "https://api.github.com/orgs/MYORG/hooks", "issues_url": "https://api.github.com/orgs/MYORG/issues", "members_url": "https://api.github.com/orgs/MYORG/members{/member}", "public_members_url": "https://api.github.com/orgs/MYORG/public_members{/member}", "avatar_url": "https://avatars.githubusercontent.com/u/1234567890?v=4", "description": "" },
  "sender": { "login": "MYNAME", "id": 1234567890, "node_id": "ABCDEFGHIJKLMNOPQRSTUVWXYZ", "avatar_url": "https://avatars.githubusercontent.com/u/1234567890?v=4", "gravatar_id": "", "url": "https://api.github.com/users/MYNAME", "html_url": "https://github.com/MYNAME", "followers_url": "https://api.github.com/users/MYNAME/followers", "following_url": "https://api.github.com/users/MYNAME/following{/other_user}", "gists_url": "https://api.github.com/users/MYNAME/gists{/gist_id}", "starred_url": "https://api.github.com/users/MYNAME/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/MYNAME/subscriptions", "organizations_url": "https://api.github.com/users/MYNAME/orgs", "repos_url": "https://api.github.com/users/MYNAME/repos", "events_url": "https://api.github.com/users/MYNAME/events{/privacy}", "received_events_url": "https://api.github.com/users/MYNAME/received_events", "type": "User", "site_admin": false }
}
360  controllers/testdata/repo_webhook_check_run_payload.json  (vendored, new file)
@@ -0,0 +1,360 @@
{
  "action": "completed",
  "check_run": {
    "id": 1949438388,
    "node_id": "ABCDEFGHIJKLMNOPQRSTUVWXYZ",
    "head_sha": "1234567890123456789012345678901234567890",
    "external_id": "ca395085-040a-526b-2ce8-bdc85f692774",
    "url": "https://api.github.com/repos/MYORG/MYREPO/check-runs/123467890",
    "html_url": "https://github.com/MYORG/MYREPO/runs/123467890",
    "details_url": "https://github.com/MYORG/MYREPO/runs/123467890",
    "status": "queued",
    "conclusion": null,
    "started_at": "2021-02-18T06:16:31Z",
    "completed_at": null,
    "output": { "title": null, "summary": null, "text": null, "annotations_count": 0, "annotations_url": "https://api.github.com/repos/MYORG/MYREPO/check-runs/123467890/annotations" },
    "name": "build",
    "name": "validate",
    "check_suite": {
      "id": 1234567890,
      "node_id": "ABCDEFGHIJKLMNOPQRSTUVWXYZ",
      "head_branch": "MYNAME/actions-runner-controller-webhook",
      "head_sha": "1234567890123456789012345678901234567890",
      "status": "queued",
      "conclusion": null,
      "url": "https://api.github.com/repos/MYORG/MYREPO/check-suites/1234567890",
      "before": "1234567890123456789012345678901234567890",
      "after": "1234567890123456789012345678901234567890",
      "pull_requests": [
        {
          "url": "https://api.github.com/repos/MYORG/MYREPO/pulls/2033",
          "id": 1234567890,
          "number": 1234567890,
          "head": { "ref": "feature", "sha": "1234567890123456789012345678901234567890", "repo": { "id": 1234567890, "url": "https://api.github.com/repos/MYORG/MYREPO", "name": "MYREPO" } },
          "base": { "ref": "master", "sha": "1234567890123456789012345678901234567890", "repo": { "id": 1234567890, "url": "https://api.github.com/repos/MYORG/MYREPO", "name": "MYREPO" } }
        }
      ],
      "app": {
        "id": 1234567890,
        "slug": "github-actions",
        "node_id": "ABCDEFGHIJKLMNOPQRSTUVWXYZ",
        "owner": { "login": "github", "id": 1234567890, "node_id": "ABCDEFGHIJKLMNOPQRSTUVWXYZ", "avatar_url": "https://avatars.githubusercontent.com/u/123467890?v=4", "gravatar_id": "", "url": "https://api.github.com/users/github", "html_url": "https://github.com/github", "followers_url": "https://api.github.com/users/github/followers", "following_url": "https://api.github.com/users/github/following{/other_user}", "gists_url": "https://api.github.com/users/github/gists{/gist_id}", "starred_url": "https://api.github.com/users/github/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/github/subscriptions", "organizations_url": "https://api.github.com/users/github/orgs", "repos_url": "https://api.github.com/users/github/repos", "events_url": "https://api.github.com/users/github/events{/privacy}", "received_events_url": "https://api.github.com/users/github/received_events", "type": "Organization", "site_admin": false },
        "name": "GitHub Actions",
        "description": "Automate your workflow from idea to production",
        "external_url": "https://help.github.com/en/actions",
        "html_url": "https://github.com/apps/github-actions",
        "created_at": "2018-07-30T09:30:17Z",
        "updated_at": "2019-12-10T19:04:12Z",
        "permissions": { "actions": "write", "checks": "write", "contents": "write", "deployments": "write", "issues": "write", "metadata": "read", "organization_packages": "write", "packages": "write", "pages": "write", "pull_requests": "write", "repository_hooks": "write", "repository_projects": "write", "security_events": "write", "statuses": "write", "vulnerability_alerts": "read" },
        "events": [ "check_run", "check_suite", "create", "delete", "deployment", "deployment_status", "fork", "gollum", "issues", "issue_comment", "label", "milestone", "page_build", "project", "project_card", "project_column", "public", "pull_request", "pull_request_review", "pull_request_review_comment", "push", "registry_package", "release", "repository", "repository_dispatch", "status", "watch", "workflow_dispatch", "workflow_run" ]
      },
      "created_at": "2021-02-18T06:15:32Z",
      "updated_at": "2021-02-18T06:16:31Z"
    },
    "app": {
      "id": 1234567890,
      "slug": "github-actions",
      "node_id": "ABCDEFGHIJKLMNOPQRSTUVWXYZ",
      "owner": { "login": "github", "id": 1234567890, "node_id": "ABCDEFGHIJKLMNOPQRSTUVWXYZ", "avatar_url": "https://avatars.githubusercontent.com/u/1234567890?v=4", "gravatar_id": "", "url": "https://api.github.com/users/github", "html_url": "https://github.com/github", "followers_url": "https://api.github.com/users/github/followers", "following_url": "https://api.github.com/users/github/following{/other_user}", "gists_url": "https://api.github.com/users/github/gists{/gist_id}", "starred_url": "https://api.github.com/users/github/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/github/subscriptions", "organizations_url": "https://api.github.com/users/github/orgs", "repos_url": "https://api.github.com/users/github/repos", "events_url": "https://api.github.com/users/github/events{/privacy}", "received_events_url": "https://api.github.com/users/github/received_events", "type": "Organization", "site_admin": false },
      "name": "GitHub Actions",
      "description": "Automate your workflow from idea to production",
      "external_url": "https://help.github.com/en/actions",
      "html_url": "https://github.com/apps/github-actions",
      "created_at": "2018-07-30T09:30:17Z",
      "updated_at": "2019-12-10T19:04:12Z",
      "permissions": { "actions": "write", "checks": "write", "contents": "write", "deployments": "write", "issues": "write", "metadata": "read", "organization_packages": "write", "packages": "write", "pages": "write", "pull_requests": "write", "repository_hooks": "write", "repository_projects": "write", "security_events": "write", "statuses": "write", "vulnerability_alerts": "read" },
      "events": [ "check_run", "check_suite", "create", "delete", "deployment", "deployment_status", "fork", "gollum", "issues", "issue_comment", "label", "milestone", "page_build", "project", "project_card", "project_column", "public", "pull_request", "pull_request_review", "pull_request_review_comment", "push", "registry_package", "release", "repository", "repository_dispatch", "status", "watch", "workflow_dispatch", "workflow_run" ]
    },
    "pull_requests": [
      {
        "url": "https://api.github.com/repos/MYORG/MYREPO/pulls/1234567890",
        "id": 1234567890,
        "number": 1234567890,
        "head": { "ref": "feature", "sha": "1234567890123456789012345678901234567890", "repo": { "id": 1234567890, "url": "https://api.github.com/repos/MYORG/MYREPO", "name": "MYREPO" } },
        "base": { "ref": "master", "sha": "1234567890123456789012345678901234567890", "repo": { "id": 1234567890, "url": "https://api.github.com/repos/MYORG/MYREPO", "name": "MYREPO" } }
      }
    ]
  },
  "repository": {
    "id": 1234567890,
    "node_id": "ABCDEFGHIJKLMNOPQRSTUVWXYZ",
    "name": "MYREPO",
    "full_name": "MYORG/MYREPO",
    "private": true,
    "owner": { "login": "MYUSER", "id": 1234567890, "node_id": "ABCDEFGHIJKLMNOPQRSTUVWXYZ", "avatar_url": "https://avatars.githubusercontent.com/u/1234567890?v=4", "gravatar_id": "", "url": "https://api.github.com/users/MYUSER", "html_url": "https://github.com/MYUSER", "followers_url": "https://api.github.com/users/MYUSER/followers", "following_url": "https://api.github.com/users/MYUSER/following{/other_user}", "gists_url": "https://api.github.com/users/MYUSER/gists{/gist_id}", "starred_url": "https://api.github.com/users/MYUSER/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/MYUSER/subscriptions", "organizations_url": "https://api.github.com/users/MYUSER/orgs", "repos_url": "https://api.github.com/users/MYUSER/repos", "events_url": "https://api.github.com/users/MYUSER/events{/privacy}", "received_events_url": "https://api.github.com/users/MYUSER/received_events", "type": "User", "site_admin": false },
    "html_url": "https://github.com/MYUSER/MYREPO",
    "description": null,
    "fork": false,
    "url": "https://api.github.com/repos/MYUSER/MYREPO",
    "forks_url": "https://api.github.com/repos/MYUSER/MYREPO/forks",
    "keys_url": "https://api.github.com/repos/MYUSER/MYREPO/keys{/key_id}",
    "collaborators_url": "https://api.github.com/repos/MYUSER/MYREPO/collaborators{/collaborator}",
    "teams_url": "https://api.github.com/repos/MYUSER/MYREPO/teams",
    "hooks_url": "https://api.github.com/repos/MYUSER/MYREPO/hooks",
    "issue_events_url": "https://api.github.com/repos/MYUSER/MYREPO/issues/events{/number}",
    "events_url": "https://api.github.com/repos/MYUSER/MYREPO/events",
    "assignees_url": "https://api.github.com/repos/MYUSER/MYREPO/assignees{/user}",
    "branches_url": "https://api.github.com/repos/MYUSER/MYREPO/branches{/branch}",
    "tags_url": "https://api.github.com/repos/MYUSER/MYREPO/tags",
    "blobs_url": "https://api.github.com/repos/MYUSER/MYREPO/git/blobs{/sha}",
    "git_tags_url": "https://api.github.com/repos/MYUSER/MYREPO/git/tags{/sha}",
    "git_refs_url": "https://api.github.com/repos/MYUSER/MYREPO/git/refs{/sha}",
    "trees_url": "https://api.github.com/repos/MYUSER/MYREPO/git/trees{/sha}",
    "statuses_url": "https://api.github.com/repos/MYUSER/MYREPO/statuses/{sha}",
    "languages_url": "https://api.github.com/repos/MYUSER/MYREPO/languages",
    "stargazers_url": "https://api.github.com/repos/MYUSER/MYREPO/stargazers",
    "contributors_url": "https://api.github.com/repos/MYUSER/MYREPO/contributors",
    "subscribers_url": "https://api.github.com/repos/MYUSER/MYREPO/subscribers",
    "subscription_url": "https://api.github.com/repos/MYUSER/MYREPO/subscription",
    "commits_url": "https://api.github.com/repos/MYUSER/MYREPO/commits{/sha}",
    "git_commits_url": "https://api.github.com/repos/MYUSER/MYREPO/git/commits{/sha}",
    "comments_url": "https://api.github.com/repos/MYUSER/MYREPO/comments{/number}",
    "issue_comment_url": "https://api.github.com/repos/MYUSER/MYREPO/issues/comments{/number}",
    "contents_url": "https://api.github.com/repos/MYUSER/MYREPO/contents/{+path}",
    "compare_url": "https://api.github.com/repos/MYUSER/MYREPO/compare/{base}...{head}",
    "merges_url": "https://api.github.com/repos/MYUSER/MYREPO/merges",
    "archive_url": "https://api.github.com/repos/MYUSER/MYREPO/{archive_format}{/ref}",
    "downloads_url": "https://api.github.com/repos/MYUSER/MYREPO/downloads",
    "issues_url": "https://api.github.com/repos/MYUSER/MYREPO/issues{/number}",
    "pulls_url": "https://api.github.com/repos/MYUSER/MYREPO/pulls{/number}",
    "milestones_url": "https://api.github.com/repos/MYUSER/MYREPO/milestones{/number}",
    "notifications_url": "https://api.github.com/repos/MYUSER/MYREPO/notifications{?since,all,participating}",
    "labels_url": "https://api.github.com/repos/MYUSER/MYREPO/labels{/name}",
    "releases_url": "https://api.github.com/repos/MYUSER/MYREPO/releases{/id}",
    "deployments_url": "https://api.github.com/repos/MYUSER/MYREPO/deployments",
    "created_at": "2021-02-18T06:16:31Z",
    "updated_at": "2021-02-18T06:16:31Z",
    "pushed_at": "2021-02-18T06:16:31Z",
    "git_url": "git://github.com/MYUSER/MYREPO.git",
    "ssh_url": "git@github.com:MYUSER/MYREPO.git",
    "clone_url": "https://github.com/MYUSER/MYREPO.git",
    "svn_url": "https://github.com/MYUSER/MYREPO",
    "homepage": null,
    "size": 4,
    "stargazers_count": 0,
    "watchers_count": 0,
    "language": null,
    "has_issues": true,
    "has_projects": true,
    "has_downloads": true,
    "has_wiki": true,
    "has_pages": false,
    "forks_count": 0,
    "mirror_url": null,
    "archived": false,
    "disabled": false,
    "open_issues_count": 0,
    "license": null,
    "forks": 0,
    "open_issues": 0,
    "watchers": 0,
    "default_branch": "main"
  },
  "sender": { "login": "MYUSER", "id": 1234567890, "node_id": "ABCDEFGHIJKLMNOPQRSTUVWXYZ", "avatar_url": "https://avatars.githubusercontent.com/u/1234567890?v=4", "gravatar_id": "", "url": "https://api.github.com/users/MYUSER", "html_url": "https://github.com/MYUSER", "followers_url": "https://api.github.com/users/MYUSER/followers", "following_url": "https://api.github.com/users/MYUSER/following{/other_user}", "gists_url": "https://api.github.com/users/MYUSER/gists{/gist_id}", "starred_url": "https://api.github.com/users/MYUSER/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/MYUSER/subscriptions", "organizations_url": "https://api.github.com/users/MYUSER/orgs", "repos_url": "https://api.github.com/users/MYUSER/repos", "events_url": "https://api.github.com/users/MYUSER/events{/privacy}", "received_events_url": "https://api.github.com/users/MYUSER/received_events", "type": "User", "site_admin": false }
}
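These payloads look like fixtures for webhook-handler tests. A hedged sketch of how a test could replay one of them against an HTTP handler follows; the handler argument and the status assertion are assumptions, not shown in this compare view:

    import (
        "bytes"
        "io/ioutil"
        "net/http"
        "net/http/httptest"
        "path/filepath"
        "testing"
    )

    // replayCheckRunPayload posts a recorded check_run webhook body to the given handler.
    func replayCheckRunPayload(t *testing.T, handler http.Handler) {
        payload, err := ioutil.ReadFile(filepath.Join("testdata", "org_webhook_check_run_payload.json"))
        if err != nil {
            t.Fatal(err)
        }

        req := httptest.NewRequest("POST", "/", bytes.NewReader(payload))
        req.Header.Set("Content-Type", "application/json")
        req.Header.Set("X-GitHub-Event", "check_run") // GitHub names the delivered event in this header

        rec := httptest.NewRecorder()
        handler.ServeHTTP(rec, req)

        if rec.Code != http.StatusOK {
            t.Fatalf("unexpected status: %d", rec.Code)
        }
    }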
@@ -24,13 +24,34 @@ const (
 `
 )
 
-type Handler struct {
+type ListRunnersHandler struct {
 	Status int
 	Body   string
 }
 
+func (h *ListRunnersHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+	w.WriteHeader(h.Status)
+	fmt.Fprintf(w, h.Body)
+}
+
+type Handler struct {
+	Status int
+	Body   string
+
+	Statuses map[string]string
+}
+
 func (h *Handler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
 	w.WriteHeader(h.Status)
+
+	status := req.URL.Query().Get("status")
+	if h.Statuses != nil {
+		if body, ok := h.Statuses[status]; ok {
+			fmt.Fprintf(w, body)
+			return
+		}
+	}
+
 	fmt.Fprintf(w, h.Body)
 }
 
@@ -92,12 +113,21 @@ func NewServer(opts ...Option) *httptest.Server {
 			Status: http.StatusBadRequest,
 			Body:   "",
 		},
+		"/enterprises/test/actions/runners/registration-token": &Handler{
+			Status: http.StatusCreated,
+			Body:   fmt.Sprintf("{\"token\": \"%s\", \"expires_at\": \"%s\"}", RegistrationToken, time.Now().Add(time.Hour*1).Format(time.RFC3339)),
+		},
+		"/enterprises/invalid/actions/runners/registration-token": &Handler{
+			Status: http.StatusOK,
+			Body:   fmt.Sprintf("{\"token\": \"%s\", \"expires_at\": \"%s\"}", RegistrationToken, time.Now().Add(time.Hour*1).Format(time.RFC3339)),
+		},
+		"/enterprises/error/actions/runners/registration-token": &Handler{
+			Status: http.StatusBadRequest,
+			Body:   "",
+		},
 
 		// For ListRunners
-		"/repos/test/valid/actions/runners": &Handler{
-			Status: http.StatusOK,
-			Body:   RunnersListBody,
-		},
+		"/repos/test/valid/actions/runners": config.FixedResponses.ListRunners,
 		"/repos/test/invalid/actions/runners": &Handler{
 			Status: http.StatusNoContent,
 			Body:   "",
@@ -118,6 +148,18 @@ func NewServer(opts ...Option) *httptest.Server {
 			Status: http.StatusBadRequest,
 			Body:   "",
 		},
+		"/enterprises/test/actions/runners": &Handler{
+			Status: http.StatusOK,
+			Body:   RunnersListBody,
+		},
+		"/enterprises/invalid/actions/runners": &Handler{
+			Status: http.StatusNoContent,
+			Body:   "",
+		},
+		"/enterprises/error/actions/runners": &Handler{
+			Status: http.StatusBadRequest,
+			Body:   "",
+		},
 
 		// For RemoveRunner
 		"/repos/test/valid/actions/runners/1": &Handler{
@@ -144,6 +186,18 @@ func NewServer(opts ...Option) *httptest.Server {
 			Status: http.StatusBadRequest,
 			Body:   "",
 		},
+		"/enterprises/test/actions/runners/1": &Handler{
+			Status: http.StatusNoContent,
+			Body:   "",
+		},
+		"/enterprises/invalid/actions/runners/1": &Handler{
+			Status: http.StatusOK,
+			Body:   "",
+		},
+		"/enterprises/error/actions/runners/1": &Handler{
+			Status: http.StatusBadRequest,
+			Body:   "",
+		},
 
 		// For auto-scaling based on the number of queued(pending) workflow runs
 		"/repos/test/valid/actions/runs": config.FixedResponses.ListRepositoryWorkflowRuns,
@@ -159,3 +213,10 @@ func NewServer(opts ...Option) *httptest.Server {
 
 	return httptest.NewServer(mux)
 }
+
+func DefaultListRunnersHandler() *ListRunnersHandler {
+	return &ListRunnersHandler{
+		Status: http.StatusOK,
+		Body:   RunnersListBody,
+	}
+}
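For orientation, the Statuses map added above lets one fake route answer differently depending on the ?status= query that GitHub's list-runs API accepts. A hedged usage sketch, relying on the WithListRepositoryWorkflowRunsResponse option extended in the next hunk; the three body variables are assumptions:

    srv := fake.NewServer(fake.WithListRepositoryWorkflowRunsResponse(
        http.StatusOK, allRunsBody, queuedRunsBody, inProgressRunsBody,
    ))
    defer srv.Close()

    // With Statuses populated, this request is answered with queuedRunsBody;
    // a request without a matching status key falls back to allRunsBody.
    resp, err := http.Get(srv.URL + "/repos/test/valid/actions/runs?status=queued")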
@@ -1,17 +1,24 @@
 package fake
 
+import "net/http"
+
 type FixedResponses struct {
 	ListRepositoryWorkflowRuns *Handler
 	ListWorkflowJobs           *MapHandler
+	ListRunners                http.Handler
 }
 
 type Option func(*ServerConfig)
 
-func WithListRepositoryWorkflowRunsResponse(status int, body string) Option {
+func WithListRepositoryWorkflowRunsResponse(status int, body, queued, in_progress string) Option {
 	return func(c *ServerConfig) {
 		c.FixedResponses.ListRepositoryWorkflowRuns = &Handler{
 			Status: status,
 			Body:   body,
+			Statuses: map[string]string{
+				"queued":      queued,
+				"in_progress": in_progress,
+			},
 		}
 	}
 }
@@ -25,6 +32,15 @@ func WithListWorkflowJobsResponse(status int, bodies map[int]string) Option {
 	}
 }
 
+func WithListRunnersResponse(status int, body string) Option {
+	return func(c *ServerConfig) {
+		c.FixedResponses.ListRunners = &ListRunnersHandler{
+			Status: status,
+			Body:   body,
+		}
+	}
+}
+
 func WithFixedResponses(responses *FixedResponses) Option {
 	return func(c *ServerConfig) {
 		c.FixedResponses = responses
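A hedged sketch of wiring the new option into a test; the import path is inferred from the module path seen elsewhere in this diff, and the helper itself is an illustrative assumption:

    import (
        "net/http"
        "net/http/httptest"

        "github.com/summerwind/actions-runner-controller/github/fake"
    )

    // newFakeGitHubAPI returns a fake GitHub API server whose ListRunners
    // endpoint answers with the canned runners list used by the existing tests.
    func newFakeGitHubAPI() *httptest.Server {
        return fake.NewServer(
            fake.WithListRunnersResponse(http.StatusOK, fake.RunnersListBody),
        )
    }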
@@ -2,6 +2,7 @@ package fake
 
 import (
 	"encoding/json"
+	"github.com/summerwind/actions-runner-controller/api/v1alpha1"
 	"net/http"
 	"net/http/httptest"
 	"strconv"
@@ -29,15 +30,15 @@ func (r *RunnersList) Add(runner *github.Runner) {
 func (r *RunnersList) GetServer() *httptest.Server {
 	router := mux.NewRouter()
 
-	router.Handle("/repos/{owner}/{repo}/actions/runners", r.handleList())
+	router.Handle("/repos/{owner}/{repo}/actions/runners", r.HandleList())
 	router.Handle("/repos/{owner}/{repo}/actions/runners/{id}", r.handleRemove())
-	router.Handle("/orgs/{org}/actions/runners", r.handleList())
+	router.Handle("/orgs/{org}/actions/runners", r.HandleList())
 	router.Handle("/orgs/{org}/actions/runners/{id}", r.handleRemove())
 
 	return httptest.NewServer(router)
 }
 
-func (r *RunnersList) handleList() http.HandlerFunc {
+func (r *RunnersList) HandleList() http.HandlerFunc {
 	return func(w http.ResponseWriter, res *http.Request) {
 		j, err := json.Marshal(github.Runners{
 			TotalCount: len(r.runners),
@@ -64,6 +65,20 @@ func (r *RunnersList) handleRemove() http.HandlerFunc {
 	}
 }
 
+func (r *RunnersList) Sync(runners []v1alpha1.Runner) {
+	r.runners = nil
+
+	for i, want := range runners {
+		r.Add(&github.Runner{
+			ID:     github.Int64(int64(i)),
+			Name:   github.String(want.Name),
+			OS:     github.String("linux"),
+			Status: github.String("online"),
+			Busy:   github.Bool(false),
+		})
+	}
+}
+
 func exists(runners []*github.Runner, runner *github.Runner) bool {
 	for _, r := range runners {
 		if *r.Name == *runner.Name {
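The new Sync helper mirrors cluster-side Runner objects into this fake registry. A hedged sketch of how an envtest suite could use it; the surrounding variables follow the test code shown earlier and are otherwise assumptions:

    var runnerList actionsv1alpha1.RunnerList
    if err := k8sClient.List(ctx, &runnerList, client.InNamespace(ns.Name)); err == nil {
        // Every Runner resource in the namespace now appears to the fake
        // GitHub ListRunners API as an online, idle runner.
        runnersList.Sync(runnerList.Items)
    }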
221  github/github.go
@@ -5,12 +5,14 @@ import (
 	"fmt"
 	"net/http"
 	"net/url"
+	"os"
 	"strings"
 	"sync"
 	"time"
 
 	"github.com/bradleyfalzon/ghinstallation"
 	"github.com/google/go-github/v33/github"
+	"github.com/summerwind/actions-runner-controller/github/metrics"
 	"golang.org/x/oauth2"
 )
 
@@ -34,20 +36,24 @@ type Client struct {
 
 // NewClient creates a Github Client
 func (c *Config) NewClient() (*Client, error) {
-	var (
-		httpClient *http.Client
-		client     *github.Client
-	)
-	githubBaseURL := "https://github.com/"
+	var transport http.RoundTripper
 	if len(c.Token) > 0 {
-		httpClient = oauth2.NewClient(context.Background(), oauth2.StaticTokenSource(
-			&oauth2.Token{AccessToken: c.Token},
-		))
+		transport = oauth2.NewClient(context.Background(), oauth2.StaticTokenSource(&oauth2.Token{AccessToken: c.Token})).Transport
 	} else {
-		tr, err := ghinstallation.NewKeyFromFile(http.DefaultTransport, c.AppID, c.AppInstallationID, c.AppPrivateKey)
-		if err != nil {
-			return nil, fmt.Errorf("authentication failed: %v", err)
+		var tr *ghinstallation.Transport
+		if _, err := os.Stat(c.AppPrivateKey); err == nil {
+			tr, err = ghinstallation.NewKeyFromFile(http.DefaultTransport, c.AppID, c.AppInstallationID, c.AppPrivateKey)
+			if err != nil {
+				return nil, fmt.Errorf("authentication failed: using private key at %s: %v", c.AppPrivateKey, err)
+			}
+		} else {
+			tr, err = ghinstallation.New(http.DefaultTransport, c.AppID, c.AppInstallationID, []byte(c.AppPrivateKey))
+			if err != nil {
+				return nil, fmt.Errorf("authentication failed: using private key of size %d (%s...): %v", len(c.AppPrivateKey), strings.Split(c.AppPrivateKey, "\n")[0], err)
+			}
 		}
 
 		if len(c.EnterpriseURL) > 0 {
 			githubAPIURL, err := getEnterpriseApiUrl(c.EnterpriseURL)
 			if err != nil {
@@ -55,9 +61,13 @@ func (c *Config) NewClient() (*Client, error) {
 			}
 			tr.BaseURL = githubAPIURL
 		}
-		httpClient = &http.Client{Transport: tr}
+		transport = tr
 	}
+
+	transport = metrics.Transport{Transport: transport}
+	httpClient := &http.Client{Transport: transport}
+
+	var client *github.Client
+	var githubBaseURL string
 	if len(c.EnterpriseURL) > 0 {
 		var err error
 		client, err = github.NewEnterpriseClient(c.EnterpriseURL, c.EnterpriseURL, httpClient)
@@ -67,6 +77,7 @@ func (c *Config) NewClient() (*Client, error) {
 		githubBaseURL = fmt.Sprintf("%s://%s%s", client.BaseURL.Scheme, client.BaseURL.Host, strings.TrimSuffix(client.BaseURL.Path, "api/v3/"))
 	} else {
 		client = github.NewClient(httpClient)
+		githubBaseURL = "https://github.com/"
 	}
 
 	return &Client{
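For orientation, a hedged sketch of constructing the client from the Config fields referenced above; the package alias, environment variable, and key path are illustrative assumptions:

    // Personal-access-token authentication.
    cfg := &github.Config{Token: os.Getenv("GITHUB_TOKEN")}

    // Or GitHub App authentication; AppPrivateKey may be a file path or the PEM
    // text itself, matching the os.Stat branch introduced above.
    // cfg := &github.Config{AppID: 12345, AppInstallationID: 67890, AppPrivateKey: "/etc/actions-runner/private-key.pem"}

    client, err := cfg.NewClient()
    if err != nil {
        log.Fatalf("failed to create GitHub client: %v", err)
    }
    _ = client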
@@ -78,24 +89,27 @@ func (c *Config) NewClient() (*Client, error) {
 }
 
 // GetRegistrationToken returns a registration token tied with the name of repository and runner.
-func (c *Client) GetRegistrationToken(ctx context.Context, org, repo, name string) (*github.RegistrationToken, error) {
+func (c *Client) GetRegistrationToken(ctx context.Context, enterprise, org, repo, name string) (*github.RegistrationToken, error) {
 	c.mu.Lock()
 	defer c.mu.Unlock()
 
-	key := getRegistrationKey(org, repo)
+	key := getRegistrationKey(org, repo, enterprise)
 	rt, ok := c.regTokens[key]
 
-	if ok && rt.GetExpiresAt().After(time.Now()) {
+	// we like to give runners a chance that are just starting up and may miss the expiration date by a bit
+	runnerStartupTimeout := 3 * time.Minute
+
+	if ok && rt.GetExpiresAt().After(time.Now().Add(runnerStartupTimeout)) {
 		return rt, nil
 	}
 
-	owner, repo, err := getOwnerAndRepo(org, repo)
+	enterprise, owner, repo, err := getEnterpriseOrganisationAndRepo(enterprise, org, repo)
 
 	if err != nil {
 		return rt, err
 	}
 
-	rt, res, err := c.createRegistrationToken(ctx, owner, repo)
+	rt, res, err := c.createRegistrationToken(ctx, enterprise, owner, repo)
 
 	if err != nil {
 		return nil, fmt.Errorf("failed to create registration token: %v", err)
@@ -114,17 +128,17 @@ func (c *Client) GetRegistrationToken(ctx context.Context, org, repo, name strin
 }
 
 // RemoveRunner removes a runner with specified runner ID from repository.
-func (c *Client) RemoveRunner(ctx context.Context, org, repo string, runnerID int64) error {
-	owner, repo, err := getOwnerAndRepo(org, repo)
+func (c *Client) RemoveRunner(ctx context.Context, enterprise, org, repo string, runnerID int64) error {
+	enterprise, owner, repo, err := getEnterpriseOrganisationAndRepo(enterprise, org, repo)
 
 	if err != nil {
 		return err
 	}
 
-	res, err := c.removeRunner(ctx, owner, repo, runnerID)
+	res, err := c.removeRunner(ctx, enterprise, owner, repo, runnerID)
 
 	if err != nil {
-		return fmt.Errorf("failed to remove runner: %v", err)
+		return fmt.Errorf("failed to remove runner: %w", err)
 	}
 
 	if res.StatusCode != 204 {
@@ -135,8 +149,8 @@ func (c *Client) RemoveRunner(ctx context.Context, org, repo string, runnerID in
 }
 
 // ListRunners returns a list of runners of specified owner/repository name.
-func (c *Client) ListRunners(ctx context.Context, org, repo string) ([]*github.Runner, error) {
-	owner, repo, err := getOwnerAndRepo(org, repo)
+func (c *Client) ListRunners(ctx context.Context, enterprise, org, repo string) ([]*github.Runner, error) {
+	enterprise, owner, repo, err := getEnterpriseOrganisationAndRepo(enterprise, org, repo)
 
 	if err != nil {
 		return nil, err
@@ -144,12 +158,12 @@ func (c *Client) ListRunners(ctx context.Context, org, repo string) ([]*github.R
 
 	var runners []*github.Runner
 
-	opts := github.ListOptions{PerPage: 10}
+	opts := github.ListOptions{PerPage: 100}
 	for {
-		list, res, err := c.listRunners(ctx, owner, repo, &opts)
+		list, res, err := c.listRunners(ctx, enterprise, owner, repo, &opts)
 
 		if err != nil {
-			return runners, fmt.Errorf("failed to list runners: %v", err)
+			return runners, fmt.Errorf("failed to list runners: %w", err)
 		}
 
 		runners = append(runners, list.Runners...)
@@ -174,49 +188,102 @@ func (c *Client) cleanup() {
 	}
 }
 
-// wrappers for github functions (switch between organization/repository mode)
+// wrappers for github functions (switch between enterprise/organization/repository mode)
 // so the calling functions don't need to switch and their code is a bit cleaner
 
-func (c *Client) createRegistrationToken(ctx context.Context, owner, repo string) (*github.RegistrationToken, *github.Response, error) {
+func (c *Client) createRegistrationToken(ctx context.Context, enterprise, org, repo string) (*github.RegistrationToken, *github.Response, error) {
 	if len(repo) > 0 {
-		return c.Client.Actions.CreateRegistrationToken(ctx, owner, repo)
-	}
-
-	return c.Client.Actions.CreateOrganizationRegistrationToken(ctx, owner)
-}
-
-func (c *Client) removeRunner(ctx context.Context, owner, repo string, runnerID int64) (*github.Response, error) {
-	if len(repo) > 0 {
-		return c.Client.Actions.RemoveRunner(ctx, owner, repo, runnerID)
-	}
-
-	return c.Client.Actions.RemoveOrganizationRunner(ctx, owner, runnerID)
-}
-
-func (c *Client) listRunners(ctx context.Context, owner, repo string, opts *github.ListOptions) (*github.Runners, *github.Response, error) {
-	if len(repo) > 0 {
-		return c.Client.Actions.ListRunners(ctx, owner, repo, opts)
-	}
-
-	return c.Client.Actions.ListOrganizationRunners(ctx, owner, opts)
-}
-
-// Validates owner and repo arguments. Both are optional, but at least one should be specified
-func getOwnerAndRepo(org, repo string) (string, string, error) {
-	if len(repo) > 0 {
-		return splitOwnerAndRepo(repo)
+		return c.Client.Actions.CreateRegistrationToken(ctx, org, repo)
 	}
 	if len(org) > 0 {
-		return org, "", nil
+		return c.Client.Actions.CreateOrganizationRegistrationToken(ctx, org)
 	}
-	return "", "", fmt.Errorf("organization and repository are both empty")
+	return c.Client.Enterprise.CreateRegistrationToken(ctx, enterprise)
 }
 
-func getRegistrationKey(org, repo string) string {
-	if len(org) > 0 {
-		return org
+func (c *Client) removeRunner(ctx context.Context, enterprise, org, repo string, runnerID int64) (*github.Response, error) {
+	if len(repo) > 0 {
+		return c.Client.Actions.RemoveRunner(ctx, org, repo, runnerID)
 	}
-	return repo
+	if len(org) > 0 {
+		return c.Client.Actions.RemoveOrganizationRunner(ctx, org, runnerID)
+	}
+	return c.Client.Enterprise.RemoveRunner(ctx, enterprise, runnerID)
+}
+
+func (c *Client) listRunners(ctx context.Context, enterprise, org, repo string, opts *github.ListOptions) (*github.Runners, *github.Response, error) {
+	if len(repo) > 0 {
+		return c.Client.Actions.ListRunners(ctx, org, repo, opts)
+	}
+	if len(org) > 0 {
+		return c.Client.Actions.ListOrganizationRunners(ctx, org, opts)
+	}
+	return c.Client.Enterprise.ListRunners(ctx, enterprise, opts)
+}
+
+func (c *Client) ListRepositoryWorkflowRuns(ctx context.Context, user string, repoName string) ([]*github.WorkflowRun, error) {
+	queued, err := c.listRepositoryWorkflowRuns(ctx, user, repoName, "queued")
+	if err != nil {
+		return nil, fmt.Errorf("listing queued workflow runs: %w", err)
+	}
|
|
||||||
|
inProgress, err := c.listRepositoryWorkflowRuns(ctx, user, repoName, "in_progress")
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("listing in_progress workflow runs: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
var workflowRuns []*github.WorkflowRun
|
||||||
|
|
||||||
|
workflowRuns = append(workflowRuns, queued...)
|
||||||
|
workflowRuns = append(workflowRuns, inProgress...)
|
||||||
|
|
||||||
|
return workflowRuns, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Client) listRepositoryWorkflowRuns(ctx context.Context, user string, repoName, status string) ([]*github.WorkflowRun, error) {
|
||||||
|
var workflowRuns []*github.WorkflowRun
|
||||||
|
|
||||||
|
opts := github.ListWorkflowRunsOptions{
|
||||||
|
ListOptions: github.ListOptions{
|
||||||
|
PerPage: 100,
|
||||||
|
},
|
||||||
|
Status: status,
|
||||||
|
}
|
||||||
|
|
||||||
|
for {
|
||||||
|
list, res, err := c.Client.Actions.ListRepositoryWorkflowRuns(ctx, user, repoName, &opts)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return workflowRuns, fmt.Errorf("failed to list workflow runs: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
workflowRuns = append(workflowRuns, list.WorkflowRuns...)
|
||||||
|
if res.NextPage == 0 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
opts.Page = res.NextPage
|
||||||
|
}
|
||||||
|
|
||||||
|
return workflowRuns, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Validates enterprise, organisation and repo arguments. Both are optional, but at least one should be specified
|
||||||
|
func getEnterpriseOrganisationAndRepo(enterprise, org, repo string) (string, string, string, error) {
|
||||||
|
if len(repo) > 0 {
|
||||||
|
owner, repository, err := splitOwnerAndRepo(repo)
|
||||||
|
return "", owner, repository, err
|
||||||
|
}
|
||||||
|
if len(org) > 0 {
|
||||||
|
return "", org, "", nil
|
||||||
|
}
|
||||||
|
if len(enterprise) > 0 {
|
||||||
|
return enterprise, "", "", nil
|
||||||
|
}
|
||||||
|
return "", "", "", fmt.Errorf("enterprise, organization and repository are all empty")
|
||||||
|
}
|
||||||
|
|
||||||
|
func getRegistrationKey(org, repo, enterprise string) string {
|
||||||
|
return fmt.Sprintf("org=%s,repo=%s,enterprise=%s", org, repo, enterprise)
|
||||||
}
|
}
|
||||||
|
|
||||||
func splitOwnerAndRepo(repo string) (string, string, error) {
|
func splitOwnerAndRepo(repo string) (string, string, error) {
|
||||||
@@ -244,3 +311,37 @@ func getEnterpriseApiUrl(baseURL string) (string, error) {
|
|||||||
// Trim trailing slash, otherwise there's double slash added to token endpoint
|
// Trim trailing slash, otherwise there's double slash added to token endpoint
|
||||||
return fmt.Sprintf("%s://%s%s", baseEndpoint.Scheme, baseEndpoint.Host, strings.TrimSuffix(baseEndpoint.Path, "/")), nil
|
return fmt.Sprintf("%s://%s%s", baseEndpoint.Scheme, baseEndpoint.Host, strings.TrimSuffix(baseEndpoint.Path, "/")), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type RunnerNotFound struct {
|
||||||
|
runnerName string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *RunnerNotFound) Error() string {
|
||||||
|
return fmt.Sprintf("runner %q not found", e.runnerName)
|
||||||
|
}
|
||||||
|
|
||||||
|
type RunnerOffline struct {
|
||||||
|
runnerName string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *RunnerOffline) Error() string {
|
||||||
|
return fmt.Sprintf("runner %q offline", e.runnerName)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *Client) IsRunnerBusy(ctx context.Context, enterprise, org, repo, name string) (bool, error) {
|
||||||
|
runners, err := r.ListRunners(ctx, enterprise, org, repo)
|
||||||
|
if err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, runner := range runners {
|
||||||
|
if runner.GetName() == name {
|
||||||
|
if runner.GetStatus() == "offline" {
|
||||||
|
return false, &RunnerOffline{runnerName: name}
|
||||||
|
}
|
||||||
|
return runner.GetBusy(), nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return false, &RunnerNotFound{runnerName: name}
|
||||||
|
}
|
||||||
|
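The enterprise support added above resolves the registration target with a fixed precedence: an explicit repository wins over an organization, which in turn wins over an enterprise. A minimal, standalone sketch of that precedence rule (illustration only, not the controller's actual code; the names below are invented for the example):

```go
package main

import (
	"errors"
	"fmt"
)

// scope mirrors the precedence used by getEnterpriseOrganisationAndRepo:
// a repository wins over an organization, which wins over an enterprise.
func scope(enterprise, org, repo string) (string, error) {
	switch {
	case repo != "":
		return "repo:" + repo, nil
	case org != "":
		return "org:" + org, nil
	case enterprise != "":
		return "enterprise:" + enterprise, nil
	default:
		return "", errors.New("enterprise, organization and repository are all empty")
	}
}

func main() {
	for _, args := range [][3]string{
		{"", "", "myorg/myrepo"},
		{"", "myorg", ""},
		{"myenterprise", "", ""},
	} {
		s, err := scope(args[0], args[1], args[2])
		fmt.Println(s, err)
	}
}
```

Running it prints the selected scope for each of the three configurations, matching the branch order in the wrapper functions above.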
github/github_test.go
@@ -32,29 +32,36 @@ func newTestClient() *Client {
 }

 func TestMain(m *testing.M) {
-	server = fake.NewServer()
+	res := &fake.FixedResponses{
+		ListRunners: fake.DefaultListRunnersHandler(),
+	}
+	server = fake.NewServer(fake.WithFixedResponses(res))
 	defer server.Close()
 	m.Run()
 }

 func TestGetRegistrationToken(t *testing.T) {
 	tests := []struct {
-		org   string
-		repo  string
-		token string
-		err   bool
+		enterprise string
+		org        string
+		repo       string
+		token      string
+		err        bool
 	}{
-		{org: "", repo: "test/valid", token: fake.RegistrationToken, err: false},
-		{org: "", repo: "test/invalid", token: "", err: true},
-		{org: "", repo: "test/error", token: "", err: true},
-		{org: "test", repo: "", token: fake.RegistrationToken, err: false},
-		{org: "invalid", repo: "", token: "", err: true},
-		{org: "error", repo: "", token: "", err: true},
+		{enterprise: "", org: "", repo: "test/valid", token: fake.RegistrationToken, err: false},
+		{enterprise: "", org: "", repo: "test/invalid", token: "", err: true},
+		{enterprise: "", org: "", repo: "test/error", token: "", err: true},
+		{enterprise: "", org: "test", repo: "", token: fake.RegistrationToken, err: false},
+		{enterprise: "", org: "invalid", repo: "", token: "", err: true},
+		{enterprise: "", org: "error", repo: "", token: "", err: true},
+		{enterprise: "test", org: "", repo: "", token: fake.RegistrationToken, err: false},
+		{enterprise: "invalid", org: "", repo: "", token: "", err: true},
+		{enterprise: "error", org: "", repo: "", token: "", err: true},
 	}

 	client := newTestClient()
 	for i, tt := range tests {
-		rt, err := client.GetRegistrationToken(context.Background(), tt.org, tt.repo, "test")
+		rt, err := client.GetRegistrationToken(context.Background(), tt.enterprise, tt.org, tt.repo, "test")
 		if !tt.err && err != nil {
 			t.Errorf("[%d] unexpected error: %v", i, err)
 		}
@@ -66,22 +73,26 @@ func TestGetRegistrationToken(t *testing.T) {

 func TestListRunners(t *testing.T) {
 	tests := []struct {
-		org    string
-		repo   string
-		length int
-		err    bool
+		enterprise string
+		org        string
+		repo       string
+		length     int
+		err        bool
 	}{
-		{org: "", repo: "test/valid", length: 2, err: false},
-		{org: "", repo: "test/invalid", length: 0, err: true},
-		{org: "", repo: "test/error", length: 0, err: true},
-		{org: "test", repo: "", length: 2, err: false},
-		{org: "invalid", repo: "", length: 0, err: true},
-		{org: "error", repo: "", length: 0, err: true},
+		{enterprise: "", org: "", repo: "test/valid", length: 2, err: false},
+		{enterprise: "", org: "", repo: "test/invalid", length: 0, err: true},
+		{enterprise: "", org: "", repo: "test/error", length: 0, err: true},
+		{enterprise: "", org: "test", repo: "", length: 2, err: false},
+		{enterprise: "", org: "invalid", repo: "", length: 0, err: true},
+		{enterprise: "", org: "error", repo: "", length: 0, err: true},
+		{enterprise: "test", org: "", repo: "", length: 2, err: false},
+		{enterprise: "invalid", org: "", repo: "", length: 0, err: true},
+		{enterprise: "error", org: "", repo: "", length: 0, err: true},
 	}

 	client := newTestClient()
 	for i, tt := range tests {
-		runners, err := client.ListRunners(context.Background(), tt.org, tt.repo)
+		runners, err := client.ListRunners(context.Background(), tt.enterprise, tt.org, tt.repo)
 		if !tt.err && err != nil {
 			t.Errorf("[%d] unexpected error: %v", i, err)
 		}
@@ -93,21 +104,25 @@ func TestListRunners(t *testing.T) {

 func TestRemoveRunner(t *testing.T) {
 	tests := []struct {
-		org  string
-		repo string
-		err  bool
+		enterprise string
+		org        string
+		repo       string
+		err        bool
 	}{
-		{org: "", repo: "test/valid", err: false},
-		{org: "", repo: "test/invalid", err: true},
-		{org: "", repo: "test/error", err: true},
-		{org: "test", repo: "", err: false},
-		{org: "invalid", repo: "", err: true},
-		{org: "error", repo: "", err: true},
+		{enterprise: "", org: "", repo: "test/valid", err: false},
+		{enterprise: "", org: "", repo: "test/invalid", err: true},
+		{enterprise: "", org: "", repo: "test/error", err: true},
+		{enterprise: "", org: "test", repo: "", err: false},
+		{enterprise: "", org: "invalid", repo: "", err: true},
+		{enterprise: "", org: "error", repo: "", err: true},
+		{enterprise: "test", org: "", repo: "", err: false},
+		{enterprise: "invalid", org: "", repo: "", err: true},
+		{enterprise: "error", org: "", repo: "", err: true},
 	}

 	client := newTestClient()
 	for i, tt := range tests {
-		err := client.RemoveRunner(context.Background(), tt.org, tt.repo, int64(1))
+		err := client.RemoveRunner(context.Background(), tt.enterprise, tt.org, tt.repo, int64(1))
 		if !tt.err && err != nil {
 			t.Errorf("[%d] unexpected error: %v", i, err)
 		}
github/metrics/transport.go (new file, 63 lines)
@@ -0,0 +1,63 @@
+// Package metrics provides monitoring of the GitHub related metrics.
+//
+// This depends on the metrics exporter of kubebuilder.
+// See https://book.kubebuilder.io/reference/metrics.html for details.
+package metrics
+
+import (
+	"net/http"
+	"strconv"
+
+	"github.com/prometheus/client_golang/prometheus"
+	"sigs.k8s.io/controller-runtime/pkg/metrics"
+)
+
+func init() {
+	metrics.Registry.MustRegister(metricRateLimit, metricRateLimitRemaining)
+}
+
+var (
+	// https://docs.github.com/en/rest/overview/resources-in-the-rest-api#rate-limiting
+	metricRateLimit = prometheus.NewGauge(
+		prometheus.GaugeOpts{
+			Name: "github_rate_limit",
+			Help: "The maximum number of requests you're permitted to make per hour",
+		},
+	)
+	metricRateLimitRemaining = prometheus.NewGauge(
+		prometheus.GaugeOpts{
+			Name: "github_rate_limit_remaining",
+			Help: "The number of requests remaining in the current rate limit window",
+		},
+	)
+)
+
+const (
+	// https://docs.github.com/en/rest/overview/resources-in-the-rest-api#rate-limiting
+	headerRateLimit          = "X-RateLimit-Limit"
+	headerRateLimitRemaining = "X-RateLimit-Remaining"
+)
+
+// Transport wraps a transport with metrics monitoring
+type Transport struct {
+	Transport http.RoundTripper
+}
+
+func (t Transport) RoundTrip(req *http.Request) (*http.Response, error) {
+	resp, err := t.Transport.RoundTrip(req)
+	if resp != nil {
+		parseResponse(resp)
+	}
+	return resp, err
+}
+
+func parseResponse(resp *http.Response) {
+	rateLimit, err := strconv.Atoi(resp.Header.Get(headerRateLimit))
+	if err == nil {
+		metricRateLimit.Set(float64(rateLimit))
+	}
+	rateLimitRemaining, err := strconv.Atoi(resp.Header.Get(headerRateLimitRemaining))
+	if err == nil {
+		metricRateLimitRemaining.Set(float64(rateLimitRemaining))
+	}
+}
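The new metrics package hooks into go-github purely through the standard http.RoundTripper interface, so it can be layered under the API client like any other transport. A hypothetical wiring sketch follows; the actual hookup inside this repository's github.Config is not part of this diff, and rateLimitRecorder below is only a stand-in for metrics.Transport:

```go
package main

import (
	"net/http"

	"github.com/google/go-github/v33/github"
)

// rateLimitRecorder is a stand-in for metrics.Transport from the diff above:
// any http.RoundTripper can be layered under the go-github client this way.
type rateLimitRecorder struct {
	base http.RoundTripper
}

func (r rateLimitRecorder) RoundTrip(req *http.Request) (*http.Response, error) {
	resp, err := r.base.RoundTrip(req)
	if resp != nil {
		// Inspect the X-RateLimit-* headers here, as parseResponse does.
		_ = resp.Header.Get("X-RateLimit-Remaining")
	}
	return resp, err
}

func main() {
	httpClient := &http.Client{
		Transport: rateLimitRecorder{base: http.DefaultTransport},
	}
	_ = github.NewClient(httpClient)
}
```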
go.mod (7 changed lines)
@@ -6,14 +6,13 @@ require (
 	github.com/bradleyfalzon/ghinstallation v1.1.1
 	github.com/davecgh/go-spew v1.1.1
 	github.com/go-logr/logr v0.1.0
-	github.com/google/go-github v17.0.0+incompatible // indirect
-	github.com/google/go-github/v32 v32.1.1-0.20200822031813-d57a3a84ba04
-	github.com/google/go-github/v33 v33.0.0
-	github.com/google/go-querystring v1.0.0
+	github.com/google/go-cmp v0.3.1
+	github.com/google/go-github/v33 v33.0.1-0.20210204004227-319dcffb518a
 	github.com/gorilla/mux v1.8.0
 	github.com/kelseyhightower/envconfig v1.4.0
 	github.com/onsi/ginkgo v1.8.0
 	github.com/onsi/gomega v1.5.0
+	github.com/prometheus/client_golang v0.9.2
 	github.com/stretchr/testify v1.4.0 // indirect
 	golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45
 	k8s.io/api v0.0.0-20190918155943-95b840bb6a1f
go.sum (8 changed lines)
@@ -116,14 +116,10 @@ github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY=
 github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
 github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg=
 github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-github v17.0.0+incompatible h1:N0LgJ1j65A7kfXrZnUDaYCs/Sf4rEjNlfyDHW9dolSY=
-github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
 github.com/google/go-github/v29 v29.0.2 h1:opYN6Wc7DOz7Ku3Oh4l7prmkOMwEcQxpFtxdU8N8Pts=
 github.com/google/go-github/v29 v29.0.2/go.mod h1:CHKiKKPHJ0REzfwc14QMklvtHwCveD0PxlMjLlzAM5E=
-github.com/google/go-github/v32 v32.1.1-0.20200822031813-d57a3a84ba04 h1:wEYk2h/GwOhImcVjiTIceP88WxVbXw2F+ARYUQMEsfg=
-github.com/google/go-github/v32 v32.1.1-0.20200822031813-d57a3a84ba04/go.mod h1:rIEpZD9CTDQwDK9GDrtMTycQNA4JU3qBsCizh3q2WCI=
-github.com/google/go-github/v33 v33.0.0 h1:qAf9yP0qc54ufQxzwv+u9H0tiVOnPJxo0lI/JXqw3ZM=
-github.com/google/go-github/v33 v33.0.0/go.mod h1:GMdDnVZY/2TsWgp/lkYnpSAh6TrzhANBBwm6k6TTEXg=
+github.com/google/go-github/v33 v33.0.1-0.20210204004227-319dcffb518a h1:Z9Nzq8ntvvXCLnFGOkzzcD8HDOzOo+obuwE5oK85vNQ=
+github.com/google/go-github/v33 v33.0.1-0.20210204004227-319dcffb518a/go.mod h1:GMdDnVZY/2TsWgp/lkYnpSAh6TrzhANBBwm6k6TTEXg=
 github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk=
 github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
 github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=
main.go (68 changed lines)
@@ -20,6 +20,7 @@ import (
 	"flag"
 	"fmt"
 	"os"
+	"strings"
 	"time"

 	"github.com/kelseyhightower/envconfig"
@@ -40,8 +41,8 @@ const (
 )

 var (
 	scheme = runtime.NewScheme()
-	setupLog = ctrl.Log.WithName("setup")
+	log    = ctrl.Log.WithName("actions-runner-controller")
 )

 func init() {
@@ -62,6 +63,9 @@ func main() {

 		runnerImage string
 		dockerImage string
+		namespace   string
+
+		commonRunnerLabels commaSeparatedStringSlice
 	)

 	var c github.Config
@@ -80,6 +84,8 @@ func main() {
 	flag.Int64Var(&c.AppInstallationID, "github-app-installation-id", c.AppInstallationID, "The installation ID of GitHub App.")
 	flag.StringVar(&c.AppPrivateKey, "github-app-private-key", c.AppPrivateKey, "The path of a private key file to authenticate as a GitHub App")
 	flag.DurationVar(&syncPeriod, "sync-period", 10*time.Minute, "Determines the minimum frequency at which K8s resources managed by this controller are reconciled. When you use autoscaling, set to a lower value like 10 minute, because this corresponds to the minimum time to react on demand change")
+	flag.Var(&commonRunnerLabels, "common-runner-labels", "Runner labels in the K1=V1,K2=V2,... format that are inherited all the runners created by the controller. See https://github.com/summerwind/actions-runner-controller/issues/321 for more information")
+	flag.StringVar(&namespace, "watch-namespace", "", "The namespace to watch for custom resources. Set to empty for letting it watch for all namespaces.")
 	flag.Parse()

 	logger := zap.New(func(o *zap.Options) {
@@ -100,15 +106,16 @@ func main() {
 		LeaderElection: enableLeaderElection,
 		Port:           9443,
 		SyncPeriod:     &syncPeriod,
+		Namespace:      namespace,
 	})
 	if err != nil {
-		setupLog.Error(err, "unable to start manager")
+		log.Error(err, "unable to start manager")
 		os.Exit(1)
 	}

 	runnerReconciler := &controllers.RunnerReconciler{
 		Client:       mgr.GetClient(),
-		Log:          ctrl.Log.WithName("controllers").WithName("Runner"),
+		Log:          log.WithName("runner"),
 		Scheme:       mgr.GetScheme(),
 		GitHubClient: ghClient,
 		RunnerImage:  runnerImage,
@@ -116,62 +123,81 @@ func main() {
 	}

 	if err = runnerReconciler.SetupWithManager(mgr); err != nil {
-		setupLog.Error(err, "unable to create controller", "controller", "Runner")
+		log.Error(err, "unable to create controller", "controller", "Runner")
 		os.Exit(1)
 	}

 	runnerSetReconciler := &controllers.RunnerReplicaSetReconciler{
 		Client:       mgr.GetClient(),
-		Log:          ctrl.Log.WithName("controllers").WithName("RunnerReplicaSet"),
+		Log:          log.WithName("runnerreplicaset"),
 		Scheme:       mgr.GetScheme(),
 		GitHubClient: ghClient,
 	}

 	if err = runnerSetReconciler.SetupWithManager(mgr); err != nil {
-		setupLog.Error(err, "unable to create controller", "controller", "RunnerReplicaSet")
+		log.Error(err, "unable to create controller", "controller", "RunnerReplicaSet")
 		os.Exit(1)
 	}

 	runnerDeploymentReconciler := &controllers.RunnerDeploymentReconciler{
 		Client: mgr.GetClient(),
-		Log:    ctrl.Log.WithName("controllers").WithName("RunnerDeployment"),
+		Log:    log.WithName("runnerdeployment"),
 		Scheme: mgr.GetScheme(),
+		CommonRunnerLabels: commonRunnerLabels,
 	}

 	if err = runnerDeploymentReconciler.SetupWithManager(mgr); err != nil {
-		setupLog.Error(err, "unable to create controller", "controller", "RunnerDeployment")
+		log.Error(err, "unable to create controller", "controller", "RunnerDeployment")
 		os.Exit(1)
 	}

 	horizontalRunnerAutoscaler := &controllers.HorizontalRunnerAutoscalerReconciler{
 		Client:       mgr.GetClient(),
-		Log:          ctrl.Log.WithName("controllers").WithName("HorizontalRunnerAutoscaler"),
+		Log:          log.WithName("horizontalrunnerautoscaler"),
 		Scheme:       mgr.GetScheme(),
 		GitHubClient: ghClient,
+		CacheDuration: syncPeriod - 10*time.Second,
 	}

 	if err = horizontalRunnerAutoscaler.SetupWithManager(mgr); err != nil {
-		setupLog.Error(err, "unable to create controller", "controller", "HorizontalRunnerAutoscaler")
+		log.Error(err, "unable to create controller", "controller", "HorizontalRunnerAutoscaler")
 		os.Exit(1)
 	}

 	if err = (&actionsv1alpha1.Runner{}).SetupWebhookWithManager(mgr); err != nil {
-		setupLog.Error(err, "unable to create webhook", "webhook", "Runner")
+		log.Error(err, "unable to create webhook", "webhook", "Runner")
 		os.Exit(1)
 	}
 	if err = (&actionsv1alpha1.RunnerDeployment{}).SetupWebhookWithManager(mgr); err != nil {
-		setupLog.Error(err, "unable to create webhook", "webhook", "RunnerDeployment")
+		log.Error(err, "unable to create webhook", "webhook", "RunnerDeployment")
 		os.Exit(1)
 	}
 	if err = (&actionsv1alpha1.RunnerReplicaSet{}).SetupWebhookWithManager(mgr); err != nil {
-		setupLog.Error(err, "unable to create webhook", "webhook", "RunnerReplicaSet")
+		log.Error(err, "unable to create webhook", "webhook", "RunnerReplicaSet")
 		os.Exit(1)
 	}
 	// +kubebuilder:scaffold:builder

-	setupLog.Info("starting manager")
+	log.Info("starting manager")
 	if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
-		setupLog.Error(err, "problem running manager")
+		log.Error(err, "problem running manager")
 		os.Exit(1)
 	}
 }

+type commaSeparatedStringSlice []string
+
+func (s *commaSeparatedStringSlice) String() string {
+	return fmt.Sprintf("%v", *s)
+}
+
+func (s *commaSeparatedStringSlice) Set(value string) error {
+	for _, v := range strings.Split(value, ",") {
+		if v == "" {
+			continue
+		}
+
+		*s = append(*s, v)
+	}
+	return nil
+}
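The new commaSeparatedStringSlice type works because it satisfies the flag.Value interface, so -common-runner-labels can be given once as a comma-separated list or repeated on the command line and the values accumulate. A small self-contained sketch of the same pattern (type and flag names here are illustrative, not the controller's):

```go
package main

import (
	"flag"
	"fmt"
	"strings"
)

// labels mimics commaSeparatedStringSlice: it satisfies flag.Value,
// so repeated or comma-separated values accumulate into one slice.
type labels []string

func (s *labels) String() string { return fmt.Sprintf("%v", *s) }

func (s *labels) Set(value string) error {
	for _, v := range strings.Split(value, ",") {
		if v != "" {
			*s = append(*s, v)
		}
	}
	return nil
}

func main() {
	var l labels
	fs := flag.NewFlagSet("example", flag.ContinueOnError)
	fs.Var(&l, "common-runner-labels", "comma-separated runner labels")
	_ = fs.Parse([]string{"-common-runner-labels=linux,x64", "-common-runner-labels=gpu"})
	fmt.Println(l) // [linux x64 gpu]
}
```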
pkg/actionsglob/README.md (new file, 8 lines)
@@ -0,0 +1,8 @@
+This package is an implementation of glob that is intended to simulate the behaviour of
+https://github.com/actions/toolkit/tree/master/packages/glob in many cases.
+
+This isn't a complete reimplementation of the referenced nodejs package.
+
+Differences:
+
+- This package doesn't implement `**`
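A short usage sketch of the Match function defined in actionsglob.go below; the import path is assumed from this repository's module name and may differ:

```go
package main

import (
	"fmt"

	// Assumed import path; the package lives at pkg/actionsglob in this repository.
	"github.com/summerwind/actions-runner-controller/pkg/actionsglob"
)

func main() {
	fmt.Println(actionsglob.Match("actions-*-metrics", "actions-workflow-metrics")) // true
	fmt.Println(actionsglob.Match("!foo*", "foobar"))                               // false: the leading "!" inverts the match
}
```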
pkg/actionsglob/actionsglob.go (new file, 78 lines)
@@ -0,0 +1,78 @@
+package actionsglob
+
+import (
+	"fmt"
+	"strings"
+)
+
+func Match(pat string, s string) bool {
+	if len(pat) == 0 {
+		panic(fmt.Sprintf("unexpected length of pattern: %d", len(pat)))
+	}
+
+	var inverse bool
+
+	if pat[0] == '!' {
+		pat = pat[1:]
+		inverse = true
+	}
+
+	tokens := strings.SplitAfter(pat, "*")
+
+	var wildcardInHead bool
+
+	for i := 0; i < len(tokens); i++ {
+		p := tokens[i]
+
+		if p == "" {
+			s = ""
+			break
+		}
+
+		if p == "*" {
+			if i == len(tokens)-1 {
+				s = ""
+				break
+			}
+
+			wildcardInHead = true
+
+			continue
+		}
+
+		wildcardInTail := p[len(p)-1] == '*'
+		if wildcardInTail {
+			p = p[:len(p)-1]
+		}
+
+		subs := strings.SplitN(s, p, 2)
+
+		if len(subs) == 0 {
+			break
+		}
+
+		if subs[0] != "" {
+			if !wildcardInHead {
+				break
+			}
+		}
+
+		if subs[1] != "" {
+			if !wildcardInTail {
+				break
+			}
+		}
+
+		s = subs[1]
+
+		wildcardInHead = wildcardInTail
+	}
+
+	r := s == ""
+
+	if inverse {
+		r = !r
+	}
+
+	return r
+}
pkg/actionsglob/match_test.go (new file, 214 lines)
@@ -0,0 +1,214 @@
+package actionsglob
+
+import (
+	"testing"
+)
+
+func TestMatch(t *testing.T) {
+	type testcase struct {
+		Pattern, Target string
+		Want            bool
+	}
+
+	run := func(t *testing.T, tc testcase) {
+		t.Helper()
+
+		got := Match(tc.Pattern, tc.Target)
+
+		if got != tc.Want {
+			t.Errorf("%s against %s: want %v, got %v", tc.Pattern, tc.Target, tc.Want, got)
+		}
+	}
+
+	t.Run("foo == foo", func(t *testing.T) {
+		run(t, testcase{
+			Pattern: "foo",
+			Target:  "foo",
+			Want:    true,
+		})
+	})
+
+	t.Run("!foo == foo", func(t *testing.T) {
+		run(t, testcase{
+			Pattern: "!foo",
+			Target:  "foo",
+			Want:    false,
+		})
+	})
+
+	t.Run("foo == foo1", func(t *testing.T) {
+		run(t, testcase{
+			Pattern: "foo",
+			Target:  "foo1",
+			Want:    false,
+		})
+	})
+
+	t.Run("!foo == foo1", func(t *testing.T) {
+		run(t, testcase{
+			Pattern: "!foo",
+			Target:  "foo1",
+			Want:    true,
+		})
+	})
+
+	t.Run("*foo == foo", func(t *testing.T) {
+		run(t, testcase{
+			Pattern: "*foo",
+			Target:  "foo",
+			Want:    true,
+		})
+	})
+
+	t.Run("!*foo == foo", func(t *testing.T) {
+		run(t, testcase{
+			Pattern: "!*foo",
+			Target:  "foo",
+			Want:    false,
+		})
+	})
+
+	t.Run("*foo == 1foo", func(t *testing.T) {
+		run(t, testcase{
+			Pattern: "*foo",
+			Target:  "1foo",
+			Want:    true,
+		})
+	})
+
+	t.Run("!*foo == 1foo", func(t *testing.T) {
+		run(t, testcase{
+			Pattern: "!*foo",
+			Target:  "1foo",
+			Want:    false,
+		})
+	})
+
+	t.Run("*foo == foo1", func(t *testing.T) {
+		run(t, testcase{
+			Pattern: "*foo",
+			Target:  "foo1",
+			Want:    false,
+		})
+	})
+
+	t.Run("!*foo == foo1", func(t *testing.T) {
+		run(t, testcase{
+			Pattern: "!*foo",
+			Target:  "foo1",
+			Want:    true,
+		})
+	})
+
+	t.Run("*foo* == foo1", func(t *testing.T) {
+		run(t, testcase{
+			Pattern: "*foo*",
+			Target:  "foo1",
+			Want:    true,
+		})
+	})
+
+	t.Run("!*foo* == foo1", func(t *testing.T) {
+		run(t, testcase{
+			Pattern: "!*foo*",
+			Target:  "foo1",
+			Want:    false,
+		})
+	})
+
+	t.Run("*foo == foobar", func(t *testing.T) {
+		run(t, testcase{
+			Pattern: "*foo",
+			Target:  "foobar",
+			Want:    false,
+		})
+	})
+
+	t.Run("!*foo == foobar", func(t *testing.T) {
+		run(t, testcase{
+			Pattern: "!*foo",
+			Target:  "foobar",
+			Want:    true,
+		})
+	})
+
+	t.Run("*foo* == foobar", func(t *testing.T) {
+		run(t, testcase{
+			Pattern: "*foo*",
+			Target:  "foobar",
+			Want:    true,
+		})
+	})
+
+	t.Run("!*foo* == foobar", func(t *testing.T) {
+		run(t, testcase{
+			Pattern: "!*foo*",
+			Target:  "foobar",
+			Want:    false,
+		})
+	})
+
+	t.Run("foo* == foo", func(t *testing.T) {
+		run(t, testcase{
+			Pattern: "foo*",
+			Target:  "foo",
+			Want:    true,
+		})
+	})
+
+	t.Run("!foo* == foo", func(t *testing.T) {
+		run(t, testcase{
+			Pattern: "!foo*",
+			Target:  "foo",
+			Want:    false,
+		})
+	})
+
+	t.Run("foo* == foobar", func(t *testing.T) {
+		run(t, testcase{
+			Pattern: "foo*",
+			Target:  "foobar",
+			Want:    true,
+		})
+	})
+
+	t.Run("!foo* == foobar", func(t *testing.T) {
+		run(t, testcase{
+			Pattern: "!foo*",
+			Target:  "foobar",
+			Want:    false,
+		})
+	})
+
+	t.Run("foo (* == foo ( 1 / 2 )", func(t *testing.T) {
+		run(t, testcase{
+			Pattern: "foo (*",
+			Target:  "foo ( 1 / 2 )",
+			Want:    true,
+		})
+	})
+
+	t.Run("!foo (* == foo ( 1 / 2 )", func(t *testing.T) {
+		run(t, testcase{
+			Pattern: "!foo (*",
+			Target:  "foo ( 1 / 2 )",
+			Want:    false,
+		})
+	})
+
+	t.Run("actions-*-metrics == actions-workflow-metrics", func(t *testing.T) {
+		run(t, testcase{
+			Pattern: "actions-*-metrics",
+			Target:  "actions-workflow-metrics",
+			Want:    true,
+		})
+	})
+
+	t.Run("!actions-*-metrics == actions-workflow-metrics", func(t *testing.T) {
+		run(t, testcase{
+			Pattern: "!actions-*-metrics",
+			Target:  "actions-workflow-metrics",
+			Want:    false,
+		})
+	})
+
+}
runner/Dockerfile
@@ -1,4 +1,4 @@
-FROM ubuntu:18.04
+FROM ubuntu:20.04

 ARG TARGETPLATFORM
 ARG RUNNER_VERSION=2.274.2
@@ -8,36 +8,37 @@ RUN test -n "$TARGETPLATFORM" || (echo "TARGETPLATFORM must be set" && false)

 ENV DEBIAN_FRONTEND=noninteractive
 RUN apt update -y \
     && apt install -y software-properties-common \
     && add-apt-repository -y ppa:git-core/ppa \
     && apt update -y \
     && apt install -y --no-install-recommends \
     build-essential \
     curl \
     ca-certificates \
     dnsutils \
     ftp \
     git \
     iproute2 \
     iputils-ping \
     jq \
     libunwind8 \
     locales \
     netcat \
     openssh-client \
     parallel \
     rsync \
     shellcheck \
     sudo \
     telnet \
     time \
     tzdata \
     unzip \
     upx \
     wget \
     zip \
     zstd \
+    python-is-python3 \
     && rm -rf /var/lib/apt/lists/*

 RUN export ARCH=$(echo ${TARGETPLATFORM} | cut -d / -f2) \
     && curl -L -o /usr/local/bin/dumb-init https://github.com/Yelp/dumb-init/releases/download/v1.2.2/dumb-init_1.2.2_${ARCH} \
@@ -45,18 +46,18 @@ RUN export ARCH=$(echo ${TARGETPLATFORM} | cut -d / -f2) \

 # Docker download supports arm64 as aarch64 & amd64 as x86_64
 RUN set -vx; \
     export ARCH=$(echo ${TARGETPLATFORM} | cut -d / -f2) \
     && if [ "$ARCH" = "arm64" ]; then export ARCH=aarch64 ; fi \
     && if [ "$ARCH" = "amd64" ]; then export ARCH=x86_64 ; fi \
     && curl -L -o docker.tgz https://download.docker.com/linux/static/stable/${ARCH}/docker-${DOCKER_VERSION}.tgz \
     && tar zxvf docker.tgz \
     && install -o root -g root -m 755 docker/docker /usr/local/bin/docker \
     && rm -rf docker docker.tgz \
     && adduser --disabled-password --gecos "" --uid 1000 runner \
     && groupadd docker \
     && usermod -aG sudo runner \
     && usermod -aG docker runner \
     && echo "%sudo ALL=(ALL:ALL) NOPASSWD:ALL" > /etc/sudoers

 ENV RUNNER_ASSETS_DIR=/runnertmp

@@ -66,24 +67,24 @@ ENV RUNNER_ASSETS_DIR=/runnertmp
 # It is installed after installdependencies.sh and before removing /var/lib/apt/lists
 # to avoid rerunning apt-update on its own.
 RUN export ARCH=$(echo ${TARGETPLATFORM} | cut -d / -f2) \
     && if [ "$ARCH" = "amd64" ]; then export ARCH=x64 ; fi \
     && mkdir -p "$RUNNER_ASSETS_DIR" \
     && cd "$RUNNER_ASSETS_DIR" \
     && curl -L -o runner.tar.gz https://github.com/actions/runner/releases/download/v${RUNNER_VERSION}/actions-runner-linux-${ARCH}-${RUNNER_VERSION}.tar.gz \
     && tar xzf ./runner.tar.gz \
     && rm runner.tar.gz \
     && ./bin/installdependencies.sh \
     && mv ./externals ./externalstmp \
     && apt-get install -y libyaml-dev \
     && rm -rf /var/lib/apt/lists/*

 RUN echo AGENT_TOOLSDIRECTORY=/opt/hostedtoolcache > .env \
     && mkdir /opt/hostedtoolcache \
-    && chgrp runner /opt/hostedtoolcache \
+    && chgrp docker /opt/hostedtoolcache \
     && chmod g+rwx /opt/hostedtoolcache

 COPY entrypoint.sh /
-COPY patched $RUNNER_ASSETS_DIR/patched
+COPY --chown=runner:docker patched $RUNNER_ASSETS_DIR/patched

 USER runner
 ENTRYPOINT ["/usr/local/bin/dumb-init", "--"]
runner/Dockerfile.dindrunner
@@ -1,11 +1,12 @@
 FROM ubuntu:20.04

 ENV DEBIAN_FRONTEND=noninteractive
-# Dev + DinD dependencies
-RUN apt update \
+RUN apt update -y \
     && apt install -y software-properties-common \
     && add-apt-repository -y ppa:git-core/ppa \
-    && apt install -y \
+    && apt update -y \
+    && apt install -y --no-install-recommends \
+    software-properties-common \
     build-essential \
     curl \
     ca-certificates \
@@ -13,7 +14,6 @@ RUN apt update \
     ftp \
     git \
     iproute2 \
-    iptables \
     iputils-ping \
     jq \
     libunwind8 \
@@ -24,7 +24,6 @@ RUN apt update \
     rsync \
     shellcheck \
     sudo \
-    supervisor \
     telnet \
     time \
     tzdata \
@@ -33,6 +32,9 @@ RUN apt update \
     wget \
     zip \
     zstd \
+    python-is-python3 \
+    iptables \
+    supervisor \
     && rm -rf /var/lib/apt/list/*

 # Runner user
@@ -78,7 +80,7 @@ ENV RUNNER_ASSETS_DIR=/runnertmp
 RUN export ARCH=$(echo ${TARGETPLATFORM} | cut -d / -f2) \
     && if [ "$ARCH" = "amd64" ]; then export ARCH=x64 ; fi \
     && mkdir -p "$RUNNER_ASSETS_DIR" \
     && cd "$RUNNER_ASSETS_DIR" \
     && curl -L -o runner.tar.gz https://github.com/actions/runner/releases/download/v${RUNNER_VERSION}/actions-runner-linux-${ARCH}-${RUNNER_VERSION}.tar.gz \
     && tar xzf ./runner.tar.gz \
     && rm runner.tar.gz \
@@ -87,9 +89,9 @@ RUN export ARCH=$(echo ${TARGETPLATFORM} | cut -d / -f2) \
     && rm -rf /var/lib/apt/lists/*

 RUN echo AGENT_TOOLSDIRECTORY=/opt/hostedtoolcache > /runner.env \
     && mkdir /opt/hostedtoolcache \
-    && chgrp runner /opt/hostedtoolcache \
+    && chgrp docker /opt/hostedtoolcache \
     && chmod g+rwx /opt/hostedtoolcache

 COPY modprobe startup.sh /usr/local/bin/
 COPY supervisor/ /etc/supervisor/conf.d/
@@ -104,7 +106,7 @@ RUN export ARCH=$(echo ${TARGETPLATFORM} | cut -d / -f2) \

 VOLUME /var/lib/docker

-COPY patched $RUNNER_ASSETS_DIR/patched
+COPY --chown=runner:docker patched $RUNNER_ASSETS_DIR/patched

 # No group definition, as that makes it harder to run docker.
 USER runner
runner/Dockerfile.ubuntu.1804 (new file, 91 lines)
@@ -0,0 +1,91 @@
+FROM ubuntu:18.04
+
+ARG TARGETPLATFORM
+ARG RUNNER_VERSION=2.274.2
+ARG DOCKER_VERSION=19.03.12
+
+RUN test -n "$TARGETPLATFORM" || (echo "TARGETPLATFORM must be set" && false)
+
+ENV DEBIAN_FRONTEND=noninteractive
+RUN apt update -y \
+    && apt install -y software-properties-common \
+    && add-apt-repository -y ppa:git-core/ppa \
+    && apt update -y \
+    && apt install -y --no-install-recommends \
+    build-essential \
+    curl \
+    ca-certificates \
+    dnsutils \
+    ftp \
+    git \
+    iproute2 \
+    iputils-ping \
+    jq \
+    libunwind8 \
+    locales \
+    netcat \
+    openssh-client \
+    parallel \
+    rsync \
+    shellcheck \
+    sudo \
+    telnet \
+    time \
+    tzdata \
+    unzip \
+    upx \
+    wget \
+    zip \
+    zstd \
+    && cd /usr/bin && ln -sf python3 python \
+    && rm -rf /var/lib/apt/lists/*
+
+RUN export ARCH=$(echo ${TARGETPLATFORM} | cut -d / -f2) \
+    && curl -L -o /usr/local/bin/dumb-init https://github.com/Yelp/dumb-init/releases/download/v1.2.2/dumb-init_1.2.2_${ARCH} \
+    && chmod +x /usr/local/bin/dumb-init
+
+# Docker download supports arm64 as aarch64 & amd64 as x86_64
+RUN set -vx; \
+    export ARCH=$(echo ${TARGETPLATFORM} | cut -d / -f2) \
+    && if [ "$ARCH" = "arm64" ]; then export ARCH=aarch64 ; fi \
+    && if [ "$ARCH" = "amd64" ]; then export ARCH=x86_64 ; fi \
+    && curl -L -o docker.tgz https://download.docker.com/linux/static/stable/${ARCH}/docker-${DOCKER_VERSION}.tgz \
+    && tar zxvf docker.tgz \
+    && install -o root -g root -m 755 docker/docker /usr/local/bin/docker \
+    && rm -rf docker docker.tgz \
+    && adduser --disabled-password --gecos "" --uid 1000 runner \
+    && groupadd docker \
+    && usermod -aG sudo runner \
+    && usermod -aG docker runner \
+    && echo "%sudo ALL=(ALL:ALL) NOPASSWD:ALL" > /etc/sudoers
+
+ENV RUNNER_ASSETS_DIR=/runnertmp
+
+# Runner download supports amd64 as x64. Externalstmp is needed for making mount points work inside DinD.
+#
+# libyaml-dev is required for ruby/setup-ruby action.
+# It is installed after installdependencies.sh and before removing /var/lib/apt/lists
+# to avoid rerunning apt-update on its own.
+RUN export ARCH=$(echo ${TARGETPLATFORM} | cut -d / -f2) \
+    && if [ "$ARCH" = "amd64" ]; then export ARCH=x64 ; fi \
+    && mkdir -p "$RUNNER_ASSETS_DIR" \
+    && cd "$RUNNER_ASSETS_DIR" \
+    && curl -L -o runner.tar.gz https://github.com/actions/runner/releases/download/v${RUNNER_VERSION}/actions-runner-linux-${ARCH}-${RUNNER_VERSION}.tar.gz \
+    && tar xzf ./runner.tar.gz \
+    && rm runner.tar.gz \
+    && ./bin/installdependencies.sh \
+    && mv ./externals ./externalstmp \
+    && apt-get install -y libyaml-dev \
+    && rm -rf /var/lib/apt/lists/*
+
+RUN echo AGENT_TOOLSDIRECTORY=/opt/hostedtoolcache > .env \
+    && mkdir /opt/hostedtoolcache \
+    && chgrp docker /opt/hostedtoolcache \
+    && chmod g+rwx /opt/hostedtoolcache
+
+COPY entrypoint.sh /
+COPY --chown=runner:docker patched $RUNNER_ASSETS_DIR/patched
+
+USER runner
+ENTRYPOINT ["/usr/local/bin/dumb-init", "--"]
+CMD ["/entrypoint.sh"]
runner/Makefile
@@ -2,7 +2,7 @@ NAME ?= summerwind/actions-runner
 DIND_RUNNER_NAME ?= ${NAME}-dind
 TAG ?= latest

-RUNNER_VERSION ?= 2.274.2
+RUNNER_VERSION ?= 2.277.1
 DOCKER_VERSION ?= 19.03.12

 # default list of platforms for which multiarch image is built
@@ -22,16 +22,15 @@ else
 export PUSH_ARG="--push"
 endif

-docker-build:
+docker-build-ubuntu:
 	docker build --build-arg TARGETPLATFORM=amd64 --build-arg RUNNER_VERSION=${RUNNER_VERSION} --build-arg DOCKER_VERSION=${DOCKER_VERSION} -t ${NAME}:${TAG} .
-	docker build --build-arg TARGETPLATFORM=amd64 --build-arg RUNNER_VERSION=${RUNNER_VERSION} --build-arg DOCKER_VERSION=${DOCKER_VERSION} -t ${DIND_RUNNER_NAME}:${TAG} -f dindrunner.Dockerfile .
+	docker build --build-arg TARGETPLATFORM=amd64 --build-arg RUNNER_VERSION=${RUNNER_VERSION} --build-arg DOCKER_VERSION=${DOCKER_VERSION} -t ${DIND_RUNNER_NAME}:${TAG} -f Dockerfile.dindrunner .

-docker-push:
+docker-push-ubuntu:
 	docker push ${NAME}:${TAG}
 	docker push ${DIND_RUNNER_NAME}:${TAG}

-docker-buildx:
+docker-buildx-ubuntu:
 	export DOCKER_CLI_EXPERIMENTAL=enabled
 	@if ! docker buildx ls | grep -q container-builder; then\
 		docker buildx create --platform ${PLATFORMS} --name container-builder --use;\
@@ -46,5 +45,5 @@ docker-buildx:
 	--build-arg RUNNER_VERSION=${RUNNER_VERSION} \
 	--build-arg DOCKER_VERSION=${DOCKER_VERSION} \
 	-t "${DIND_RUNNER_NAME}:latest" \
-	-f dindrunner.Dockerfile \
+	-f Dockerfile.dindrunner \
 	. ${PUSH_ARG}
runner/entrypoint.sh
@@ -16,32 +16,26 @@ if [ -z "${RUNNER_NAME}" ]; then
   exit 1
 fi

-if [ -n "${RUNNER_ORG}" ] && [ -n "${RUNNER_REPO}" ]; then
+if [ -n "${RUNNER_ORG}" ] && [ -n "${RUNNER_REPO}" ] && [ -n "${RUNNER_ENTERPRISE}" ]; then
   ATTACH="${RUNNER_ORG}/${RUNNER_REPO}"
 elif [ -n "${RUNNER_ORG}" ]; then
   ATTACH="${RUNNER_ORG}"
 elif [ -n "${RUNNER_REPO}" ]; then
   ATTACH="${RUNNER_REPO}"
+elif [ -n "${RUNNER_ENTERPRISE}" ]; then
+  ATTACH="enterprises/${RUNNER_ENTERPRISE}"
 else
-  echo "At least one of RUNNER_ORG or RUNNER_REPO must be set" 1>&2
+  echo "At least one of RUNNER_ORG or RUNNER_REPO or RUNNER_ENTERPRISE must be set" 1>&2
   exit 1
 fi

-if [ -n "${RUNNER_WORKDIR}" ]; then
-  WORKDIR_ARG="--work ${RUNNER_WORKDIR}"
-fi
-
-if [ -n "${RUNNER_LABELS}" ]; then
-  LABEL_ARG="--labels ${RUNNER_LABELS}"
-fi
-
 if [ -z "${RUNNER_TOKEN}" ]; then
   echo "RUNNER_TOKEN must be set" 1>&2
   exit 1
 fi

-if [ -z "${RUNNER_REPO}" ] && [ -n "${RUNNER_ORG}" ] && [ -n "${RUNNER_GROUP}" ];then
-  RUNNER_GROUP_ARG="--runnergroup ${RUNNER_GROUP}"
+if [ -z "${RUNNER_REPO}" ] && [ -n "${RUNNER_GROUP}" ];then
+  RUNNER_GROUPS=${RUNNER_GROUP}
 fi

 # Hack due to https://github.com/summerwind/actions-runner-controller/issues/252#issuecomment-758338483
@@ -54,7 +48,14 @@ sudo chown -R runner:docker /runner
 mv /runnertmp/* /runner/

 cd /runner
-./config.sh --unattended --replace --name "${RUNNER_NAME}" --url "${GITHUB_URL}${ATTACH}" --token "${RUNNER_TOKEN}" ${RUNNER_GROUP_ARG} ${LABEL_ARG} ${WORKDIR_ARG}
+./config.sh --unattended --replace \
+  --name "${RUNNER_NAME}" \
+  --url "${GITHUB_URL}${ATTACH}" \
+  --token "${RUNNER_TOKEN}" \
+  --runnergroup "${RUNNER_GROUPS}" \
+  --labels "${RUNNER_LABELS}" \
+  --work "${RUNNER_WORKDIR}"

 mkdir ./externals
 # Hack due to the DinD volumes
 mv ./externalstmp/* ./externals/
runner/startup.sh
@@ -17,6 +17,34 @@ function wait_for_process () {
   return 0
 }

+sudo /bin/bash <<SCRIPT
+mkdir -p /etc/docker
+
+cat <<EOS > /etc/docker/daemon.json
+{
+EOS
+
+if [ -n "${MTU}" ]; then
+cat <<EOS >> /etc/docker/daemon.json
+  "mtu": ${MTU}
+EOS
+# See https://docs.docker.com/engine/security/rootless/
+echo "environment=DOCKERD_ROOTLESS_ROOTLESSKIT_MTU=${MTU}" >> /etc/supervisor/conf.d/dockerd.conf
+fi
+
+cat <<EOS >> /etc/docker/daemon.json
+}
+EOS
+SCRIPT
+
+INFO "Using /etc/docker/daemon.json with the following content"
+
+cat /etc/docker/daemon.json
+
+INFO "Using /etc/supervisor/conf.d/dockerd.conf with the following content"
+
+cat /etc/supervisor/conf.d/dockerd.conf
+
 INFO "Starting supervisor"
 sudo /usr/bin/supervisord -n >> /dev/null 2>&1 &

@@ -27,11 +55,17 @@ for process in "${processes[@]}"; do
   wait_for_process "$process"
   if [ $? -ne 0 ]; then
     ERROR "$process is not running after max time"
+    ERROR "Dumping /var/log/dockerd.err.log to help investigation"
+    cat /var/log/dockerd.err.log
     exit 1
   else
     INFO "$process is running"
   fi
 done

+if [ -n "${MTU}" ]; then
+  ifconfig docker0 mtu ${MTU} up
+fi
+
 # Wait processes to be running
 entrypoint.sh