Mirror of https://github.com/actions/actions-runner-controller.git (synced 2025-12-10 11:41:27 +00:00)
Compare commits — 161 commits
.dockerignore (new file, 13 lines)

@@ -0,0 +1,13 @@
+Makefile
+acceptance
+runner
+hack
+test-assets
+config
+charts
+.github
+.envrc
+.env
+*.md
+*.txt
+*.sh
.github/ISSUE_TEMPLATE/bug_report.md (new file, 36 lines)

@@ -0,0 +1,36 @@
---
name: Bug report
about: Create a report to help us improve
title: ''
assignees: ''

---

**Describe the bug**
A clear and concise description of what the bug is.

**Checks**

- [ ] My actions-runner-controller version (v0.x.y) does support the feature
- [ ] I'm using an unreleased version of the controller I built from HEAD of the default branch

**To Reproduce**
Steps to reproduce the behavior:
1. Go to '...'
2. Click on '....'
3. Scroll down to '....'
4. See error

**Expected behavior**
A clear and concise description of what you expected to happen.

**Screenshots**
If applicable, add screenshots to help explain your problem.

**Environment (please complete the following information):**
- Controller Version [e.g. 0.18.2]
- Deployment Method [e.g. Helm and Kustomize ]
- Helm Chart Version [e.g. 0.11.0, if applicable]

**Additional context**
Add any other context about the problem here.
.github/ISSUE_TEMPLATE/feature_request.md (new file, 19 lines)

@@ -0,0 +1,19 @@
---
name: Feature request
about: Suggest an idea for this project
title: ''
assignees: ''

---

**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]

**Describe the solution you'd like**
A clear and concise description of what you want to happen.

**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.

**Additional context**
Add any other context or screenshots about the feature request here.
.github/RELEASE_NOTE_TEMPLATE.md (new file, 34 lines)

@@ -0,0 +1,34 @@
# Release Note Template

This is the template of actions-runner-controller's release notes.

Whenever a new release is made, I start by manually copy-pasting this template onto the GitHub UI for creating the release.

I then walk through all the changes, take some time to think about the best one-sentence explanations to tell the users about changes, write it all,
and click the publish button.

If you think you can improve future release notes in any way, please do submit a pull request to change the template below.

Note that even though it looks like a Go template, I don't use any templating to generate the changelog.
It's just that I'm used to reading and interpreting Go templates myself, not a computer program :)

**Title**:

```
v{{ .Version }}: {{ .TitlesOfImportantChanges }}
```

**Body**:

```
**CAUTION:** If you're using the Helm chart, be sure to review changes to CRDs and manually upgrade CRDs! Helm installs CRDs only when installing a chart; it doesn't automatically upgrade CRDs. Otherwise you end up with troubles like #427, #467, and #468. Please refer to the [UPGRADING](charts/actions-runner-controller/docs/UPGRADING.md) docs for the latest process.

This release includes the following changes from contributors. Thank you!

- @{{ .GitHubUser }} fixed {{ .Feature }} to not break when ... (#{{ .PullRequestNumber }})
- @{{ .GitHubUser }} enhanced {{ .Feature }} to ... (#{{ .PullRequestNumber }})
- @{{ .GitHubUser }} added {{ .Feature }} for ... (#{{ .PullRequestNumber }})
- @{{ .GitHubUser }} fixed {{ .Topic }} in the documentation so that ... (#{{ .PullRequestNumber }})
- @{{ .GitHubUser }} added {{ .Topic }} to the documentation (#{{ .PullRequestNumber }})
- @{{ .GitHubUser }} improved the documentation about {{ .Topic }} to also cover ... (#{{ .PullRequestNumber }})
```
.github/lock.yml (new file, 25 lines)

@@ -0,0 +1,25 @@
# Configuration for Lock Threads
# Repo: https://github.com/dessant/lock-threads-app
# App: https://github.com/apps/lock

# Number of days of inactivity before a closed issue or pull request is locked
daysUntilLock: 7

# Skip issues and pull requests created before a given timestamp. Timestamp must
# follow ISO 8601 (`YYYY-MM-DD`). Set to `false` to disable
skipCreatedBefore: false

# Issues and pull requests with these labels will be ignored. Set to `[]` to disable
exemptLabels: []

# Label to add before locking, such as `outdated`. Set to `false` to disable
lockLabel: false

# Comment to post before locking. Set to `false` to disable
lockComment: >
  This thread has been automatically locked since there has not been
  any recent activity after it was closed. Please open a new issue for
  related bugs.

# Assign `resolved` as the reason for locking. Set to `false` to disable
setLockReason: true
.github/stale.yml (new file, 66 lines)

@@ -0,0 +1,66 @@
# Configuration for probot-stale - https://github.com/probot/stale

# Number of days of inactivity before an Issue or Pull Request becomes stale
daysUntilStale: 30

# Number of days of inactivity before an Issue or Pull Request with the stale label is closed.
# Set to false to disable. If disabled, issues still need to be closed manually, but will remain marked as stale.
daysUntilClose: 14

# Only issues or pull requests with all of these labels are checked if stale. Defaults to `[]` (disabled)
onlyLabels: []

# Issues or Pull Requests with these labels will never be considered stale. Set to `[]` to disable
exemptLabels:
  - pinned
  - security
  - enhancement
  - refactor
  - documentation
  - chore
  - needs-investigation
  - bug

# Set to true to ignore issues in a project (defaults to false)
exemptProjects: false

# Set to true to ignore issues in a milestone (defaults to false)
exemptMilestones: false

# Set to true to ignore issues with an assignee (defaults to false)
exemptAssignees: false

# Label to use when marking as stale
staleLabel: stale

# Comment to post when marking as stale. Set to `false` to disable
markComment: >
  This issue has been automatically marked as stale because it has not had
  recent activity. It will be closed if no further activity occurs. Thank you
  for your contributions.

# Comment to post when removing the stale label.
# unmarkComment: >
#   Your comment here.

# Comment to post when closing a stale Issue or Pull Request.
# closeComment: >
#   Your comment here.

# Limit the number of actions per hour, from 1-30. Default is 30
limitPerRun: 30

# Limit to only `issues` or `pulls`
# only: issues

# Optionally, specify configuration settings that are specific to just 'issues' or 'pulls':
# pulls:
#   daysUntilStale: 30
#   markComment: >
#     This pull request has been automatically marked as stale because it has not had
#     recent activity. It will be closed if no further activity occurs. Thank you
#     for your contributions.

# issues:
#   exemptLabels:
#     - confirmed
.github/workflows/build-and-release-runners.yml (70 lines changed)

@@ -13,25 +13,31 @@ on:
     paths:
       - runner/patched/*
       - runner/Dockerfile
-      - runner/dindrunner.Dockerfile
+      - runner/Dockerfile.ubuntu.1804
+      - runner/Dockerfile.dindrunner
       - runner/entrypoint.sh
       - .github/workflows/build-and-release-runners.yml

 jobs:
   build:
     runs-on: ubuntu-latest
-    name: Build ${{ matrix.name }}
+    name: Build ${{ matrix.name }}-ubuntu-${{ matrix.os-version }}
     strategy:
       matrix:
         include:
           - name: actions-runner
+            os-version: 20.04
             dockerfile: Dockerfile
+          - name: actions-runner
+            os-version: 18.04
+            dockerfile: Dockerfile.ubuntu.1804
           - name: actions-runner-dind
-            dockerfile: dindrunner.Dockerfile
+            os-version: 20.04
+            dockerfile: Dockerfile.dindrunner
     env:
-      RUNNER_VERSION: 2.277.1
+      RUNNER_VERSION: 2.278.0
       DOCKER_VERSION: 19.03.12
-      DOCKERHUB_USERNAME: ${{ github.repository_owner }}
+      DOCKERHUB_USERNAME: ${{ secrets.DOCKER_USER }}
     steps:
       - name: Set outputs
         id: vars

@@ -52,10 +58,10 @@
         uses: docker/login-action@v1
         if: ${{ github.event_name == 'push' || github.event_name == 'release' }}
         with:
-          username: ${{ github.repository_owner }}
+          username: ${{ secrets.DOCKER_USER }}
           password: ${{ secrets.DOCKER_ACCESS_TOKEN }}

-      - name: Build and Push
+      - name: Build and Push Versioned Tags
         uses: docker/build-push-action@v2
         with:
           context: ./runner

@@ -66,6 +72,52 @@
             RUNNER_VERSION=${{ env.RUNNER_VERSION }}
             DOCKER_VERSION=${{ env.DOCKER_VERSION }}
           tags: |
             ${{ env.DOCKERHUB_USERNAME }}/${{ matrix.name }}:v${{ env.RUNNER_VERSION }}
             ${{ env.DOCKERHUB_USERNAME }}/${{ matrix.name }}:v${{ env.RUNNER_VERSION }}-${{ steps.vars.outputs.sha_short }}
+            ${{ env.DOCKERHUB_USERNAME }}/${{ matrix.name }}:v${{ env.RUNNER_VERSION }}-ubuntu-${{ matrix.os-version }}
+            ${{ env.DOCKERHUB_USERNAME }}/${{ matrix.name }}:v${{ env.RUNNER_VERSION }}-ubuntu-${{ matrix.os-version }}-${{ steps.vars.outputs.sha_short }}
+
+  latest-tags:
+    if: ${{ github.event_name == 'push' || github.event_name == 'release' }}
+    runs-on: ubuntu-latest
+    name: Build ${{ matrix.name }}-latest
+    strategy:
+      matrix:
+        include:
+          - name: actions-runner
+            dockerfile: Dockerfile
+          - name: actions-runner-dind
+            dockerfile: Dockerfile.dindrunner
+    env:
+      RUNNER_VERSION: 2.277.1
+      DOCKER_VERSION: 19.03.12
+      DOCKERHUB_USERNAME: ${{ secrets.DOCKER_USER }}
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v2
+
+      - name: Set up QEMU
+        uses: docker/setup-qemu-action@v1
+
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v1
+        with:
+          version: latest
+
+      - name: Login to DockerHub
+        uses: docker/login-action@v1
+        with:
+          username: ${{ secrets.DOCKER_USER }}
+          password: ${{ secrets.DOCKER_ACCESS_TOKEN }}
+
+      - name: Build and Push Latest Tag
+        uses: docker/build-push-action@v2
+        with:
+          context: ./runner
+          file: ./runner/${{ matrix.dockerfile }}
+          platforms: linux/amd64,linux/arm64
+          push: true
+          build-args: |
+            RUNNER_VERSION=${{ env.RUNNER_VERSION }}
+            DOCKER_VERSION=${{ env.DOCKER_VERSION }}
+          tags: |
+            ${{ env.DOCKERHUB_USERNAME }}/${{ matrix.name }}:latest
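To make the new tagging scheme concrete, here is a sketch of the tags one matrix entry produces; the short SHA `deadbee` and the `summerwind` Docker Hub namespace are illustrative stand-ins, not values taken from this diff:

```yaml
# Hypothetical expansion for the matrix entry
#   {name: actions-runner, os-version: 20.04, dockerfile: Dockerfile}
# with RUNNER_VERSION=2.278.0 and steps.vars.outputs.sha_short=deadbee:
tags:
  - summerwind/actions-runner:v2.278.0
  - summerwind/actions-runner:v2.278.0-deadbee
  - summerwind/actions-runner:v2.278.0-ubuntu-20.04
  - summerwind/actions-runner:v2.278.0-ubuntu-20.04-deadbee
# The separate latest-tags job then pushes summerwind/actions-runner:latest.
```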
.github/workflows/on-push-lint-charts.yml (4 lines changed)

@@ -4,9 +4,11 @@ on:
   push:
     paths:
       - 'charts/**'
+      - '!charts/actions-runner-controller/docs/**'
+      - '!charts/actions-runner-controller/*.md'
       - '.github/**'
       - '!.github/*.md'
   workflow_dispatch:

 env:
   KUBE_SCORE_VERSION: 1.10.0
   HELM_VERSION: v3.4.1

@@ -7,7 +7,9 @@ on:
     - main # assume that the branch name may change in future
   paths:
     - 'charts/**'
+    - '!charts/actions-runner-controller/docs/**'
     - '.github/**'
+    - '!**.md'
   workflow_dispatch:

 env:
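The `!` patterns rely on GitHub Actions path-filter ordering: later patterns override earlier matches. A hedged reading of the new trigger, with hypothetical file paths:

```yaml
# 'charts/**' includes every chart file; the negated patterns that follow
# re-exclude a subset of those matches.
paths:
  - 'charts/**'                                  # charts/.../templates/deployment.yaml -> triggers
  - '!charts/actions-runner-controller/docs/**'  # charts/.../docs/UPGRADING.md -> excluded again
  - '!charts/actions-runner-controller/*.md'     # charts/actions-runner-controller/README.md -> excluded again
```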
.github/workflows/release.yml (4 lines changed)

@@ -7,7 +7,7 @@ jobs:
     runs-on: ubuntu-latest
     name: Release
     env:
-      DOCKERHUB_USERNAME: ${{ github.repository_owner }}
+      DOCKERHUB_USERNAME: ${{ secrets.DOCKER_USER }}
     steps:
       - name: Set outputs
         id: vars

@@ -47,7 +47,7 @@
       - name: Login to DockerHub
         uses: docker/login-action@v1
         with:
-          username: ${{ github.repository_owner }}
+          username: ${{ secrets.DOCKER_USER }}
           password: ${{ secrets.DOCKER_ACCESS_TOKEN }}

       - name: Build and Push
.github/workflows/test.yaml (12 lines changed)

@@ -7,6 +7,8 @@ on:
   paths-ignore:
     - 'runner/**'
     - .github/workflows/build-and-release-runners.yml
+    - '*.md'
+    - '.gitignore'

 jobs:
   test:

@@ -15,11 +17,15 @@
     steps:
       - name: Checkout
         uses: actions/checkout@v2
+      - uses: actions/setup-go@v2
+        with:
+          go-version: '^1.16.5'
+      - run: go version
       - name: Install kubebuilder
         run: |
-          curl -L -O https://github.com/kubernetes-sigs/kubebuilder/releases/download/v2.2.0/kubebuilder_2.2.0_linux_amd64.tar.gz
-          tar zxvf kubebuilder_2.2.0_linux_amd64.tar.gz
-          sudo mv kubebuilder_2.2.0_linux_amd64 /usr/local/kubebuilder
+          curl -L -O https://github.com/kubernetes-sigs/kubebuilder/releases/download/v2.3.2/kubebuilder_2.3.2_linux_amd64.tar.gz
+          tar zxvf kubebuilder_2.3.2_linux_amd64.tar.gz
+          sudo mv kubebuilder_2.3.2_linux_amd64 /usr/local/kubebuilder
       - name: Run tests
         run: make test
       - name: Verify manifests are up-to-date
.github/workflows/wip.yml (6 lines changed)

@@ -4,13 +4,15 @@ on:
     - master
   paths-ignore:
     - "runner/**"
+    - "**.md"
+    - ".gitignore"

 jobs:
   build:
     runs-on: ubuntu-latest
     name: release-latest
     env:
-      DOCKERHUB_USERNAME: ${{ github.repository_owner }}
+      DOCKERHUB_USERNAME: ${{ secrets.DOCKER_USER }}
     steps:
       - name: Checkout
         uses: actions/checkout@v2

@@ -27,7 +29,7 @@
       - name: Login to DockerHub
         uses: docker/login-action@v1
         with:
-          username: ${{ github.repository_owner }}
+          username: ${{ secrets.DOCKER_USER }}
           password: ${{ secrets.DOCKER_ACCESS_TOKEN }}

       # Considered unstable builds
.gitignore (9 lines changed)

@@ -1,3 +1,4 @@
+# Deploy Assets
 release

 # Binaries for programs and plugins

@@ -15,17 +16,21 @@ bin
 *.out

 # Kubernetes Generated files - skip generated files, except for vendored files

 !vendor/**/zz_generated.*

 # editor and IDE paraphernalia
 .vscode
 .idea
 *.swp
 *.swo
 *~

 .envrc
 .env
+.test.env
+*.pem

+# OS
-.DS_STORE
+.DS_STORE

+/test-assets
CONTRIBUTING.md (new file, 142 lines)

@@ -0,0 +1,142 @@
## Contributing

### How to Contribute a Patch

How you should go about a patch depends on what you are patching. Below are some guides on how to test patches locally as well as how to develop the controller and runners.

When submitting a PR for a change, please provide evidence that your change works, as we still need to work on improving the CI of the project. Some resources are provided to help achieve this; see this guide for details.

#### Running an End to End Test

> **Notes for Ubuntu 20.04+ users**
>
> If you're using Ubuntu 20.04 or greater, you might have installed `docker` with `snap`.
>
> If you want to stick with `snap`-provided `docker`, do not forget to set `TMPDIR` to
> somewhere under `$HOME`.
> Otherwise `kind load docker-image` fails while running `docker save`.
> See https://kind.sigs.k8s.io/docs/user/known-issues/#docker-installed-with-snap for more information.

To test your local changes against both PAT and App based authentication please run the `acceptance` make target with the authentication configuration details provided:

```shell
# This sets `VERSION` envvar to some appropriate value
. hack/make-env.sh

DOCKER_USER=*** \
GITHUB_TOKEN=*** \
APP_ID=*** \
PRIVATE_KEY_FILE_PATH=path/to/pem/file \
INSTALLATION_ID=*** \
make acceptance
```

**Rerunning a failed test**

When one of the tests run by `make acceptance` fails, you'd probably like to rerun only the failed one.

This can be done with `make acceptance/run`, setting the combination of `ACCEPTANCE_TEST_DEPLOYMENT_TOOL=helm|kubectl` and `ACCEPTANCE_TEST_SECRET_TYPE=token|app` values that failed (note that you just need to set the corresponding authentication configuration in this circumstance).

In the example below, we rerun the test for the combination `ACCEPTANCE_TEST_DEPLOYMENT_TOOL=helm ACCEPTANCE_TEST_SECRET_TYPE=token` only:

```shell
DOCKER_USER=*** \
GITHUB_TOKEN=*** \
ACCEPTANCE_TEST_DEPLOYMENT_TOOL=helm \
ACCEPTANCE_TEST_SECRET_TYPE=token \
make acceptance/run
```

**Testing in a non-kind cluster**

If you prefer to test in a non-kind cluster, you can instead run:

```shell
KUBECONFIG=path/to/kubeconfig \
DOCKER_USER=*** \
GITHUB_TOKEN=*** \
APP_ID=*** \
PRIVATE_KEY_FILE_PATH=path/to/pem/file \
INSTALLATION_ID=*** \
ACCEPTANCE_TEST_SECRET_TYPE=token \
make docker-build acceptance/setup \
  acceptance/deploy \
  acceptance/tests
```

#### Developing the Controller

Rerunning the whole acceptance test suite from scratch on every little change to the controller, the runner, and the chart would be counter-productive.

To make your development cycle faster, use the command below to build, deploy, and update all three:

```shell
# Let's assume we have all other envvars like DOCKER_USER, GITHUB_TOKEN already set.
# The below command will (re)build `actions-runner-controller:controller1` and `actions-runner:runner1`,
# load those into kind nodes, and then rerun kubectl or helm to install/upgrade the controller,
# and finally upgrade the runner deployment to use the new runner image.
#
# As helm 3 and kubectl are unable to recreate a pod when the tag hasn't changed,
# you either need to bump VERSION and RUNNER_TAG on each run,
# or manually run `kubectl delete pod $POD` on respective pods for changes to actually take effect.

VERSION=controller1 \
RUNNER_TAG=runner1 \
make acceptance/pull acceptance/kind docker-build acceptance/load acceptance/deploy
```

If you've already deployed actions-runner-controller and only want to recreate pods to use the newer image, you can run:

```shell
NAME=$DOCKER_USER/actions-runner-controller \
  make docker-build acceptance/load && \
  kubectl -n actions-runner-system delete po $(kubectl -n actions-runner-system get po -ojsonpath={.items[*].metadata.name})
```

Similarly, if you'd like to recreate runner pods with the newer runner image:

```shell
NAME=$DOCKER_USER/actions-runner make \
  -C runner docker-{build,push}-ubuntu && \
  (kubectl get po -ojsonpath={.items[*].metadata.name} | xargs -n1 kubectl delete po)
```

#### Developing the Runners

**Tests**

A set of example pipelines (./acceptance/pipelines) are provided in this repository which you can use to validate that your runners are working as expected. When raising a PR please run the relevant suites to prove that your change hasn't broken anything.

**Running Ginkgo Tests**

You can run the integration test suite that is written in Ginkgo with:

```shell
make test-with-deps
```

This will first install a few binaries required to set up the integration test environment and then run `go test` to start the Ginkgo tests.

If you don't want to use `make`, like when you're running tests from your IDE, install the required binaries to `/usr/local/kubebuilder/bin`. That's the directory in which controller-runtime's `envtest` framework locates the binaries.

```shell
sudo mkdir -p /usr/local/kubebuilder/bin
make kube-apiserver etcd
sudo mv test-assets/{etcd,kube-apiserver} /usr/local/kubebuilder/bin/
go test -v -run TestAPIs github.com/actions-runner-controller/actions-runner-controller/controllers
```

To run Ginkgo tests selectively, set the pattern of target test names to `GINKGO_FOCUS`.
All the Ginkgo tests that match `GINKGO_FOCUS` will be run.

```shell
GINKGO_FOCUS='[It] should create a new Runner resource from the specified template, add a another Runner on replicas increased, and removes all the replicas when set to 0' \
go test -v -run TestAPIs github.com/actions-runner-controller/actions-runner-controller/controllers
```

#### Helm Version Bumps

**Chart Version :** When bumping the chart version follow semantic versioning https://semver.org/<br />
**App Version :** When bumping the app version you will also need to bump the chart version too. Again, follow semantic versioning when bumping the chart.

To determine if you need to bump the MAJOR, MINOR or PATCH versions you will need to review the changes between the previous app version and the new app version and / or ask a maintainer to advise.
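As an illustration of the version-bump rule above, a hypothetical `charts/actions-runner-controller/Chart.yaml` edit for a MINOR app bump might look like this (both version numbers are made up):

```yaml
apiVersion: v2
name: actions-runner-controller
# Chart version: bumped on every chart change, following semver.
version: 0.12.0
# App version: bumping this (e.g. 0.18.2 -> 0.19.0) requires bumping
# the chart version above as well.
appVersion: 0.19.0
```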
Makefile (195 lines changed)

@@ -1,11 +1,30 @@
-NAME ?= summerwind/actions-runner-controller
+ifdef DOCKER_USER
+NAME ?= ${DOCKER_USER}/actions-runner-controller
+else
+NAME ?= summerwind/actions-runner-controller
+endif
+DOCKER_USER ?= $(shell echo ${NAME} | cut -d / -f1)
 VERSION ?= latest
+RUNNER_NAME ?= ${DOCKER_USER}/actions-runner
+RUNNER_TAG ?= ${VERSION}
+TEST_REPO ?= ${DOCKER_USER}/actions-runner-controller
+TEST_ORG ?=
+TEST_ORG_REPO ?=
+SYNC_PERIOD ?= 5m
+USE_RUNNERSET ?=
+KUBECONTEXT ?= kind-acceptance
+CLUSTER ?= acceptance
+CERT_MANAGER_VERSION ?= v1.1.1

 # From https://github.com/VictoriaMetrics/operator/pull/44
 YAML_DROP=$(YQ) delete --inplace
-YAML_DROP_PREFIX=spec.validation.openAPIV3Schema.properties.spec.properties
+
+# If you encounter errors like the below, you very likely need to update this to follow e.g. a CRD version change:
+#   CustomResourceDefinition.apiextensions.k8s.io "runners.actions.summerwind.dev" is invalid: spec.preserveUnknownFields: Invalid value: true: must be false in order to use defaults in the schema
+YAML_DROP_PREFIX=spec.versions[0].schema.openAPIV3Schema.properties.spec.properties

 # Produce CRDs that work back to Kubernetes 1.11 (no version conversion)
-CRD_OPTIONS ?= "crd:trivialVersions=true"
+CRD_OPTIONS ?= "crd:trivialVersions=true,generateEmbeddedObjectMeta=true"

 # Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set)
 ifeq (,$(shell go env GOBIN))

@@ -14,6 +33,8 @@ else
 GOBIN=$(shell go env GOBIN)
 endif

+TEST_ASSETS=$(PWD)/test-assets
+
 # default list of platforms for which multiarch image is built
 ifeq (${PLATFORMS}, )
 export PLATFORMS="linux/amd64,linux/arm64"

@@ -22,8 +43,8 @@ endif
 # if IMG_RESULT is unspecified, by default the image will be pushed to registry
 ifeq (${IMG_RESULT}, load)
 	export PUSH_ARG="--load"
-	# if load is specified, image will be built only for the build machine architecture.
-	export PLATFORMS="local"
+	# if load is specified, image will be built only for the build machine architecture.
+	export PLATFORMS="local"
 else ifeq (${IMG_RESULT}, cache)
 	# if cache is specified, image will only be available in the build cache, it won't be pushed or loaded
 	# therefore no PUSH_ARG will be specified

@@ -33,9 +54,18 @@ endif

 all: manager

+GO_TEST_ARGS ?= -short
+
 # Run tests
 test: generate fmt vet manifests
-	go test ./... -coverprofile cover.out
+	go test $(GO_TEST_ARGS) ./... -coverprofile cover.out
+
+test-with-deps: kube-apiserver etcd kubectl
+	# See https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/envtest#pkg-constants
+	TEST_ASSET_KUBE_APISERVER=$(KUBE_APISERVER_BIN) \
+	TEST_ASSET_ETCD=$(ETCD_BIN) \
+	TEST_ASSET_KUBECTL=$(KUBECTL_BIN) \
+	make test

 # Build manager binary
 manager: generate fmt vet

@@ -96,12 +126,9 @@ generate: controller-gen
 	$(CONTROLLER_GEN) object:headerFile=./hack/boilerplate.go.txt paths="./..."

 # Build the docker image
-docker-build: test
+docker-build:
 	docker build . -t ${NAME}:${VERSION}
-
-# Push the docker image
-docker-push:
-	docker push ${NAME}:${VERSION}
+	docker build runner -t ${RUNNER_NAME}:${RUNNER_TAG} --build-arg TARGETPLATFORM=$(shell arch)

 docker-buildx:
 	export DOCKER_CLI_EXPERIMENTAL=enabled

@@ -115,6 +142,11 @@ docker-buildx:
 		-f Dockerfile \
 		. ${PUSH_ARG}

+# Push the docker image
+docker-push:
+	docker push ${NAME}:${VERSION}
+	docker push ${RUNNER_NAME}:${RUNNER_TAG}
+
 # Generate the release manifest file
 release: manifests
 	cd config/manager && kustomize edit set image controller=${NAME}:${VERSION}

@@ -126,19 +158,41 @@ release/clean:
 	rm -rf release

 .PHONY: acceptance
-acceptance: release/clean docker-build docker-push release
-	ACCEPTANCE_TEST_SECRET_TYPE=token make acceptance/kind acceptance/setup acceptance/tests acceptance/teardown
-	ACCEPTANCE_TEST_SECRET_TYPE=app make acceptance/kind acceptance/setup acceptance/tests acceptance/teardown
-	ACCEPTANCE_TEST_DEPLOYMENT_TOOL=helm ACCEPTANCE_TEST_SECRET_TYPE=token make acceptance/kind acceptance/setup acceptance/tests acceptance/teardown
-	ACCEPTANCE_TEST_DEPLOYMENT_TOOL=helm ACCEPTANCE_TEST_SECRET_TYPE=app make acceptance/kind acceptance/setup acceptance/tests acceptance/teardown
+acceptance: release/clean acceptance/pull docker-build release
+	ACCEPTANCE_TEST_SECRET_TYPE=token make acceptance/run
+	ACCEPTANCE_TEST_SECRET_TYPE=app make acceptance/run
+	ACCEPTANCE_TEST_DEPLOYMENT_TOOL=helm ACCEPTANCE_TEST_SECRET_TYPE=token make acceptance/run
+	ACCEPTANCE_TEST_DEPLOYMENT_TOOL=helm ACCEPTANCE_TEST_SECRET_TYPE=app make acceptance/run
+
+acceptance/run: acceptance/kind acceptance/load acceptance/setup acceptance/deploy acceptance/tests acceptance/teardown

 acceptance/kind:
-	kind create cluster --name acceptance
-	kubectl cluster-info --context kind-acceptance
+	kind create cluster --name ${CLUSTER} --config acceptance/kind.yaml
+
+# Set TMPDIR to somewhere under $HOME when you use docker installed with Ubuntu snap
+# Otherwise `kind load docker-image` fails while running `docker save`.
+# See https://kind.sigs.k8s.io/docs/user/known-issues/#docker-installed-with-snap
+acceptance/load:
+	kind load docker-image ${NAME}:${VERSION} --name ${CLUSTER}
+	kind load docker-image quay.io/brancz/kube-rbac-proxy:v0.10.0 --name ${CLUSTER}
+	kind load docker-image ${RUNNER_NAME}:${RUNNER_TAG} --name ${CLUSTER}
+	kind load docker-image docker:dind --name ${CLUSTER}
+	kind load docker-image quay.io/jetstack/cert-manager-controller:$(CERT_MANAGER_VERSION) --name ${CLUSTER}
+	kind load docker-image quay.io/jetstack/cert-manager-cainjector:$(CERT_MANAGER_VERSION) --name ${CLUSTER}
+	kind load docker-image quay.io/jetstack/cert-manager-webhook:$(CERT_MANAGER_VERSION) --name ${CLUSTER}
+	kubectl cluster-info --context ${KUBECONTEXT}
+
+# Pull the docker images for acceptance
+acceptance/pull:
+	docker pull quay.io/brancz/kube-rbac-proxy:v0.10.0
+	docker pull docker:dind
+	docker pull quay.io/jetstack/cert-manager-controller:$(CERT_MANAGER_VERSION)
+	docker pull quay.io/jetstack/cert-manager-cainjector:$(CERT_MANAGER_VERSION)
+	docker pull quay.io/jetstack/cert-manager-webhook:$(CERT_MANAGER_VERSION)

 acceptance/setup:
-	kubectl apply --validate=false -f https://github.com/jetstack/cert-manager/releases/download/v1.0.4/cert-manager.yaml #kubectl create namespace actions-runner-system
-	kubectl -n cert-manager wait deploy/cert-manager-cainjector --for condition=available --timeout 60s
+	kubectl apply --validate=false -f https://github.com/jetstack/cert-manager/releases/download/$(CERT_MANAGER_VERSION)/cert-manager.yaml #kubectl create namespace actions-runner-system
+	kubectl -n cert-manager wait deploy/cert-manager-cainjector --for condition=available --timeout 90s
 	kubectl -n cert-manager wait deploy/cert-manager-webhook --for condition=available --timeout 60s
 	kubectl -n cert-manager wait deploy/cert-manager --for condition=available --timeout 60s
 	kubectl create namespace actions-runner-system || true

@@ -146,18 +200,35 @@ acceptance/setup:
 	sleep 5

 acceptance/teardown:
-	kind delete cluster --name acceptance
+	kind delete cluster --name ${CLUSTER}

+acceptance/deploy:
+	NAME=${NAME} DOCKER_USER=${DOCKER_USER} VERSION=${VERSION} RUNNER_NAME=${RUNNER_NAME} RUNNER_TAG=${RUNNER_TAG} TEST_REPO=${TEST_REPO} \
+	TEST_ORG=${TEST_ORG} TEST_ORG_REPO=${TEST_ORG_REPO} SYNC_PERIOD=${SYNC_PERIOD} \
+	USE_RUNNERSET=${USE_RUNNERSET} \
+	acceptance/deploy.sh
+
 acceptance/tests:
-	acceptance/deploy.sh
 	acceptance/checks.sh

+# We use -count=1 instead of `go clean -testcache`
+# See https://terratest.gruntwork.io/docs/testing-best-practices/avoid-test-caching/
+.PHONY: e2e
+e2e:
+	go test -count=1 -v -timeout 600s -run '^TestE2E$$' ./test/e2e
+
 # Upload release file to GitHub.
 github-release: release
 	ghr ${VERSION} release/

-# find or download controller-gen
-# download controller-gen if necessary
+# Find or download controller-gen
+#
+# Note that controller-gen newer than 0.4.1 is needed for https://github.com/kubernetes-sigs/controller-tools/issues/444#issuecomment-680168439
+# Otherwise we get errors like the below:
+#   Error: failed to install CRD crds/actions.summerwind.dev_runnersets.yaml: CustomResourceDefinition.apiextensions.k8s.io "runnersets.actions.summerwind.dev" is invalid: [spec.validation.openAPIV3Schema.properties[spec].properties[template].properties[spec].properties[containers].items.properties[ports].items.properties[protocol].default: Required value: this property is in x-kubernetes-list-map-keys, so it must have a default or be a required property, spec.validation.openAPIV3Schema.properties[spec].properties[template].properties[spec].properties[initContainers].items.properties[ports].items.properties[protocol].default: Required value: this property is in x-kubernetes-list-map-keys, so it must have a default or be a required property]
+#
+# Note that controller-gen newer than 0.6.0 is needed due to https://github.com/kubernetes-sigs/controller-tools/issues/448
+# Otherwise ObjectMeta embedded in Spec results in empty on the storage.
 controller-gen:
-ifeq (, $(shell which controller-gen))
+ifeq (, $(wildcard $(GOBIN)/controller-gen))

@@ -166,7 +237,7 @@ ifeq (, $(wildcard $(GOBIN)/controller-gen))
 	CONTROLLER_GEN_TMP_DIR=$$(mktemp -d) ;\
 	cd $$CONTROLLER_GEN_TMP_DIR ;\
 	go mod init tmp ;\
-	go get sigs.k8s.io/controller-tools/cmd/controller-gen@v0.3.0 ;\
+	go get sigs.k8s.io/controller-tools/cmd/controller-gen@v0.6.0 ;\
 	rm -rf $$CONTROLLER_GEN_TMP_DIR ;\
 	}
 endif

@@ -191,3 +262,77 @@ ifeq (, $(wildcard $(GOBIN)/yq))
 	}
 endif
 YQ=$(GOBIN)/yq
+
+OS_NAME := $(shell uname -s | tr A-Z a-z)
+
+# find or download etcd
+etcd:
+ifeq (, $(shell which etcd))
+ifeq (, $(wildcard $(TEST_ASSETS)/etcd))
+	@{ \
+	set -xe ;\
+	INSTALL_TMP_DIR=$$(mktemp -d) ;\
+	cd $$INSTALL_TMP_DIR ;\
+	wget https://github.com/kubernetes-sigs/kubebuilder/releases/download/v2.3.2/kubebuilder_2.3.2_$(OS_NAME)_amd64.tar.gz ;\
+	mkdir -p $(TEST_ASSETS) ;\
+	tar zxvf kubebuilder_2.3.2_$(OS_NAME)_amd64.tar.gz ;\
+	mv kubebuilder_2.3.2_$(OS_NAME)_amd64/bin/etcd $(TEST_ASSETS)/etcd ;\
+	mv kubebuilder_2.3.2_$(OS_NAME)_amd64/bin/kube-apiserver $(TEST_ASSETS)/kube-apiserver ;\
+	mv kubebuilder_2.3.2_$(OS_NAME)_amd64/bin/kubectl $(TEST_ASSETS)/kubectl ;\
+	rm -rf $$INSTALL_TMP_DIR ;\
+	}
+ETCD_BIN=$(TEST_ASSETS)/etcd
+else
+ETCD_BIN=$(TEST_ASSETS)/etcd
+endif
+else
+ETCD_BIN=$(shell which etcd)
+endif
+
+# find or download kube-apiserver
+kube-apiserver:
+ifeq (, $(shell which kube-apiserver))
+ifeq (, $(wildcard $(TEST_ASSETS)/kube-apiserver))
+	@{ \
+	set -xe ;\
+	INSTALL_TMP_DIR=$$(mktemp -d) ;\
+	cd $$INSTALL_TMP_DIR ;\
+	wget https://github.com/kubernetes-sigs/kubebuilder/releases/download/v2.3.2/kubebuilder_2.3.2_$(OS_NAME)_amd64.tar.gz ;\
+	mkdir -p $(TEST_ASSETS) ;\
+	tar zxvf kubebuilder_2.3.2_$(OS_NAME)_amd64.tar.gz ;\
+	mv kubebuilder_2.3.2_$(OS_NAME)_amd64/bin/etcd $(TEST_ASSETS)/etcd ;\
+	mv kubebuilder_2.3.2_$(OS_NAME)_amd64/bin/kube-apiserver $(TEST_ASSETS)/kube-apiserver ;\
+	mv kubebuilder_2.3.2_$(OS_NAME)_amd64/bin/kubectl $(TEST_ASSETS)/kubectl ;\
+	rm -rf $$INSTALL_TMP_DIR ;\
+	}
+KUBE_APISERVER_BIN=$(TEST_ASSETS)/kube-apiserver
+else
+KUBE_APISERVER_BIN=$(TEST_ASSETS)/kube-apiserver
+endif
+else
+KUBE_APISERVER_BIN=$(shell which kube-apiserver)
+endif
+
+# find or download kubectl
+kubectl:
+ifeq (, $(shell which kubectl))
+ifeq (, $(wildcard $(TEST_ASSETS)/kubectl))
+	@{ \
+	set -xe ;\
+	INSTALL_TMP_DIR=$$(mktemp -d) ;\
+	cd $$INSTALL_TMP_DIR ;\
+	wget https://github.com/kubernetes-sigs/kubebuilder/releases/download/v2.3.2/kubebuilder_2.3.2_$(OS_NAME)_amd64.tar.gz ;\
+	mkdir -p $(TEST_ASSETS) ;\
+	tar zxvf kubebuilder_2.3.2_$(OS_NAME)_amd64.tar.gz ;\
+	mv kubebuilder_2.3.2_$(OS_NAME)_amd64/bin/etcd $(TEST_ASSETS)/etcd ;\
+	mv kubebuilder_2.3.2_$(OS_NAME)_amd64/bin/kube-apiserver $(TEST_ASSETS)/kube-apiserver ;\
+	mv kubebuilder_2.3.2_$(OS_NAME)_amd64/bin/kubectl $(TEST_ASSETS)/kubectl ;\
+	rm -rf $$INSTALL_TMP_DIR ;\
+	}
+KUBECTL_BIN=$(TEST_ASSETS)/kubectl
+else
+KUBECTL_BIN=$(TEST_ASSETS)/kubectl
+endif
+else
+KUBECTL_BIN=$(shell which kubectl)
+endif
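The new `acceptance/kind` target references an `acceptance/kind.yaml` that is not shown in this diff; a minimal kind cluster config of that shape, purely as a sketch, would be:

```yaml
# Sketch only: the real acceptance/kind.yaml is not part of this diff.
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
  - role: control-plane
  - role: worker
```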
PROJECT (2 lines changed)

@@ -1,5 +1,5 @@
 domain: summerwind.dev
-repo: github.com/summerwind/actions-runner-controller
+repo: github.com/actions-runner-controller/actions-runner-controller
 resources:
 - group: actions
   kind: Runner
README.md (675 lines changed)

@@ -8,25 +8,29 @@ ToC:

 - [Motivation](#motivation)
 - [Installation](#installation)
-  - [GitHub Enterprise support](#github-enterprise-support)
-- [Setting up authentication with GitHub API](#setting-up-authentication-with-github-api)
-  - [Deploying using GitHub App Authentication](#deploying-using-github-app-authentication)
-  - [Deploying using PAT Authentication](#deploying-using-pat-authentication)
+  - [GitHub Enterprise Support](#github-enterprise-support)
+- [Setting Up Authentication with GitHub API](#setting-up-authentication-with-github-api)
+  - [Deploying Using GitHub App Authentication](#deploying-using-github-app-authentication)
+  - [Deploying Using PAT Authentication](#deploying-using-pat-authentication)
 - [Usage](#usage)
   - [Repository Runners](#repository-runners)
   - [Organization Runners](#organization-runners)
+  - [Enterprise Runners](#enterprise-runners)
   - [Runner Deployments](#runnerdeployments)
-  - [Autoscaling](#autoscaling)
-  - [Faster Autoscaling with GitHub Webhook](#faster-autoscaling-with-github-webhook)
-  - [Note on scaling to/from 0](#note-on-scaling-tofrom-0)
+  - [Autoscaling](#autoscaling)
+  - [Faster Autoscaling with GitHub Webhook](#faster-autoscaling-with-github-webhook)
+  - [Autoscaling to/from 0](#autoscaling-tofrom-0)
+  - [Scheduled Overrides](#scheduled-overrides)
   - [Runner with DinD](#runner-with-dind)
-  - [Additional tweaks](#additional-tweaks)
-  - [Runner labels](#runner-labels)
-  - [Runner groups](#runner-groups)
-  - [Using EKS IAM role for service accounts](#using-eks-iam-role-for-service-accounts)
-  - [Software installed in the runner image](#software-installed-in-the-runner-image)
-  - [Common errors](#common-errors)
-- [Developing](#developing)
-- [Alternatives](#alternatives)
+  - [Additional Tweaks](#additional-tweaks)
+  - [Runner Labels](#runner-labels)
+  - [Runner Groups](#runner-groups)
+  - [Using IRSA (IAM Roles for Service Accounts) in EKS](#using-irsa-iam-roles-for-service-accounts-in-eks)
+  - [Stateful Runners](#stateful-runners)
+  - [Software Installed in the Runner Image](#software-installed-in-the-runner-image)
+  - [Common Errors](#common-errors)
+- [Contributing](#contributing)

 ## Motivation
@@ -42,87 +46,83 @@ actions-runner-controller uses [cert-manager](https://cert-manager.io/docs/insta

 Install the custom resource and actions-runner-controller with `kubectl` or `helm`. This will create actions-runner-system namespace in your Kubernetes and deploy the required resources.

-`kubectl`:
+**Kubectl Deployment:**

 ```shell
-# REPLACE "v0.17.0" with the version you wish to deploy
-kubectl apply -f https://github.com/summerwind/actions-runner-controller/releases/download/v0.17.0/actions-runner-controller.yaml
+# REPLACE "v0.18.2" with the version you wish to deploy
+kubectl apply -f https://github.com/actions-runner-controller/actions-runner-controller/releases/download/v0.18.2/actions-runner-controller.yaml
 ```

-`helm`:
+**Helm Deployment:**
+
+__**Note: For all configuration options for the Helm chart see the chart's [README](./charts/actions-runner-controller/README.md)

 ```shell
-helm repo add actions-runner-controller https://summerwind.github.io/actions-runner-controller
-helm upgrade --install -n actions-runner-system actions-runner-controller/actions-runner-controller
+helm repo add actions-runner-controller https://actions-runner-controller.github.io/actions-runner-controller
+helm upgrade --install --namespace actions-runner-system --create-namespace \
+             --wait actions-runner-controller actions-runner-controller/actions-runner-controller
 ```

-### Github Enterprise support
+### GitHub Enterprise Support

-If you use either Github Enterprise Cloud or Server, you can use **actions-runner-controller** with those, too.
-Authentication works same way as with public Github (repo and organization level).
-The minimum version of Github Enterprise Server is 3.0.0 (or rc1/rc2).
-__**NOTE : The maintainers do not have an Enterprise environment to be able to test changes and so are reliant on the community for testing, support is a best endeavors basis only and is community driven**__
+The solution supports both GitHub Enterprise Cloud and Server editions as well as regular GitHub. Both PAT (personal access token) and GitHub App authentication work for installations that will be deploying either repository level and / or organization level runners. If you need to deploy enterprise level runners then you are restricted to PAT based authentication, as GitHub doesn't currently support GitHub App based authentication for enterprise runners.
+
+If you are deploying this solution into a GitHub Enterprise Server environment then you will need version >= [3.0.0](https://docs.github.com/en/enterprise-server@3.0/admin/release-notes#3.0.0).
+
+When deploying the solution for a GitHub Enterprise Server environment you need to provide an additional environment variable as part of the controller deployment:

 ```shell
 kubectl set env deploy controller-manager -c manager GITHUB_ENTERPRISE_URL=<GHEC/S URL> --namespace actions-runner-system
 ```
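The `kubectl set env` command above can also be expressed declaratively; here is a sketch of the equivalent env entry on the manager container, assuming the default `actions-runner-system/controller-manager` deployment and a hypothetical GHES URL:

```yaml
spec:
  template:
    spec:
      containers:
        - name: manager
          env:
            - name: GITHUB_ENTERPRISE_URL
              value: https://ghe.example.com  # hypothetical GHES URL
```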
-#### Enterprise runners usage
+__**Note: The repository maintainers do not have an enterprise environment (cloud or server). Support for the enterprise specific feature set is community driven and on a best effort basis. PRs from the community are welcomed to add features and maintain support.**__

-In order to use enterprise runners you must have Admin access to Github Enterprise and you should do Personal Access Token (PAT)
-with `enterprise:admin` access. Enterprise runners are not possible to run with Github APP or any other permission.
-
-When you use enterprise runners those will get access to Github Organisations. However, access to the repositories is **NOT**
-allowed by default. Each Github Organisation must allow Enterprise runner groups to be used in repositories.
-This is needed only one time and is permanent after that.
-
-Example:
-
-```yaml
-apiVersion: actions.summerwind.dev/v1alpha1
-kind: RunnerDeployment
-metadata:
-  name: ghe-runner-deployment
-spec:
-  replicas: 2
-  template:
-    spec:
-      enterprise: your-enterprise-name
-      dockerdWithinRunnerContainer: true
-      resources:
-        limits:
-          cpu: "4000m"
-          memory: "2Gi"
-        requests:
-          cpu: "200m"
-          memory: "200Mi"
-      volumeMounts:
-        - mountPath: /runner
-          name: runner
-      volumes:
-        - name: runner
-          emptyDir: {}
-
-```
-## Setting up authentication with GitHub API
+## Setting Up Authentication with GitHub API

 There are two ways for actions-runner-controller to authenticate with the GitHub API (only 1 can be configured at a time however):

-1. Using GitHub App.
-2. Using Personal Access Token.
+1. Using a GitHub App (not supported for enterprise level runners due to lack of support from GitHub)
+2. Using a PAT

-Functionality wise there isn't a difference between the 2 authentication methods. There are however some benefits to using a GitHub App for authentication over a PAT such as an [increased API quota](https://docs.github.com/en/developers/apps/rate-limits-for-github-apps), if you run into rate limiting consider deploying this solution using GitHub App authentication instead.
+Functionality wise, there isn't much of a difference between the 2 authentication methods. The primary benefit of authenticating via a GitHub App is an [increased API quota](https://docs.github.com/en/developers/apps/rate-limits-for-github-apps).

-### Deploying using GitHub App Authentication
+If you are deploying the solution for a GitHub Enterprise Server environment you are able to [configure your rate limiting settings](https://docs.github.com/en/enterprise-server@3.0/admin/configuration/configuring-rate-limits) making the main benefit irrelevant. If you're deploying the solution for a GitHub Enterprise Cloud or regular GitHub environment and you run into rate limiting issues, consider deploying the solution using the GitHub App authentication method instead.

-You can create a GitHub App for either your account or any organization. If you want to create a GitHub App for your account, open the following link to the creation page, enter any unique name in the "GitHub App name" field, and hit the "Create GitHub App" button at the bottom of the page.
+### Deploying Using GitHub App Authentication

-- [Create GitHub Apps on your account](https://github.com/settings/apps/new?url=http://github.com/summerwind/actions-runner-controller&webhook_active=false&public=false&administration=write&actions=read)
+You can create a GitHub App for either your user account or any organization, below are the app permissions required for each supported type of runner:
+
+_Note: Links are provided further down to create an app for your logged in user account or an organisation with the permissions for all runner types set in each link's query string_
+
+**Required Permissions for Repository Runners:**<br />
+**Repository Permissions**
+
+* Actions (read)
+* Administration (read / write)
+* Metadata (read)
+
+**Required Permissions for Organisation Runners:**<br />
+**Repository Permissions**
+
+* Actions (read)
+* Metadata (read)
+
+**Organization Permissions**
+* Self-hosted runners (read / write)
+
+_Note: All API routes mapped to their permissions can be found [here](https://docs.github.com/en/rest/reference/permissions-required-for-github-apps) if you wish to review_
+
+---
+
+**Setup Steps**
+
+If you want to create a GitHub App for your account, open the following link to the creation page, enter any unique name in the "GitHub App name" field, and hit the "Create GitHub App" button at the bottom of the page.
+
+- [Create GitHub Apps on your account](https://github.com/settings/apps/new?url=http://github.com/actions-runner-controller/actions-runner-controller&webhook_active=false&public=false&administration=write&actions=read)

 If you want to create a GitHub App for your organization, replace the `:org` part of the following URL with your organization name before opening it. Then enter any unique name in the "GitHub App name" field, and hit the "Create GitHub App" button at the bottom of the page to create a GitHub App.

-- [Create GitHub Apps on your organization](https://github.com/organizations/:org/settings/apps/new?url=http://github.com/summerwind/actions-runner-controller&webhook_active=false&public=false&administration=write&organization_self_hosted_runners=write&actions=read)
+- [Create GitHub Apps on your organization](https://github.com/organizations/:org/settings/apps/new?url=http://github.com/actions-runner-controller/actions-runner-controller&webhook_active=false&public=false&administration=write&organization_self_hosted_runners=write&actions=read)

 You will see an *App ID* on the page of the GitHub App you created as follows, the value of this App ID will be used later.
@@ -141,8 +141,11 @@ When the installation is complete, you will be taken to a URL in one of the following formats:
 - `https://github.com/settings/installations/${INSTALLATION_ID}`
 - `https://github.com/organizations/eventreactor/settings/installations/${INSTALLATION_ID}`

 Finally, register the App ID (`APP_ID`), Installation ID (`INSTALLATION_ID`), and downloaded private key file (`PRIVATE_KEY_FILE_PATH`) to Kubernetes as Secret.

+**Kubectl Deployment:**
+
 ```shell
 $ kubectl create secret generic controller-manager \
     -n actions-runner-system \

@@ -151,29 +154,41 @@
     --from-file=github_app_private_key=${PRIVATE_KEY_FILE_PATH}
 ```

-### Deploying using PAT Authentication
+**Helm Deployment:**

-Personal Acess Token can be used to register a self-hosted runner by *actions-runner-controller*.
+Configure your values.yaml, see the chart's [README](./charts/actions-runner-controller/README.md) for deploying the secret via Helm

-Self-hosted runners in GitHub can either be connected to a single repository, or to a GitHub organization (so they are available to all repositories in the organization). How you plan on using the runner will affect what scopes are needed for the token.
+### Deploying Using PAT Authentication
+
+Personal Access Tokens can be used to register a self-hosted runner by *actions-runner-controller*.

 Log-in to a GitHub account that has `admin` privileges for the repository, and [create a personal access token](https://github.com/settings/tokens/new) with the appropriate scopes listed below:

-**Scopes for a Repository Runner**
+**Required Scopes for Repository Runners**

 * repo (Full control)

-**Scopes for a Organisation Runner**
+**Required Scopes for Organization Runners**

 * repo (Full control)
 * admin:org (Full control)
-* admin:public_key - read:public_key
-* admin:repo_hook - read:repo_hook
-* admin:org_hook
-* notifications
-* workflow
+* admin:public_key (read:public_key)
+* admin:repo_hook (read:repo_hook)
+* admin:org_hook (Full control)
+* notifications (Full control)
+* workflow (Full control)

-Once you have created the appropriate token, deploy it as a secret to your kubernetes cluster that you are going to deploy the solution on:
+**Required Scopes for Enterprise Runners**
+
+* enterprise:admin (Full control)
+
+_Note: When you deploy enterprise runners they will get access to organisations, however, access to the repositories themselves is **NOT** allowed by default. Each GitHub organisation must allow enterprise runner groups to be used in repositories as an initial one time configuration step, this only needs to be done once after which it is permanent for that runner group._
+
+---
+
+Once you have created the appropriate token, deploy it as a secret to your Kubernetes cluster that you are going to deploy the solution on:
+
+**Kubectl Deployment:**

 ```shell
 kubectl create secret generic controller-manager \
@@ -181,8 +196,17 @@ kubectl create secret generic controller-manager \
     --from-literal=github_token=${GITHUB_TOKEN}
 ```

+**Helm Deployment:**
+
+Configure your values.yaml, see the chart's [README](./charts/actions-runner-controller/README.md) for deploying the secret via Helm
+
 ## Usage

+[GitHub self-hosted runners can be deployed at various levels in a management hierarchy](https://docs.github.com/en/actions/hosting-your-own-runners/about-self-hosted-runners#about-self-hosted-runners):
+- The repository level
+- The organization level
+- The enterprise level
+
 There are two ways to use this controller:

 - Manage runners one by one with `Runner`.
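For the Helm route mentioned above, a values.yaml sketch for the PAT secret might look like the following; the `authSecret` key names should be verified against the chart's README for your chart version:

```yaml
# Sketch, assuming the chart's authSecret convention:
authSecret:
  create: true
  github_token: "ghp_xxxxxxxxxxxxxxxxxxxx"  # placeholder PAT with the scopes above
```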
@@ -190,7 +214,7 @@ There are two ways to use this controller:
|
||||
|
||||

### Repository Runners

To launch a single self-hosted runner, you need to create a manifest file that includes a `Runner` resource, as follows. This example launches a self-hosted runner with the name *example-runner* for the *actions-runner-controller/actions-runner-controller* repository.

```yaml
# runner.yaml
apiVersion: actions.summerwind.dev/v1alpha1
kind: Runner
metadata:
  name: example-runner
spec:
  repository: actions-runner-controller/actions-runner-controller
  env: []
```

Apply the manifest to your cluster, and you can see that the Runner resource has been created:

```shell
$ kubectl get runners
NAME             REPOSITORY                                             STATUS
example-runner   actions-runner-controller/actions-runner-controller   Running
```

You can also see that the runner pod is running.

### Organization Runners

To add the runner to an organization, you only need to replace the `repository` field with `organization`, so the runner will register itself to the organization.
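
The organization-level manifest is elided in this excerpt; mirroring the enterprise example below, a minimal organization-level `Runner` might look like this (the metadata name and organization name are placeholders):

```yaml
# runner.yaml
apiVersion: actions.summerwind.dev/v1alpha1
kind: Runner
metadata:
  name: example-org-runner
spec:
  organization: your-organization-name
```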

Now you can see the runner on the organization level (if you have organization owner permissions).

### Enterprise Runners

To add the runner to an enterprise, you only need to replace the `repository` field with `enterprise`, so the runner will register itself to the enterprise.

```yaml
# runner.yaml
apiVersion: actions.summerwind.dev/v1alpha1
kind: Runner
metadata:
  name: example-enterprise-runner
spec:
  enterprise: your-enterprise-name
```

Now you can see the runner on the enterprise level (if you have enterprise access permissions).

### RunnerDeployments

There are `RunnerReplicaSet` and `RunnerDeployment` resources that correspond to `ReplicaSet` and `Deployment`, but for `Runner`s.
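
The manifest itself is elided in this excerpt; a minimal `RunnerDeployment` consistent with the output shown below might look like this (the replica count and repository are assumptions based on that output):

```yaml
# runnerdeployment.yaml
apiVersion: actions.summerwind.dev/v1alpha1
kind: RunnerDeployment
metadata:
  name: example-runnerdeploy
spec:
  replicas: 2
  template:
    spec:
      repository: mumoshu/actions-runner-controller-ci
      env: []
```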

Apply the manifest file to your cluster:

```shell
$ kubectl apply -f runnerdeployment.yaml
runnerdeployment.actions.summerwind.dev/example-runnerdeploy created
```

You can see that two runners have been created, as specified by `replicas: 2`:

```shell
$ kubectl get runners
NAME                             REPOSITORY                              STATUS
example-runnerdeploy2475h595fr   mumoshu/actions-runner-controller-ci    Running
example-runnerdeploy2475ht2qbr   mumoshu/actions-runner-controller-ci    Running
```

##### Note on scaling to/from 0

> This feature is available since actions-runner-controller v0.19.0

You can either delete the runner deployment, or update it to have `replicas: 0`, so that there will be 0 runner pods in the cluster. This, in combination with e.g. `cluster-autoscaler`, enables you to save on infrastructure costs when there's no need to run Actions jobs.

```yaml
# runnerdeployment.yaml
apiVersion: actions.summerwind.dev/v1alpha1
kind: RunnerDeployment
metadata:
  name: example-runnerdeploy
spec:
  replicas: 0
```

The implication of setting `replicas: 0` instead of deleting the runner deployment is that you can let GitHub Actions queue jobs until there are one or more runners. See [#465](https://github.com/actions-runner-controller/actions-runner-controller/pull/465) for more information.

Also note that the controller creates a "registration-only" runner per RunnerReplicaSet when it is scaled to zero,
and retains it until there are one or more runners available.

This, in combination with a correctly configured HorizontalRunnerAutoscaler, allows you to automatically [scale to/from 0](#autoscaling-tofrom-0).

### Autoscaling

__**IMPORTANT: Due to limitations / a bug in GitHub's [routing engine](https://docs.github.com/en/actions/hosting-your-own-runners/using-self-hosted-runners-in-a-workflow#routing-precedence-for-self-hosted-runners), autoscaling does NOT work correctly with RunnerDeployments that target the enterprise level. Scaling activity works as expected; however, jobs fail to get assigned to the scaled-out replicas. This was explored in issue [#470](https://github.com/actions-runner-controller/actions-runner-controller/issues/470). Once GitHub resolves the issue with their backend service, we expect the solution to be able to support autoscaled enterprise RunnerDeployments without any additional changes.**__

A `RunnerDeployment` (excluding enterprise runners) can scale the number of runners between the `minReplicas` and `maxReplicas` fields based on the chosen scaling metric, as defined in the `metrics` attribute.

**Scaling Metrics**

**TotalNumberOfQueuedAndInProgressWorkflowRuns**

In the below example, `actions-runner` will poll GitHub for all pending workflows with the poll period defined by the sync period configuration. It will then scale to e.g. 3 if there are 3 pending jobs at sync time.
With this scaling metric we are required to define a list of repositories within our metric.

The scale-out performance is controlled via the manager container's startup `--sync-period` argument. The default value is set to 10 minutes to prevent default deployments from rate limiting themselves against the GitHub API.

**Kustomize Config :** The period can be customised in the `config/default/manager_auth_proxy_patch.yaml` patch<br />
**Helm Config :** `syncPeriod`

**Benefits of this metric**
1. Supports named repositories, allowing you to restrict the runner to a specified set of repositories server-side.
2. Scales the runner count based on the actual queue depth of the jobs, meaning a more 1:1 scaling of runners to queued jobs (caveat, see drawback #4)
3. Like all scaling metrics, you can manage workflow allocation to the RunnerDeployment through the use of [GitHub labels](#runner-labels).

**Drawbacks of this metric**
1. Repositories must be named within the scaling metric; maintaining a list of repositories may not be viable in larger environments or self-serve environments.
2. May not scale quickly enough for some users' needs. This metric is pull-based, and so the queue depth is polled as configured by the sync period; as a result, scaling performance is bound by this sync period, meaning there is a lag to scaling activity.
3. Relatively large amounts of API requests are required to maintain this metric; you may run into API rate-limiting issues depending on the size of your environment and how aggressive your sync period configuration is.
4. The GitHub API doesn't provide a way to filter workflow jobs to just those targeting self-hosted runners. If your environment's workflows target both self-hosted and GitHub-hosted runners, then the queue depth this metric scales against isn't a true 1:1 mapping of queue depth to required runner count. As a result, this metric may scale too aggressively for your actual self-hosted runner count needs.

Example `RunnerDeployment` backed by a `HorizontalRunnerAutoscaler`:

_Important!!! We no longer include the attribute `replicas` in our `RunnerDeployment` if we are configuring autoscaling!_

```yaml
apiVersion: actions.summerwind.dev/v1alpha1
kind: RunnerDeployment
metadata:
  name: example-runner-deployment
spec:
  template:
    spec:
      repository: actions-runner-controller/actions-runner-controller
---
apiVersion: actions.summerwind.dev/v1alpha1
kind: HorizontalRunnerAutoscaler
metadata:
  name: example-runner-deployment-autoscaler
spec:
  scaleTargetRef:
    name: example-runner-deployment
  minReplicas: 1   # illustrative bounds; parts of this example are elided in this excerpt
  maxReplicas: 5
  metrics:
  - type: TotalNumberOfQueuedAndInProgressWorkflowRuns
    repositoryNames:
    - actions-runner-controller/actions-runner-controller
```

Additionally, the `HorizontalRunnerAutoscaler` also has an anti-flapping option that prevents a periodic loop of scaling up and down. By default it will not scale down until a delay (600 seconds, i.e. 10 minutes) has passed after a scale out; the delay can be customized via `scaleDownDelaySecondsAfterScaleOut` in the spec.
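
For example (the field name is taken from the acceptance test data later in this document; its placement under `spec` is assumed):

```yaml
spec:
  scaleTargetRef:
    name: example-runner-deployment
  scaleDownDelaySecondsAfterScaleOut: 300 # seconds; 600 (10 minutes) is the default
```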

**PercentageRunnersBusy**

The `HorizontalRunnerAutoscaler` will poll GitHub, based on the configured sync period, for the number of busy runners that live in the RunnerDeployment's namespace, and scale based on the settings.

**Kustomize Config :** The period can be customised in the `config/default/manager_auth_proxy_patch.yaml` patch<br />
**Helm Config :** `syncPeriod`

**Benefits of this metric**
1. Supports named repositories server-side, the same as the `TotalNumberOfQueuedAndInProgressWorkflowRuns` metric [#313](https://github.com/actions-runner-controller/actions-runner-controller/pull/313)
2. Supports GitHub organization-wide scaling without maintaining an explicit list of repositories; this is especially useful for those that are working at a larger scale. [#223](https://github.com/actions-runner-controller/actions-runner-controller/pull/223)
3. Like all scaling metrics, you can manage workflow allocation to the RunnerDeployment through the use of [GitHub labels](#runner-labels)
4. Supports scaling the desired runner count on both a percentage increase / decrease basis as well as on a fixed increase / decrease count basis [#223](https://github.com/actions-runner-controller/actions-runner-controller/pull/223) [#315](https://github.com/actions-runner-controller/actions-runner-controller/pull/315)

**Drawbacks of this metric**
1. May not scale quickly enough for some users' needs. This metric is pull-based, and so the number of busy runners is polled as configured by the sync period; as a result, scaling performance is bound by this sync period, meaning there is a lag to scaling activity.
2. We are scaling up and down based on indicative information rather than a count of the actual number of queued jobs, and so the desired runner count is likely to under-provision or over-provision new runners relative to the actual job queue depth; this may or may not be a problem for you.

Examples of each scaling type implemented with a `RunnerDeployment` backed by a `HorizontalRunnerAutoscaler`:

_Important!!! We no longer include the attribute `replicas` in our `RunnerDeployment` if we are configuring autoscaling!_

```yaml
---
apiVersion: actions.summerwind.dev/v1alpha1
kind: HorizontalRunnerAutoscaler
metadata:
  name: example-runner-deployment-autoscaler
spec:
  scaleTargetRef:
    name: example-runner-deployment
  minReplicas: 1
  maxReplicas: 5
  metrics:
  # The remainder of this example is elided in this excerpt; the threshold and
  # factor values below mirror the acceptance test data later in this document.
  - type: PercentageRunnersBusy
    scaleUpThreshold: '0.75'    # the percentage of busy runners at which the desired count is re-evaluated to scale up
    scaleDownThreshold: '0.3'   # the percentage of busy runners at which the desired count is re-evaluated to scale down
    scaleUpFactor: '2'          # the multiplier applied to the current runner count when scaling up
    scaleDownFactor: '0.5'      # the multiplier applied to the current runner count when scaling down
```

#### Faster Autoscaling with GitHub Webhook

__**IMPORTANT: Due to missing webhook events, webhook-based scaling is not available for enterprise-level RunnerDeployments. This was explored in issue [#470](https://github.com/actions-runner-controller/actions-runner-controller/issues/470).**__

> This feature is an ADVANCED feature which may require more work to set up.
> Please be prepared to put some time and effort into learning and leveraging this feature!

`actions-runner-controller` has an optional Webhook server that receives GitHub Webhook events and scales
[`RunnerDeployments`](#runnerdeployments) by updating corresponding [`HorizontalRunnerAutoscalers`](#autoscaling).

Today, the Webhook server can be configured to respond to GitHub `check_run`, `pull_request`, and `push` events
by scaling up the matching `HorizontalRunnerAutoscaler` by N replica(s), where `N` is configurable within
the `HorizontalRunnerAutoscaler`'s `Spec`.

More concretely, you can configure the targeted GitHub event types and the `N` in
`scaleUpTriggers`:

```yaml
kind: HorizontalRunnerAutoscaler
spec:
  scaleTargetRef:
    name: myrunners
  scaleUpTriggers:
  - githubEvent:
      checkRun:
        types: ["created"]
        status: "queued"
    amount: 1
    duration: "1m"
```

In contrast, the standard autoscaling requires you to wait for the next sync period to add
insufficient runners. You can definitely shorten the sync period to make the standard autoscaling more responsive,
but doing so will eventually result in the controller becoming non-functional due to GitHub API rate limits.

> You can learn the implementation details in [#282](https://github.com/actions-runner-controller/actions-runner-controller/pull/282)

To enable this feature, you first need to install the webhook server.

Once the webhook server is installed, point the `HorizontalRunnerAutoscaler`'s `scaleUpTriggers` at the events you want to react to. For `check_run` events (the `status`, `amount`, and `duration` fields are elided in this excerpt; the values shown mirror the acceptance test data later in this document):

```yaml
kind: HorizontalRunnerAutoscaler
spec:
  scaleTargetRef:
    name: myrunners
  scaleUpTriggers:
  - githubEvent:
      checkRun:
        types: ["created"]
        status: "queued"
    amount: 1
    duration: "1m"
```

And for `pull_request` events:

```yaml
kind: HorizontalRunnerAutoscaler
spec:
  scaleTargetRef:
    name: myrunners
  scaleUpTriggers:
  - githubEvent:
      pullRequest:
        types: ["synchronize"]
```

See ["activity types"](https://docs.github.com/en/actions/reference/events-that-trigger-workflows#pull_request) for the list of valid values for `scaleUpTriggers[].githubEvent.pullRequest.types`.

#### Autoscaling to/from 0

> This feature is available since actions-runner-controller v0.19.0

Previously, we discussed [how to scale a RunnerDeployment to/from 0](#note-on-scaling-tofrom-0).

To automate the process of scaling to/from 0, you can use `HorizontalRunnerAutoscaler`, with a caveat.

That is, you need to choose one of the following configurations for metrics and triggers:

- `TotalNumberOfQueuedAndInProgressWorkflowRuns`
- `PercentageRunnersBusy` + `TotalNumberOfQueuedAndInProgressWorkflowRuns`
- `PercentageRunnersBusy` + Webhook-based autoscaling

This is because `PercentageRunnersBusy`, by its definition, needs one or more GitHub runners that can become `busy`, which cannot happen when you have 0 active runners.

If, and only if, the HorizontalRunnerAutoscaler is configured with a secondary metric of `TotalNumberOfQueuedAndInProgressWorkflowRuns` and the controller sees the primary `PercentageRunnersBusy` metric return 0 desired replicas, it uses the secondary metric to calculate the desired replicas once again.

A correctly configured `TotalNumberOfQueuedAndInProgressWorkflowRuns` can return non-zero desired replicas even when there are no runners other than [registration-only runners](#note-on-scaling-tofrom-0), hence the `PercentageRunnersBusy` + `TotalNumberOfQueuedAndInProgressWorkflowRuns` configuration makes scaling from zero possible.

Similarly, Webhook-based autoscaling works regardless of whether there are active runners, hence the `PercentageRunnersBusy` + Webhook-based autoscaling configuration also makes scaling from zero possible.
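
For illustration, a combined configuration might look like the sketch below. The trigger and metric values mirror the acceptance test data later in this document; the resource names and repository are placeholders:

```yaml
apiVersion: actions.summerwind.dev/v1alpha1
kind: HorizontalRunnerAutoscaler
metadata:
  name: example-runner-deployment-autoscaler
spec:
  scaleTargetRef:
    name: example-runner-deployment
  minReplicas: 0   # allows scaling all the way down to zero runners
  maxReplicas: 5
  metrics:
  # Primary metric: scale on the percentage of busy runners
  - type: PercentageRunnersBusy
    scaleUpThreshold: '0.75'
    scaleDownThreshold: '0.3'
    scaleUpFactor: '2'
    scaleDownFactor: '0.5'
  # Secondary metric: used when the primary metric computes 0 desired replicas,
  # so the deployment can scale back up from zero
  - type: TotalNumberOfQueuedAndInProgressWorkflowRuns
    repositoryNames:
    - yourorg/yourrepo
```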

#### Scheduled Overrides

> This feature is available since actions-runner-controller v0.19.0

`Scheduled Overrides` allows you to configure `HorizontalRunnerAutoscaler` so that its `Spec` gets updated only during a certain period of time.

Usually, this feature is used for the following scenarios:

- You want to pay for the infrastructure required to run runners only during business hours
- You want to prepare for scheduled spikes in workloads

For the first scenario, you might consider a configuration like the below:

```yaml
apiVersion: actions.summerwind.dev/v1alpha1
kind: HorizontalRunnerAutoscaler
metadata:
  name: example-runner-deployment-autoscaler
spec:
  scaleTargetRef:
    name: example-runner-deployment
  scheduledOverrides:
  # Override minReplicas to 0 only between 0am Saturday and 0am Monday
  - startTime: "2021-05-01T00:00:00+09:00"
    endTime: "2021-05-03T00:00:00+09:00"
    recurrenceRule:
      frequency: Weekly
      untilTime: "2022-05-01T00:00:00+09:00"
    minReplicas: 0
  minReplicas: 1
```

For the second scenario, you might consider something like the below:

```yaml
apiVersion: actions.summerwind.dev/v1alpha1
kind: HorizontalRunnerAutoscaler
metadata:
  name: example-runner-deployment-autoscaler
spec:
  scaleTargetRef:
    name: example-runner-deployment
  scheduledOverrides:
  # Override minReplicas to 100 only between 2021-06-01T00:00:00+09:00 and 2021-06-03T00:00:00+09:00
  - startTime: "2021-06-01T00:00:00+09:00"
    endTime: "2021-06-03T00:00:00+09:00"
    minReplicas: 100
  minReplicas: 1
```

The most basic usage of this feature is actually the second scenario mentioned above.
A scheduled override without `recurrenceRule` is considered a one-off override that is active between `startTime` and `endTime`. In the second scenario, it overrides `minReplicas` to `100` only between `2021-06-01T00:00:00+09:00` and `2021-06-03T00:00:00+09:00`.

A scheduled override with `recurrenceRule` is considered a recurring override. A recurring override is initially active between `startTime` and `endTime`, and then it repeatedly gets reactivated after the period of time denoted by `frequency`.

`frequency` can take one of the following values:

- `Daily`
- `Weekly`
- `Monthly`
- `Yearly`

By default, a scheduled override repeats forever. If you want it to repeat until a specific point in time, define `untilTime`. The controller creates the last recurrence of the override when the recurrence's `startTime` is equal to or earlier than `untilTime`.

Do make sure to leave enough slack for `untilTime`, so that a delayed or offline `actions-runner-controller` is much less likely to miss the last recurrence. For example, you might want to set `untilTime` to `M` minutes after the last recurrence's `startTime`, so that `actions-runner-controller` being offline for up to `M` minutes doesn't cause it to miss the last recurrence.

**Combining Multiple Scheduled Overrides**:

In case you have more complex scenarios, try writing two or more entries under `scheduledOverrides`.

Earlier entries are prioritized higher than later entries. So you usually define one-off overrides at the top of your list, then yearly, monthly, weekly, and lastly daily overrides, as in the sketch below.
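
A sketch of combined overrides, mirroring the acceptance test data later in this document (a one-off override listed first, then a weekly recurring one):

```yaml
spec:
  scaleTargetRef:
    name: example-runner-deployment
  scheduledOverrides:
  # One-off override: highest priority, so it comes first
  - startTime: "2021-05-11T16:05:00+09:00"
    endTime: "2021-05-11T16:40:00+09:00"
    minReplicas: 2
  # Weekly recurring override, e.g. scale to zero on weekends
  - startTime: "2021-05-01T00:00:00+09:00"
    endTime: "2021-05-03T00:00:00+09:00"
    recurrenceRule:
      frequency: Weekly
      untilTime: "2022-05-01T00:00:00+09:00"
    minReplicas: 0
  minReplicas: 1
  maxReplicas: 5
```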

### Runner with DinD

When using the default runner, the runner pod starts up 2 containers: runner and DinD (Docker-in-Docker). This might create issues if there's a `LimitRange` set on the namespace. Instead, you can run dockerd within the runner container itself, as in the sketch below.
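
The original example is elided in this excerpt; a sketch of a `RunnerDeployment` running dockerd within the runner container (the metadata name is a placeholder, and the image follows the `summerwind/actions-runner-dind` default mentioned later in this document):

```yaml
apiVersion: actions.summerwind.dev/v1alpha1
kind: RunnerDeployment
metadata:
  name: example-dind-runnerdeploy
spec:
  replicas: 2
  template:
    spec:
      # Run dockerd inside the runner container instead of as a privileged sidecar
      dockerdWithinRunnerContainer: true
      image: summerwind/actions-runner-dind
      repository: mumoshu/actions-runner-controller-ci
```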

This also helps with resources, as you don't need to assign resources separately to the docker and runner containers.

### Additional Tweaks

You can pass details through the spec selector. Here's an example of what you may like to do:

```yaml
apiVersion: actions.summerwind.dev/v1alpha1
kind: RunnerDeployment
metadata:
  name: actions-runner  # name assumed for illustration; elided in this excerpt
spec:
  replicas: 2
  template:
    metadata:
      annotations:
        cluster-autoscaler.kubernetes.io/safe-to-evict: "true"
    spec:
      nodeSelector:
        node-role.kubernetes.io/test: ""

      securityContext:
        # All level/role/type/user values will vary based on your SELinux policies.
        # See https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux_atomic_host/7/html/container_security_guide/docker_selinux_security_policy for information about SELinux with containers
        seLinuxOptions:
          level: "s0"
          role: "system_r"
          type: "super_t"
          user: "system_u"

      tolerations:
        - effect: NoSchedule
          key: node-role.kubernetes.io/test
          operator: Exists
        # Timeout before a pod on a tainted node is evicted
        # (the toleration's key is elided in this excerpt)
        - operator: "Exists"
          effect: "NoExecute"
          tolerationSeconds: 10

      repository: mumoshu/actions-runner-controller-ci
      # The default "summerwind/actions-runner" images are available at DockerHub:
      # https://hub.docker.com/r/summerwind/actions-runner
      # You can also build your own and specify it like the below:
      image: custom-image/actions-runner:latest
      imagePullPolicy: Always
      resources:
        # resource values elided in this excerpt; shown here for illustration
        limits:
          cpu: "4.0"
          memory: "8Gi"
        requests:
          cpu: "2.0"
          memory: "4Gi"
      # true (default) = The runner restarts after running jobs, to ensure a clean and reproducible build environment
      # false = The runner is persistent across jobs and doesn't automatically restart
      # This directly controls the behaviour of the `--once` flag provided to the github runner
      ephemeral: false
      # true (default) = A privileged docker sidecar container is included in the runner pod.
      # false = A docker sidecar container is not included in the runner pod and you can't use docker.
      # If set to false, there are no privileged containers and you cannot use docker.
      dockerEnabled: false
      # Optional Docker containers network MTU
      # If your network card MTU is smaller than Docker's default 1500, you might encounter Docker networking issues.
      # To fix these issues, you should setup Docker MTU smaller than or equal to that on the outgoing network card.
      # More information:
      # - https://mlohr.com/docker-mtu/
      dockerMTU: 1500
      # Optional Docker registry mirror
      # Docker Hub has enabled rate-limiting for free plans.
      # To avoid disruptions in your CI/CD pipelines, you might want to setup an external or on-premises Docker registry mirror.
      # More information:
      # - https://docs.docker.com/docker-hub/download-rate-limit/
      # - https://cloud.google.com/container-registry/docs/pulling-cached-images
      dockerRegistryMirror: https://mirror.gcr.io/
      # false (default) = Docker support is provided by a sidecar container deployed in the runner pod.
      # true = No docker sidecar container is deployed in the runner pod but docker can be used within the runner container instead. The image summerwind/actions-runner-dind is used by default.
      dockerdWithinRunnerContainer: true
      # Docker sidecar container image tweaks examples below, only applicable if dockerdWithinRunnerContainer = false
      dockerdContainerResources:
        # resource values elided in this excerpt; shown here for illustration
        limits:
          cpu: "4.0"
          memory: "8Gi"
        requests:
          cpu: "2.0"
          memory: "4Gi"
      # You can customise this setting allowing you to change the default working directory location
      # for example, the below setting is the same as on the ubuntu-18.04 image
      workDir: /home/runner/work
      # You can mount some of the shared volumes to the dind container using dockerVolumeMounts, like any other volume mounting.
      # NOTE: in case you want to use a hostPath like the following example, make sure that Kubernetes doesn't schedule more than one runner
      # per physical host. You can achieve that by setting pod anti-affinity rules and/or resource requests/limits.
      volumes:
        - name: docker-extra
          hostPath:
            path: /mnt/docker-extra
            type: DirectoryOrCreate
        - name: repo
          hostPath:
            path: /mnt/repo
            type: DirectoryOrCreate
      dockerVolumeMounts:
        - mountPath: /var/lib/docker
          name: docker-extra
      # You can mount some of the shared volumes to the runner container using volumeMounts.
      # NOTE: Do not try to mount the volume onto the runner workdir itself as it will not work. You could, however, mount it on a subdirectory in the runner workdir.
      # Please see https://github.com/actions-runner-controller/actions-runner-controller/issues/630#issuecomment-862087323 for more information.
      volumeMounts:
        - mountPath: /home/runner/work/repo
          name: repo
      # Optional name of the container runtime configuration that should be used for pods.
      # This must match the name of a RuntimeClass resource available on the cluster.
      # More info: https://kubernetes.io/docs/concepts/containers/runtime-class
      runtimeClassName: "runc"
```

### Runner Labels

To run a workflow job on a self-hosted runner, you can use the following syntax in your workflow:
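
The snippet is elided in this excerpt; a minimal workflow sketch (the job name is arbitrary):

```yaml
jobs:
  release:
    runs-on: self-hosted
```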

To distinguish between multiple kinds of self-hosted runners, you can specify custom labels in your `Runner` or `RunnerDeployment` spec, for example:

```yaml
apiVersion: actions.summerwind.dev/v1alpha1
kind: RunnerDeployment
metadata:
  name: custom-runner
spec:
  replicas: 1
  template:
    spec:
      repository: actions-runner-controller/actions-runner-controller
      labels:
        - custom-runner
```
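
The `custom-runner` label can then be referenced from a workflow, for example:

```yaml
jobs:
  release:
    runs-on: [self-hosted, custom-runner]
```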

Note that if you specify `self-hosted` in your workflow, then this will run your job on _any_ self-hosted runner, regardless of the labels that they have.

### Runner Groups

Runner groups can be used to limit which repositories are able to use the GitHub Runner at an organization level. Runner groups have to be [created in GitHub first](https://docs.github.com/en/actions/hosting-your-own-runners/managing-access-to-self-hosted-runners-using-groups) before they can be referenced.

To add the runner to the group `NewGroup`, specify the group in your `Runner` or `RunnerDeployment` spec:

```yaml
apiVersion: actions.summerwind.dev/v1alpha1
kind: Runner
metadata:
  name: custom-runner
spec:
  # other runner fields (e.g. repository or organization) are elided in this excerpt
  group: NewGroup
```

### Using IRSA (IAM Roles for Service Accounts) in EKS

`actions-runner-controller` v0.15.0 or later has support for IRSA in EKS.

As with regular pods and deployments, you first need an existing service account with the IAM role associated.
Create one using e.g. `eksctl`. You can refer to [the EKS documentation](https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html) for more details.

Once you have set up the service account, all you need to do is add `serviceAccountName` and `fsGroup` to any pods that use the IAM-role-enabled service account.

For `RunnerDeployment`, you can set those two fields under the runner spec at `RunnerDeployment.Spec.Template`:

```yaml
apiVersion: actions.summerwind.dev/v1alpha1
kind: RunnerDeployment
metadata:
  name: example-runnerdeploy
spec:
  template:
    spec:
      repository: USER/REPO
      serviceAccountName: my-service-account
      securityContext:
        fsGroup: 1000
```

### Use with Istio

Istio 1.7.0 or greater has `holdApplicationUntilProxyStarts`, added in https://github.com/istio/istio/pull/24737, which enables you to delay the `runner` container startup until the injected `istio-proxy` container finishes starting. Try using it if you need to use Istio. Otherwise the runner is unlikely to work, because it fails to call the GitHub API to register itself while `istio-proxy` is not yet up and running.

Note that there's no official Istio integration in actions-runner-controller. It should work, but it isn't covered by our acceptance tests (contributions are welcome). In addition, none of the actions-runner-controller maintainers use Istio daily. If you need more information, or have any issues using it, refer to the following links:

- https://github.com/actions-runner-controller/actions-runner-controller/issues/591
- https://github.com/actions-runner-controller/actions-runner-controller/pull/592
- https://github.com/istio/istio/issues/11130

### Stateful Runners

> This is documentation for an unreleased version of actions-runner-controller.
>
> It would be great if you could try building the latest controller image following https://github.com/actions-runner-controller/actions-runner-controller#contributing if you are eager to test it early and help
> developers by reporting any bugs :smile:

`actions-runner-controller` supports a `RunnerSet` API that lets you deploy stateful runners. A stateful runner is designed to be able to store data that persists across GitHub Actions workflow and job runs. You might find it useful, for example, to speed up your docker builds by persisting the docker layer cache.

A basic `RunnerSet` would look like this:

```yaml
apiVersion: actions.summerwind.dev/v1alpha1
kind: RunnerSet
metadata:
  name: example
spec:
  ephemeral: false
  replicas: 2
  repository: mumoshu/actions-runner-controller-ci
  # Other mandatory fields from StatefulSet
  selector:
    matchLabels:
      app: example
  serviceName: example
  template:
    metadata:
      labels:
        app: example
```

As it is based on `StatefulSet`, `selector` and `template.metadata.labels` need to be defined and have the exact same set of labels. `serviceName` must be set to some non-empty string, as it is also required by `StatefulSet`.

Runner-related fields like `ephemeral`, `repository`, `organization`, `enterprise`, and so on should be written directly under `spec`.

Fields like `volumeClaimTemplates` that originate from `StatefulSet` should also be written directly under `spec`.

Pod-related fields like security contexts and volumes are written under `spec.template.spec`, as in `StatefulSet`.

Similarly, container-related fields like resource requests and limits, container image names and tags, security context, and so on are written under `spec.template.spec.containers`. There are two reserved container `name`s, `runner` and `docker`. The former is for the container that runs the [actions runner](https://github.com/actions/runner) and the latter is for the container that runs dockerd.

For a more complex example, see below:

```yaml
apiVersion: actions.summerwind.dev/v1alpha1
kind: RunnerSet
metadata:
  name: example
spec:
  # NOTE: RunnerSet supports non-ephemeral runners only today
  ephemeral: false
  replicas: 2
  repository: mumoshu/actions-runner-controller-ci
  dockerdWithinRunnerContainer: true
  template:
    spec:
      securityContext:
        # All level/role/type/user values will vary based on your SELinux policies.
        # See https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux_atomic_host/7/html/container_security_guide/docker_selinux_security_policy for information about SELinux with containers
        seLinuxOptions:
          level: "s0"
          role: "system_r"
          type: "super_t"
          user: "system_u"
      containers:
      - name: runner
        env: []
        resources:
          limits:
            cpu: "4.0"
            memory: "8Gi"
          requests:
            cpu: "2.0"
            memory: "4Gi"
      - name: docker
        resources:
          limits:
            cpu: "4.0"
            memory: "8Gi"
          requests:
            cpu: "2.0"
            memory: "4Gi"
```

You can also read the design and usage documentation written in the original pull request that introduced `RunnerSet` for more information: https://github.com/actions-runner-controller/actions-runner-controller/pull/629

Under the hood, `RunnerSet` relies on Kubernetes's `StatefulSet` and a Mutating Webhook. A statefulset is used to create a number of pods that have stable names and dynamically provisioned persistent volumes, so that each statefulset-managed pod gets the same persistent volume even after restarting. A mutating webhook is used to dynamically inject a runner's "registration token", which is used to call GitHub's "Create Runner" API.

We envision that `RunnerSet` will eventually replace `RunnerDeployment`, as `RunnerSet` provides a more standard API that is easy to learn and use because it is based on `StatefulSet`, and it has support for `volumeClaimTemplates`, which is crucial for managing dynamically provisioned persistent volumes.
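
As a purely hypothetical sketch of that `volumeClaimTemplates` support, persisting the docker layer cache across pod restarts (field placement follows the StatefulSet-based layout described above; names and sizes are placeholders):

```yaml
apiVersion: actions.summerwind.dev/v1alpha1
kind: RunnerSet
metadata:
  name: example-with-pv
spec:
  ephemeral: false
  replicas: 2
  repository: mumoshu/actions-runner-controller-ci
  dockerdWithinRunnerContainer: true
  selector:
    matchLabels:
      app: example-with-pv
  serviceName: example-with-pv
  template:
    metadata:
      labels:
        app: example-with-pv
    spec:
      containers:
      - name: runner
        volumeMounts:
        # Keep the docker layer cache on the per-pod persistent volume
        - name: var-lib-docker
          mountPath: /var/lib/docker
  # StatefulSet-style volume claim template, written directly under spec
  volumeClaimTemplates:
  - metadata:
      name: var-lib-docker
    spec:
      accessModes: ["ReadWriteOnce"]
      resources:
        requests:
          storage: 10Gi
```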

### Software Installed in the Runner Image

**Cloud Tooling**<br />
The project supports being deployed on the various cloud Kubernetes platforms (e.g. EKS); it does not, however, aim to go beyond that. No cloud-specific tooling is bundled in the base runner; this is an active decision to keep the overhead of maintaining the solution manageable.

**Bundled Software**<br />
The GitHub hosted runners include a large amount of pre-installed software packages. GitHub maintains a list in README files at <https://github.com/actions/virtual-environments/tree/main/images/linux>

This solution maintains a few runner images, with `latest` aligning with GitHub's Ubuntu version. Older images are maintained whilst GitHub also provides them as an option. These images do not contain all of the software installed on the GitHub runners. They contain the following subset of packages from the GitHub runners:

- Basic CLI packages
- git
- docker
- build-essentials

If you need additional software in your runners, you can use a custom runner image by specifying it in the `image` field:

```yaml
apiVersion: actions.summerwind.dev/v1alpha1
kind: Runner
metadata:
  name: custom-runner
spec:
  repository: actions-runner-controller/actions-runner-controller
  image: YOUR_CUSTOM_DOCKER_IMAGE
```

### Troubleshooting

#### invalid header field value

```json
2020-11-12T22:17:30.693Z ERROR controller-runtime.controller Reconciler error
{
  "controller": "runner",
  "request": "actions-runner-system/runner-deployment-dk7q8-dk5c9",
  "error": "failed to create registration token: Post \"https://api.github.com/orgs/$YOUR_ORG_HERE/actions/runners/registration-token\": net/http: invalid header field value \"Bearer $YOUR_TOKEN_HERE\\n\" for key Authorization"
}
```

**Solution**

Your base64'ed PAT token has a newline at the end; it needs to be created without a `\n` added. Either:
* `echo -n $TOKEN | base64`
* Create the secret as described in the docs using the shell and documented flags

#### Runner coming up before network available

If you're running your action runners on a service mesh like Istio, you might
have problems with runner configuration accompanied by logs like:

```
....
runner Starting Runner listener with startup type: service
runner Started listener process
runner An error occurred: Not configured
runner Runner listener exited with error code 2
runner Runner listener exit with retryable error, re-launch runner in 5 seconds.
....
```

This is because the `istio-proxy` has not completed configuring itself when the
configuration script tries to communicate with the network.

**Solution**<br />

> This feature is experimental and will be dropped once maintainers think that
> everyone has already migrated to use Istio's `holdApplicationUntilProxyStarts` ([istio/istio#11130](https://github.com/istio/istio/issues/11130)).
>
> Please read the discussion in [#592](https://github.com/actions-runner-controller/actions-runner-controller/pull/592) for more information.

You can add a delay to the entrypoint script by setting the `STARTUP_DELAY` environment
variable. This will cause the script to sleep for `STARTUP_DELAY` seconds.

*Example `Runner` with a 2 second startup delay:*

```yaml
apiVersion: actions.summerwind.dev/v1alpha1
kind: Runner
metadata:
  name: example-runner-with-sleep
spec:
  env:
    - name: STARTUP_DELAY
      value: "2" # Remember! env var values must be strings.
```

*Example `RunnerDeployment` with a 2 second startup delay:*

```yaml
apiVersion: actions.summerwind.dev/v1alpha1
kind: RunnerDeployment
metadata:
  name: example-runnerdeployment-with-sleep
spec:
  template:
    spec:
      env:
        - name: STARTUP_DELAY
          value: "2" # Remember! env var values must be strings.
```

# Contributing

For more details on contributing to the project (including requirements), please check out [Getting Started with Contributing](CONTRIBUTING.md).

@@ -1,29 +1,84 @@
#!/usr/bin/env bash

set +e

repo_runnerdeployment_passed="skipped"
repo_runnerset_passed="skipped"

echo "Checking if RunnerDeployment repo test is set"
if [ "${TEST_REPO}" ] && [ ! "${USE_RUNNERSET}" ]; then
  runner_name=
  count=0
  while [ $count -le 30 ]; do
    echo "Finding Runner ..."
    runner_name=$(kubectl get runner --output=jsonpath="{.items[*].metadata.name}")
    if [ "${runner_name}" ]; then
      while [ $count -le 30 ]; do
        runner_pod_name=
        echo "Found Runner \"${runner_name}\""
        echo "Finding underlying pod ..."
        runner_pod_name=$(kubectl get pod --output=jsonpath="{.items[*].metadata.name}" | grep ${runner_name})
        if [ "${runner_pod_name}" ]; then
          echo "Found underlying pod \"${runner_pod_name}\""
          echo "Waiting for pod \"${runner_pod_name}\" to become ready..."
          kubectl wait pod/${runner_pod_name} --for condition=ready --timeout 270s
          break 2
        fi
        sleep 1
        let "count=count+1"
      done
    fi
    sleep 1
    let "count=count+1"
  done
  if [ $count -ge 30 ]; then
    repo_runnerdeployment_passed=false
  else
    repo_runnerdeployment_passed=true
  fi
elif [ "${TEST_REPO}" ] && [ "${USE_RUNNERSET}" ]; then
  echo "Checking if RunnerSet repo test is set"
  runnerset_name=
  count=0
  while [ $count -le 30 ]; do
    echo "Finding RunnerSet ..."
    runnerset_name=$(kubectl get runnerset --output=jsonpath="{.items[*].metadata.name}")
    if [ "${runnerset_name}" ]; then
      while [ $count -le 30 ]; do
        runnerset_pod_name=
        echo "Found RunnerSet \"${runnerset_name}\""
        echo "Finding underlying pod ..."
        runnerset_pod_name=$(kubectl get pod --output=jsonpath="{.items[*].metadata.name}" | grep ${runnerset_name})
        if [ "${runnerset_pod_name}" ]; then
          echo "Found underlying pod \"${runnerset_pod_name}\""
          echo "Waiting for pod \"${runnerset_pod_name}\" to become ready..."
          kubectl wait pod/${runnerset_pod_name} --for condition=ready --timeout 270s
          break 2
        fi
        sleep 1
        let "count=count+1"
      done
    fi
    sleep 1
    let "count=count+1"
  done
  if [ $count -ge 30 ]; then
    repo_runnerset_passed=false
  else
    repo_runnerset_passed=true
  fi
fi

# Both sub-tests must have passed (or been skipped) for the overall run to pass.
# The braces are needed so that the || pairs group as intended; in shell, && and ||
# have equal precedence and associate left-to-right otherwise.
if { [ ${repo_runnerset_passed} == true ] || [ ${repo_runnerset_passed} == "skipped" ]; } && \
   { [ ${repo_runnerdeployment_passed} == true ] || [ ${repo_runnerdeployment_passed} == "skipped" ]; }; then
  echo "INFO : All tests passed or skipped"
  echo "RunnerSet Repo Test Status : ${repo_runnerset_passed}"
  echo "RunnerDeployment Repo Test Status : ${repo_runnerdeployment_passed}"
else
  echo "ERROR : Some tests failed"
  echo "RunnerSet Repo Test Status : ${repo_runnerset_passed}"
  echo "RunnerDeployment Repo Test Status : ${repo_runnerdeployment_passed}"
  exit 1
fi

@@ -4,10 +4,14 @@ set -e

tpe=${ACCEPTANCE_TEST_SECRET_TYPE}

VALUES_FILE=${VALUES_FILE:-$(dirname $0)/values.yaml}

if [ "${tpe}" == "token" ]; then
  if ! kubectl get secret controller-manager -n actions-runner-system >/dev/null; then
    kubectl create secret generic controller-manager \
      -n actions-runner-system \
      --from-literal=github_token=${GITHUB_TOKEN:?GITHUB_TOKEN must not be empty}
  fi
elif [ "${tpe}" == "app" ]; then
  kubectl create secret generic controller-manager \
    -n actions-runner-system \
@@ -26,17 +30,46 @@ if [ "${tool}" == "helm" ]; then
    charts/actions-runner-controller \
    -n actions-runner-system \
    --create-namespace \
    --set syncPeriod=${SYNC_PERIOD} \
    --set authSecret.create=false \
    --set image.repository=${NAME} \
    --set image.tag=${VERSION} \
    -f ${VALUES_FILE}
  kubectl apply -f charts/actions-runner-controller/crds
  kubectl -n actions-runner-system wait deploy/actions-runner-controller --for condition=available --timeout 60s
else
  kubectl apply \
    -n actions-runner-system \
    -f release/actions-runner-controller.yaml
  kubectl -n actions-runner-system wait deploy/controller-manager --for condition=available --timeout 120s
fi

# Ad hoc: wait for some time until actions-runner-controller's admission webhook gets ready
sleep 20

RUNNER_LABEL=${RUNNER_LABEL:-self-hosted}

if [ -n "${TEST_REPO}" ]; then
  if [ -n "${USE_RUNNERSET}" ]; then
    cat acceptance/testdata/repo.runnerset.yaml | envsubst | kubectl apply -f -
    cat acceptance/testdata/repo.runnerset.hra.yaml | envsubst | kubectl apply -f -
  else
    echo 'Deploying runnerdeployment and hra. Set USE_RUNNERSET if you want to deploy runnerset instead.'
    cat acceptance/testdata/repo.runnerdeploy.yaml | envsubst | kubectl apply -f -
    cat acceptance/testdata/repo.hra.yaml | envsubst | kubectl apply -f -
  fi
else
  echo 'Skipped deploying runnerdeployment and hra. Set TEST_REPO to "yourorg/yourrepo" to deploy.'
fi

if [ -n "${TEST_ORG}" ]; then
  cat acceptance/testdata/org.runnerdeploy.yaml | envsubst | kubectl apply -f -

  if [ -n "${TEST_ORG_REPO}" ]; then
    cat acceptance/testdata/org.hra.yaml | envsubst | kubectl apply -f -
  else
    echo 'Skipped deploying organizational hra. Set TEST_ORG_REPO to "yourorg/yourrepo" to deploy.'
  fi
else
  echo 'Skipped deploying organizational runnerdeployment. Set TEST_ORG to deploy.'
fi
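
A hypothetical invocation of the deployment script above (the script's filename is not shown in this excerpt, so `./the-deploy-script.sh` is a placeholder; the variable names come from the script itself, and `***` values are stand-ins):

```shell
ACCEPTANCE_TEST_SECRET_TYPE=token \
GITHUB_TOKEN=*** \
tool=helm \
NAME=yourrepo/actions-runner-controller \
VERSION=dev \
SYNC_PERIOD=5m \
TEST_REPO=yourorg/yourrepo \
USE_RUNNERSET=1 \
  ./the-deploy-script.sh
```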

acceptance/kind.yaml (new file)

apiVersion: kind.x-k8s.io/v1alpha4
kind: Cluster
nodes:
- role: control-plane
  extraPortMappings:
  - containerPort: 31000
    hostPort: 31000
    listenAddress: "0.0.0.0"
    protocol: tcp
#- role: worker

acceptance/pipelines/eks-integration-tests.yaml (new file)

name: EKS Integration Tests

on:
  workflow_dispatch:

env:
  IRSA_ROLE_ARN:
  ASSUME_ROLE_ARN:
  AWS_REGION:

jobs:
  assume-role-in-runner-test:
    runs-on: ['self-hosted', 'Linux']
    steps:
      - name: Test aws-actions/configure-aws-credentials Action
        uses: aws-actions/configure-aws-credentials@v1
        with:
          aws-region: ${{ env.AWS_REGION }}
          role-to-assume: ${{ env.ASSUME_ROLE_ARN }}
          role-duration-seconds: 900
  assume-role-in-container-test:
    runs-on: ['self-hosted', 'Linux']
    container:
      image: amazon/aws-cli
      env:
        AWS_WEB_IDENTITY_TOKEN_FILE: /var/run/secrets/eks.amazonaws.com/serviceaccount/token
        AWS_ROLE_ARN: ${{ env.IRSA_ROLE_ARN }}
      volumes:
        - /var/run/secrets/eks.amazonaws.com/serviceaccount/token:/var/run/secrets/eks.amazonaws.com/serviceaccount/token
    steps:
      - name: Test aws-actions/configure-aws-credentials Action in container
        uses: aws-actions/configure-aws-credentials@v1
        with:
          aws-region: ${{ env.AWS_REGION }}
          role-to-assume: ${{ env.ASSUME_ROLE_ARN }}
          role-duration-seconds: 900

acceptance/pipelines/runner-integration-tests.yaml (new file)

name: Runner Integration Tests

on:
  workflow_dispatch:

env:
  ImageOS: ubuntu18 # Used by ruby/setup-ruby action | Update me for the runner OS version you are testing against

jobs:
  run-step-in-container-test:
    runs-on: ['self-hosted', 'Linux']
    container:
      image: alpine
    steps:
      - name: Test we are working in the container
        run: |
          if [[ $(sed -n '2p' < /etc/os-release | cut -d "=" -f2) != "alpine" ]]; then
            echo "::error ::Failed OS detection test, could not match /etc/os-release with alpine. Are we really running in the container?"
            echo "/etc/os-release below:"
            cat /etc/os-release
            exit 1
          fi
  setup-python-test:
    runs-on: ['self-hosted', 'Linux']
    steps:
      - name: Print native Python environment
        run: |
          which python
          python --version
      - uses: actions/setup-python@v2
        with:
          python-version: 3.9
      - name: Test actions/setup-python works
        run: |
          VERSION=$(python --version 2>&1 | cut -d ' ' -f2 | cut -d '.' -f1-2)
          if [[ $VERSION != '3.9' ]]; then
            echo "Python version detected : $(python --version 2>&1)"
            echo "::error ::Detected python failed setup version test, could not match version with version specified in the setup action"
            exit 1
          else
            echo "Python version detected : $(python --version 2>&1)"
          fi
  setup-node-test:
    runs-on: ['self-hosted', 'Linux']
    steps:
      - uses: actions/setup-node@v2
        with:
          node-version: '12'
      - name: Test actions/setup-node works
        run: |
          VERSION=$(node --version | cut -c 2- | cut -d '.' -f1)
          if [[ $VERSION != '12' ]]; then
            echo "Node version detected : $(node --version 2>&1)"
            echo "::error ::Detected node failed setup version test, could not match version with version specified in the setup action"
            exit 1
          else
            echo "Node version detected : $(node --version 2>&1)"
          fi
  setup-ruby-test:
    runs-on: ['self-hosted', 'Linux']
    steps:
      - uses: ruby/setup-ruby@v1
        with:
          ruby-version: 3.0
          bundler-cache: true
      - name: Test ruby/setup-ruby works
        run: |
          VERSION=$(ruby --version | cut -d ' ' -f2 | cut -d '.' -f1-2)
          if [[ $VERSION != '3.0' ]]; then
            echo "Ruby version detected : $(ruby --version 2>&1)"
            echo "::error ::Detected ruby failed setup version test, could not match version with version specified in the setup action"
            exit 1
          else
            echo "Ruby version detected : $(ruby --version 2>&1)"
          fi
  python-shell-test:
    runs-on: ['self-hosted', 'Linux']
    steps:
      - name: Test Python shell works
        run: |
          import os
          print(os.environ['PATH'])
        shell: python

acceptance/testdata/org.hra.yaml (new file)

apiVersion: actions.summerwind.dev/v1alpha1
kind: HorizontalRunnerAutoscaler
metadata:
  name: org
spec:
  scaleTargetRef:
    name: org-runnerdeploy
  scaleUpTriggers:
  - githubEvent:
      checkRun:
        types: ["created"]
        status: "queued"
    amount: 1
    duration: "1m"
  scheduledOverrides:
  - startTime: "2021-05-11T16:05:00+09:00"
    endTime: "2021-05-11T16:40:00+09:00"
    minReplicas: 2
  - startTime: "2021-05-01T00:00:00+09:00"
    endTime: "2021-05-03T00:00:00+09:00"
    recurrenceRule:
      frequency: Weekly
      untilTime: "2022-05-01T00:00:00+09:00"
    minReplicas: 0
  minReplicas: 0
  maxReplicas: 5
  # Used to test that HRA is working for org runners
  metrics:
  - type: PercentageRunnersBusy
    scaleUpThreshold: '0.75'
    scaleDownThreshold: '0.3'
    scaleUpFactor: '2'
    scaleDownFactor: '0.5'
  - type: TotalNumberOfQueuedAndInProgressWorkflowRuns
    repositoryNames:
    - ${TEST_ORG_REPO}

acceptance/testdata/org.runnerdeploy.yaml (new file)

apiVersion: actions.summerwind.dev/v1alpha1
kind: RunnerDeployment
metadata:
  name: org-runnerdeploy
spec:
  # replicas: 1
  template:
    spec:
      organization: ${TEST_ORG}

      #
      # Custom runner image
      #
      image: ${RUNNER_NAME}:${RUNNER_TAG}
      imagePullPolicy: IfNotPresent

      #
      # dockerd within runner container
      #
      ## Replace `mumoshu/actions-runner-dind:dev` with your dind image
      #dockerdWithinRunnerContainer: true
      #image: mumoshu/actions-runner-dind:dev

      #
      # Set the MTU used by dockerd-managed network interfaces (including docker-build-ubuntu)
      #
      #dockerMTU: 1450

      #Runner group
      # labels:
      # - "mylabel 1"
      # - "mylabel 2"

      #
      # Non-standard working directory
      #
      # workDir: "/"

acceptance/testdata/repo.hra.yaml (new file)

apiVersion: actions.summerwind.dev/v1alpha1
kind: HorizontalRunnerAutoscaler
metadata:
  name: actions-runner-aos-autoscaler
spec:
  scaleTargetRef:
    name: example-runnerdeploy
  scaleUpTriggers:
  - githubEvent:
      checkRun:
        types: ["created"]
        status: "queued"
    amount: 1
    duration: "1m"
  minReplicas: 0
  maxReplicas: 5
  metrics:
  - type: PercentageRunnersBusy
    scaleUpThreshold: '0.75'
    scaleDownThreshold: '0.3'
    scaleUpFactor: '2'
    scaleDownFactor: '0.5'
  - type: TotalNumberOfQueuedAndInProgressWorkflowRuns
    repositoryNames:
    - ${TEST_REPO}
acceptance/testdata/repo.runnerdeploy.yaml (vendored, new file, 37 lines)
@@ -0,0 +1,37 @@
apiVersion: actions.summerwind.dev/v1alpha1
kind: RunnerDeployment
metadata:
  name: example-runnerdeploy
spec:
  # replicas: 1
  template:
    spec:
      repository: ${TEST_REPO}

      #
      # Custom runner image
      #
      image: ${RUNNER_NAME}:${RUNNER_TAG}
      imagePullPolicy: IfNotPresent

      #
      # dockerd within runner container
      #
      ## Replace `mumoshu/actions-runner-dind:dev` with your dind image
      #dockerdWithinRunnerContainer: true
      #image: mumoshu/actions-runner-dind:dev

      #
      # Set the MTU used by dockerd-managed network interfaces (including docker-build-ubuntu)
      #
      #dockerMTU: 1450

      #Runner group
      # labels:
      # - "mylabel 1"
      # - "mylabel 2"

      #
      # Non-standard working directory
      #
      # workDir: "/"
acceptance/testdata/repo.runnerset.hra.yaml (vendored, new file, 29 lines)
@@ -0,0 +1,29 @@
apiVersion: actions.summerwind.dev/v1alpha1
kind: HorizontalRunnerAutoscaler
metadata:
  name: example-runnerset
spec:
  scaleTargetRef:
    kind: RunnerSet
    name: example-runnerset
  scaleUpTriggers:
  - githubEvent:
      checkRun:
        types: ["created"]
        status: "queued"
    amount: 1
    duration: "1m"
  # RunnerSet doesn't support scale from/to zero yet
  minReplicas: 1
  maxReplicas: 5
  # This should be less than 600 (seconds, the default) for faster testing
  scaleDownDelaySecondsAfterScaleOut: 60
  metrics:
  - type: PercentageRunnersBusy
    scaleUpThreshold: '0.75'
    scaleDownThreshold: '0.3'
    scaleUpFactor: '2'
    scaleDownFactor: '0.5'
  - type: TotalNumberOfQueuedAndInProgressWorkflowRuns
    repositoryNames:
    - ${TEST_REPO}
acceptance/testdata/repo.runnerset.yaml (vendored, new file, 56 lines)
@@ -0,0 +1,56 @@
apiVersion: actions.summerwind.dev/v1alpha1
kind: RunnerSet
metadata:
  name: example-runnerset
spec:
  # MANDATORY because it is based on StatefulSet: omitting it results in the error below:
  #   missing required field "selector" in dev.summerwind.actions.v1alpha1.RunnerSet.spec
  selector:
    matchLabels:
      app: example-runnerset

  # MANDATORY because it is based on StatefulSet: omitting it results in the error below:
  #   missing required field "serviceName" in dev.summerwind.actions.v1alpha1.RunnerSet.spec
  serviceName: example-runnerset

  #replicas: 1

  # From my limited testing, `ephemeral: true` is more reliable.
  # Sometimes, updating already deployed runners from `ephemeral: false` to `ephemeral: true` seems to
  # result in queued jobs hanging forever.
  ephemeral: false

  repository: ${TEST_REPO}
  #
  # Custom runner image
  #
  image: ${RUNNER_NAME}:${RUNNER_TAG}
  #
  # dockerd within runner container
  #
  ## Replace `mumoshu/actions-runner-dind:dev` with your dind image
  #dockerdWithinRunnerContainer: true
  #
  # Set the MTU used by dockerd-managed network interfaces (including docker-build-ubuntu)
  #
  #dockerMTU: 1450
  #Runner group
  # labels:
  # - "mylabel 1"
  # - "mylabel 2"
  labels:
  - "${RUNNER_LABEL}"
  #
  # Non-standard working directory
  #
  # workDir: "/"
  template:
    metadata:
      labels:
        app: example-runnerset
    spec:
      containers:
      - name: runner
        imagePullPolicy: IfNotPresent
      #- name: docker
      #  #image: mumoshu/actions-runner-dind:dev
acceptance/testdata/runnerdeploy.yaml (vendored, deleted, 9 lines)
@@ -1,9 +0,0 @@
-apiVersion: actions.summerwind.dev/v1alpha1
-kind: RunnerDeployment
-metadata:
-  name: example-runnerdeploy
-spec:
-  # replicas: 1
-  template:
-    spec:
-      repository: mumoshu/actions-runner-controller-ci
acceptance/values.yaml (new file, 20 lines)
@@ -0,0 +1,20 @@
# Set actions-runner-controller settings for testing
githubAPICacheDuration: 10s
githubWebhookServer:
  enabled: true
  labels: {}
  replicaCount: 1
  syncPeriod: 10m
  secret:
    create: true
    name: "github-webhook-server"
    ### GitHub Webhook Configuration
    #github_webhook_secret_token: ""
  service:
    type: NodePort
    ports:
    - port: 80
      targetPort: http
      protocol: TCP
      name: http
      nodePort: 31000
@@ -54,6 +54,12 @@ type HorizontalRunnerAutoscalerSpec struct {
 	ScaleUpTriggers []ScaleUpTrigger `json:"scaleUpTriggers,omitempty"`
 
 	CapacityReservations []CapacityReservation `json:"capacityReservations,omitempty" patchStrategy:"merge" patchMergeKey:"name"`
+
+	// ScheduledOverrides is the list of ScheduledOverride.
+	// It can be used to override a few fields of HorizontalRunnerAutoscalerSpec on schedule.
+	// The earlier a scheduled override is, the higher it is prioritized.
+	// +optional
+	ScheduledOverrides []ScheduledOverride `json:"scheduledOverrides,omitempty"`
 }
 
 type ScaleUpTrigger struct {
@@ -72,6 +78,12 @@ type GitHubEventScaleUpTriggerSpec struct {
 type CheckRunSpec struct {
 	Types []string `json:"types,omitempty"`
 	Status string `json:"status,omitempty"`
+
+	// Names is a list of GitHub Actions glob patterns.
+	// Any check_run event whose name matches one of the patterns in the list can trigger autoscaling.
+	// Note that the check_run name seems to equal the job name you've defined in your actions workflow yaml file.
+	// So it is very likely that you can utilize this to trigger depending on the job.
+	Names []string `json:"names,omitempty"`
 }
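To make the new Names field concrete, a scale-up trigger that fires only for particular jobs could be sketched as below; the glob patterns are hypothetical examples, not taken from this change:

scaleUpTriggers:
- githubEvent:
    checkRun:
      types: ["created"]
      status: "queued"
      # hypothetical patterns; check_run names correspond to workflow job names
      names: ["build-*", "deploy"]
  amount: 1
  duration: "1m"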
 // https://docs.github.com/en/actions/reference/events-that-trigger-workflows#pull_request
@@ -94,6 +106,12 @@ type CapacityReservation struct {
 }
 
 type ScaleTargetRef struct {
+	// Kind is the type of resource being referenced
+	// +optional
+	// +kubebuilder:validation:Enum=RunnerDeployment;RunnerSet
+	Kind string `json:"kind,omitempty"`
+
+	// Name is the name of resource being referenced
 	Name string `json:"name,omitempty"`
 }
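With Kind restricted to the RunnerDeployment;RunnerSet enum, an HRA can now point at a RunnerSet explicitly. A minimal sketch with placeholder names:

spec:
  scaleTargetRef:
    # kind is optional; when omitted, the target is presumably treated as a
    # RunnerDeployment, which keeps pre-existing manifests working
    kind: RunnerSet
    name: example-runnerset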
@@ -138,6 +156,40 @@ type MetricSpec struct {
 	ScaleDownAdjustment int `json:"scaleDownAdjustment,omitempty"`
 }
 
+// ScheduledOverride can be used to override a few fields of HorizontalRunnerAutoscalerSpec on schedule.
+// A schedule can optionally be recurring, so that the corresponding override happens every day, week, month, or year.
+type ScheduledOverride struct {
+	// StartTime is the time at which the first override starts.
+	StartTime metav1.Time `json:"startTime"`
+
+	// EndTime is the time at which the first override ends.
+	EndTime metav1.Time `json:"endTime"`
+
+	// MinReplicas is the number of runners while overriding.
+	// If omitted, it doesn't override minReplicas.
+	// +optional
+	// +nullable
+	// +kubebuilder:validation:Minimum=0
+	MinReplicas *int `json:"minReplicas,omitempty"`
+
+	// +optional
+	RecurrenceRule RecurrenceRule `json:"recurrenceRule,omitempty"`
+}
+
+type RecurrenceRule struct {
+	// Frequency is the name of a predefined interval of each recurrence.
+	// The valid values are "Daily", "Weekly", "Monthly", and "Yearly".
+	// If empty, the corresponding override happens only once.
+	// +optional
+	// +kubebuilder:validation:Enum=Daily;Weekly;Monthly;Yearly
+	Frequency string `json:"frequency,omitempty"`
+
+	// UntilTime is the time of the final recurrence.
+	// If empty, the schedule recurs forever.
+	// +optional
+	UntilTime metav1.Time `json:"untilTime,omitempty"`
+}
 
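Mapping these Go fields to their YAML keys, a one-off override plus a weekly recurring override would look like the sketch below; the timestamps and replica counts are placeholders:

scheduledOverrides:
# one-off override: applies exactly once between startTime and endTime
- startTime: "2021-06-01T00:00:00+09:00"
  endTime: "2021-06-01T12:00:00+09:00"
  minReplicas: 3
# recurring override: repeats weekly until untilTime
- startTime: "2021-06-05T00:00:00+09:00"
  endTime: "2021-06-07T00:00:00+09:00"
  recurrenceRule:
    frequency: Weekly
    untilTime: "2022-06-01T00:00:00+09:00"
  minReplicas: 0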
 type HorizontalRunnerAutoscalerStatus struct {
 	// ObservedGeneration is the most recent generation observed for the target. It corresponds to e.g.
 	// RunnerDeployment's generation, which is updated on mutation by the API Server.
@@ -150,10 +202,16 @@ type HorizontalRunnerAutoscalerStatus struct {
 	DesiredReplicas *int `json:"desiredReplicas,omitempty"`
 
 	// +optional
 	// +nullable
 	LastSuccessfulScaleOutTime *metav1.Time `json:"lastSuccessfulScaleOutTime,omitempty"`
 
 	// +optional
 	CacheEntries []CacheEntry `json:"cacheEntries,omitempty"`
+
+	// ScheduledOverridesSummary is the summary of active and upcoming scheduled overrides to be shown in e.g. a column of a `kubectl get hra` output
+	// for observability.
+	// +optional
+	ScheduledOverridesSummary *string `json:"scheduledOverridesSummary,omitempty"`
 }
 
 const CacheEntryKeyDesiredReplicas = "desiredReplicas"
@@ -169,6 +227,7 @@ type CacheEntry struct {
 // +kubebuilder:printcolumn:JSONPath=".spec.minReplicas",name=Min,type=number
 // +kubebuilder:printcolumn:JSONPath=".spec.maxReplicas",name=Max,type=number
 // +kubebuilder:printcolumn:JSONPath=".status.desiredReplicas",name=Desired,type=number
+// +kubebuilder:printcolumn:JSONPath=".status.scheduledOverridesSummary",name=Schedule,type=string
 
 // HorizontalRunnerAutoscaler is the Schema for the horizontalrunnerautoscaler API
 type HorizontalRunnerAutoscaler struct {
@@ -19,12 +19,19 @@ package v1alpha1
 import (
 	"errors"
 
+	"k8s.io/apimachinery/pkg/api/resource"
+
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
 // RunnerSpec defines the desired state of Runner
 type RunnerSpec struct {
+	RunnerConfig  `json:",inline"`
+	RunnerPodSpec `json:",inline"`
+}
+
+type RunnerConfig struct {
 	// +optional
 	// +kubebuilder:validation:Pattern=`^[^/]+$`
 	Enterprise string `json:"enterprise,omitempty"`
@@ -44,54 +51,98 @@ type RunnerSpec struct {
 	Group string `json:"group,omitempty"`
 
-	// +optional
-	Containers []corev1.Container `json:"containers,omitempty"`
-	// +optional
-	DockerdContainerResources corev1.ResourceRequirements `json:"dockerdContainerResources,omitempty"`
-	// +optional
-	Resources corev1.ResourceRequirements `json:"resources,omitempty"`
-	// +optional
-	VolumeMounts []corev1.VolumeMount `json:"volumeMounts,omitempty"`
-	// +optional
-	EnvFrom []corev1.EnvFromSource `json:"envFrom,omitempty"`
 	Ephemeral *bool `json:"ephemeral,omitempty"`
 
 	// +optional
 	Image string `json:"image"`
-	// +optional
-	ImagePullPolicy corev1.PullPolicy `json:"imagePullPolicy,omitempty"`
-	// +optional
-	Env []corev1.EnvVar `json:"env,omitempty"`
-
-	// +optional
-	Volumes []corev1.Volume `json:"volumes,omitempty"`
 	// +optional
 	WorkDir string `json:"workDir,omitempty"`
 
-	// +optional
-	InitContainers []corev1.Container `json:"initContainers,omitempty"`
-	// +optional
-	SidecarContainers []corev1.Container `json:"sidecarContainers,omitempty"`
-	// +optional
-	NodeSelector map[string]string `json:"nodeSelector,omitempty"`
-	// +optional
-	ServiceAccountName string `json:"serviceAccountName,omitempty"`
-	// +optional
-	AutomountServiceAccountToken *bool `json:"automountServiceAccountToken,omitempty"`
-	// +optional
-	SecurityContext *corev1.PodSecurityContext `json:"securityContext,omitempty"`
-	// +optional
-	ImagePullSecrets []corev1.LocalObjectReference `json:"imagePullSecrets,omitempty"`
-	// +optional
-	Affinity *corev1.Affinity `json:"affinity,omitempty"`
-	// +optional
-	Tolerations []corev1.Toleration `json:"tolerations,omitempty"`
-	// +optional
-	EphemeralContainers []corev1.EphemeralContainer `json:"ephemeralContainers,omitempty"`
-	// +optional
-	TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty"`
 	// +optional
 	DockerdWithinRunnerContainer *bool `json:"dockerdWithinRunnerContainer,omitempty"`
 	// +optional
 	DockerEnabled *bool `json:"dockerEnabled,omitempty"`
 	// +optional
 	DockerMTU *int64 `json:"dockerMTU,omitempty"`
 	// +optional
 	DockerRegistryMirror *string `json:"dockerRegistryMirror,omitempty"`
 	// +optional
 	VolumeSizeLimit *resource.Quantity `json:"volumeSizeLimit,omitempty"`
 }
 
+// RunnerPodSpec defines the desired pod spec fields of the runner pod
+type RunnerPodSpec struct {
+	// +optional
+	DockerdContainerResources corev1.ResourceRequirements `json:"dockerdContainerResources,omitempty"`
+
+	// +optional
+	DockerVolumeMounts []corev1.VolumeMount `json:"dockerVolumeMounts,omitempty"`
+
+	// +optional
+	Containers []corev1.Container `json:"containers,omitempty"`
+
+	// +optional
+	ImagePullPolicy corev1.PullPolicy `json:"imagePullPolicy,omitempty"`
+
+	// +optional
+	Env []corev1.EnvVar `json:"env,omitempty"`
+
+	// +optional
+	EnvFrom []corev1.EnvFromSource `json:"envFrom,omitempty"`
+
+	// +optional
+	Resources corev1.ResourceRequirements `json:"resources,omitempty"`
+
+	// +optional
+	VolumeMounts []corev1.VolumeMount `json:"volumeMounts,omitempty"`
+
+	// +optional
+	Volumes []corev1.Volume `json:"volumes,omitempty"`
+
+	// +optional
+	EnableServiceLinks *bool `json:"enableServiceLinks,omitempty"`
+
+	// +optional
+	InitContainers []corev1.Container `json:"initContainers,omitempty"`
+
+	// +optional
+	NodeSelector map[string]string `json:"nodeSelector,omitempty"`
+
+	// +optional
+	ServiceAccountName string `json:"serviceAccountName,omitempty"`
+
+	// +optional
+	AutomountServiceAccountToken *bool `json:"automountServiceAccountToken,omitempty"`
+
+	// +optional
+	SidecarContainers []corev1.Container `json:"sidecarContainers,omitempty"`
+
+	// +optional
+	SecurityContext *corev1.PodSecurityContext `json:"securityContext,omitempty"`
+
+	// +optional
+	ImagePullSecrets []corev1.LocalObjectReference `json:"imagePullSecrets,omitempty"`
+
+	// +optional
+	Affinity *corev1.Affinity `json:"affinity,omitempty"`
+
+	// +optional
+	Tolerations []corev1.Toleration `json:"tolerations,omitempty"`
+
+	// +optional
+	TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty"`
+
+	// +optional
+	EphemeralContainers []corev1.EphemeralContainer `json:"ephemeralContainers,omitempty"`
+
+	// +optional
+	HostAliases []corev1.HostAlias `json:"hostAliases,omitempty"`
+
+	// RuntimeClassName is the container runtime configuration that containers should run under.
+	// More info: https://kubernetes.io/docs/concepts/containers/runtime-class
+	// +optional
+	RuntimeClassName *string `json:"runtimeClassName,omitempty"`
+}
 // ValidateRepository validates repository field.
@@ -119,10 +170,17 @@ func (rs *RunnerSpec) ValidateRepository() error {
 
 // RunnerStatus defines the observed state of Runner
 type RunnerStatus struct {
 	// +optional
 	Registration RunnerStatusRegistration `json:"registration"`
-	Phase string `json:"phase"`
-	Reason string `json:"reason"`
-	Message string `json:"message"`
+	// +optional
+	Phase string `json:"phase,omitempty"`
+	// +optional
+	Reason string `json:"reason,omitempty"`
+	// +optional
+	Message string `json:"message,omitempty"`
+	// +optional
+	// +nullable
+	LastRegistrationCheckTime *metav1.Time `json:"lastRegistrationCheckTime,omitempty"`
 }
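Because phase, reason, and message are now optional and tagged omitempty, a Runner that has registered but not yet reported a phase can serialize a sparse status. A hypothetical sketch with placeholder values:

status:
  registration:
    # registration details recorded by the controller; field names here are illustrative
    repository: example/repo
    expiresAt: "2021-05-11T16:40:00+09:00"
  # phase, reason, and message are simply omitted instead of rendering as empty strings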
 // RunnerStatusRegistration contains runner registration status
@@ -142,6 +200,7 @@ type RunnerStatusRegistration struct {
 // +kubebuilder:printcolumn:JSONPath=".spec.repository",name=Repository,type=string
 // +kubebuilder:printcolumn:JSONPath=".spec.labels",name=Labels,type=string
 // +kubebuilder:printcolumn:JSONPath=".status.phase",name=Status,type=string
+// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"
 
 // Runner is the Schema for the runners API
 type Runner struct {
@@ -21,7 +21,7 @@ import (
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/util/validation/field"
 	ctrl "sigs.k8s.io/controller-runtime"
-	logf "sigs.k8s.io/controller-runtime/pkg/runtime/log"
+	logf "sigs.k8s.io/controller-runtime/pkg/log"
 	"sigs.k8s.io/controller-runtime/pkg/webhook"
 )
 
@@ -34,7 +34,7 @@ func (r *Runner) SetupWebhookWithManager(mgr ctrl.Manager) error {
 		Complete()
 }
 
-// +kubebuilder:webhook:path=/mutate-actions-summerwind-dev-v1alpha1-runner,verbs=create;update,mutating=true,failurePolicy=fail,groups=actions.summerwind.dev,resources=runners,versions=v1alpha1,name=mutate.runner.actions.summerwind.dev
+// +kubebuilder:webhook:path=/mutate-actions-summerwind-dev-v1alpha1-runner,verbs=create;update,mutating=true,failurePolicy=fail,groups=actions.summerwind.dev,resources=runners,versions=v1alpha1,name=mutate.runner.actions.summerwind.dev,sideEffects=None,webhookVersions=v1beta1
 
 var _ webhook.Defaulter = &Runner{}
 
@@ -43,7 +43,7 @@ func (r *Runner) Default() {
 	// Nothing to do.
 }
 
-// +kubebuilder:webhook:path=/validate-actions-summerwind-dev-v1alpha1-runner,verbs=create;update,mutating=false,failurePolicy=fail,groups=actions.summerwind.dev,resources=runners,versions=v1alpha1,name=validate.runner.actions.summerwind.dev
+// +kubebuilder:webhook:path=/validate-actions-summerwind-dev-v1alpha1-runner,verbs=create;update,mutating=false,failurePolicy=fail,groups=actions.summerwind.dev,resources=runners,versions=v1alpha1,name=validate.runner.actions.summerwind.dev,sideEffects=None,webhookVersions=v1beta1
 
 var _ webhook.Validator = &Runner{}
@@ -38,20 +38,41 @@ type RunnerDeploymentSpec struct {
 }
 
 type RunnerDeploymentStatus struct {
-	AvailableReplicas int `json:"availableReplicas"`
-	ReadyReplicas int `json:"readyReplicas"`
+	// See K8s deployment controller code for reference
+	// https://github.com/kubernetes/kubernetes/blob/ea0764452222146c47ec826977f49d7001b0ea8c/pkg/controller/deployment/sync.go#L487-L505
+
+	// AvailableReplicas is the total number of available runners which have been successfully registered to GitHub and still running.
+	// This corresponds to the sum of status.availableReplicas of all the runner replica sets.
+	// +optional
+	AvailableReplicas *int `json:"availableReplicas"`
+
+	// ReadyReplicas is the total number of available runners which have been successfully registered to GitHub and still running.
+	// This corresponds to the sum of status.readyReplicas of all the runner replica sets.
+	// +optional
+	ReadyReplicas *int `json:"readyReplicas"`
+
+	// UpdatedReplicas is the total number of available runners which have been successfully registered to GitHub and still running.
+	// This corresponds to status.replicas of the runner replica set that has the desired template hash.
+	// +optional
+	UpdatedReplicas *int `json:"updatedReplicas"`
 
-	// Replicas is the total number of desired, non-terminated and latest pods to be set for the primary RunnerSet
+	// DesiredReplicas is the total number of desired, non-terminated and latest pods to be set for the primary RunnerSet
 	// This doesn't include outdated pods while upgrading the deployment and replacing the runnerset.
 	// +optional
-	Replicas *int `json:"desiredReplicas,omitempty"`
+	DesiredReplicas *int `json:"desiredReplicas"`
+
+	// Replicas is the total number of replicas
+	// +optional
+	Replicas *int `json:"replicas"`
 }
 
 // +kubebuilder:object:root=true
 // +kubebuilder:subresource:status
 // +kubebuilder:printcolumn:JSONPath=".spec.replicas",name=Desired,type=number
-// +kubebuilder:printcolumn:JSONPath=".status.availableReplicas",name=Current,type=number
-// +kubebuilder:printcolumn:JSONPath=".status.readyReplicas",name=Ready,type=number
+// +kubebuilder:printcolumn:JSONPath=".status.replicas",name=Current,type=number
+// +kubebuilder:printcolumn:JSONPath=".status.updatedReplicas",name=Up-To-Date,type=number
+// +kubebuilder:printcolumn:JSONPath=".status.availableReplicas",name=Available,type=number
 // +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"
 
 // RunnerDeployment is the Schema for the runnerdeployments API
 type RunnerDeployment struct {
@@ -21,7 +21,7 @@ import (
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/util/validation/field"
 	ctrl "sigs.k8s.io/controller-runtime"
-	logf "sigs.k8s.io/controller-runtime/pkg/runtime/log"
+	logf "sigs.k8s.io/controller-runtime/pkg/log"
 	"sigs.k8s.io/controller-runtime/pkg/webhook"
 )
 
@@ -34,7 +34,7 @@ func (r *RunnerDeployment) SetupWebhookWithManager(mgr ctrl.Manager) error {
 		Complete()
 }
 
-// +kubebuilder:webhook:path=/mutate-actions-summerwind-dev-v1alpha1-runnerdeployment,verbs=create;update,mutating=true,failurePolicy=fail,groups=actions.summerwind.dev,resources=runnerdeployments,versions=v1alpha1,name=mutate.runnerdeployment.actions.summerwind.dev
+// +kubebuilder:webhook:path=/mutate-actions-summerwind-dev-v1alpha1-runnerdeployment,verbs=create;update,mutating=true,failurePolicy=fail,groups=actions.summerwind.dev,resources=runnerdeployments,versions=v1alpha1,name=mutate.runnerdeployment.actions.summerwind.dev,webhookVersions=v1beta1
 
 var _ webhook.Defaulter = &RunnerDeployment{}
 
@@ -43,7 +43,7 @@ func (r *RunnerDeployment) Default() {
 	// Nothing to do.
 }
 
-// +kubebuilder:webhook:path=/validate-actions-summerwind-dev-v1alpha1-runnerdeployment,verbs=create;update,mutating=false,failurePolicy=fail,groups=actions.summerwind.dev,resources=runnerdeployments,versions=v1alpha1,name=validate.runnerdeployment.actions.summerwind.dev
+// +kubebuilder:webhook:path=/validate-actions-summerwind-dev-v1alpha1-runnerdeployment,verbs=create;update,mutating=false,failurePolicy=fail,groups=actions.summerwind.dev,resources=runnerdeployments,versions=v1alpha1,name=validate.runnerdeployment.actions.summerwind.dev,webhookVersions=v1beta1
 
 var _ webhook.Validator = &RunnerDeployment{}
@@ -33,8 +33,19 @@ type RunnerReplicaSetSpec struct {
 }
 
 type RunnerReplicaSetStatus struct {
-	AvailableReplicas int `json:"availableReplicas"`
-	ReadyReplicas int `json:"readyReplicas"`
+	// See K8s replicaset controller code for reference
+	// https://github.com/kubernetes/kubernetes/blob/ea0764452222146c47ec826977f49d7001b0ea8c/pkg/controller/replicaset/replica_set_utils.go#L101-L106
+
+	// Replicas is the number of runners that are created and still being managed by this runner replica set.
+	// +optional
+	Replicas *int `json:"replicas"`
+
+	// ReadyReplicas is the number of runners that are created and Running.
+	ReadyReplicas *int `json:"readyReplicas"`
+
+	// AvailableReplicas is the number of runners that are created and Running.
+	// This is currently the same as ReadyReplicas but preserved for future use.
+	AvailableReplicas *int `json:"availableReplicas"`
 }
 
 type RunnerTemplate struct {
@@ -46,8 +57,9 @@ type RunnerTemplate struct {
 // +kubebuilder:object:root=true
 // +kubebuilder:subresource:status
 // +kubebuilder:printcolumn:JSONPath=".spec.replicas",name=Desired,type=number
-// +kubebuilder:printcolumn:JSONPath=".status.availableReplicas",name=Current,type=number
+// +kubebuilder:printcolumn:JSONPath=".status.replicas",name=Current,type=number
 // +kubebuilder:printcolumn:JSONPath=".status.readyReplicas",name=Ready,type=number
+// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"
 
 // RunnerReplicaSet is the Schema for the runnerreplicasets API
 type RunnerReplicaSet struct {
@@ -21,7 +21,7 @@ import (
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/util/validation/field"
 	ctrl "sigs.k8s.io/controller-runtime"
-	logf "sigs.k8s.io/controller-runtime/pkg/runtime/log"
+	logf "sigs.k8s.io/controller-runtime/pkg/log"
 	"sigs.k8s.io/controller-runtime/pkg/webhook"
 )
 
@@ -34,7 +34,7 @@ func (r *RunnerReplicaSet) SetupWebhookWithManager(mgr ctrl.Manager) error {
 		Complete()
 }
 
-// +kubebuilder:webhook:path=/mutate-actions-summerwind-dev-v1alpha1-runnerreplicaset,verbs=create;update,mutating=true,failurePolicy=fail,groups=actions.summerwind.dev,resources=runnerreplicasets,versions=v1alpha1,name=mutate.runnerreplicaset.actions.summerwind.dev
+// +kubebuilder:webhook:path=/mutate-actions-summerwind-dev-v1alpha1-runnerreplicaset,verbs=create;update,mutating=true,failurePolicy=fail,groups=actions.summerwind.dev,resources=runnerreplicasets,versions=v1alpha1,name=mutate.runnerreplicaset.actions.summerwind.dev,sideEffects=None,webhookVersions=v1beta1
 
 var _ webhook.Defaulter = &RunnerReplicaSet{}
 
@@ -43,7 +43,7 @@ func (r *RunnerReplicaSet) Default() {
 	// Nothing to do.
 }
 
-// +kubebuilder:webhook:path=/validate-actions-summerwind-dev-v1alpha1-runnerreplicaset,verbs=create;update,mutating=false,failurePolicy=fail,groups=actions.summerwind.dev,resources=runnerreplicasets,versions=v1alpha1,name=validate.runnerreplicaset.actions.summerwind.dev
+// +kubebuilder:webhook:path=/validate-actions-summerwind-dev-v1alpha1-runnerreplicaset,verbs=create;update,mutating=false,failurePolicy=fail,groups=actions.summerwind.dev,resources=runnerreplicasets,versions=v1alpha1,name=validate.runnerreplicaset.actions.summerwind.dev,sideEffects=None,webhookVersions=v1beta1
 
 var _ webhook.Validator = &RunnerReplicaSet{}
api/v1alpha1/runnerset_types.go (new file, 88 lines)
@@ -0,0 +1,88 @@
/*
Copyright 2021 The actions-runner-controller authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1alpha1

import (
	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// RunnerSetSpec defines the desired state of RunnerSet
type RunnerSetSpec struct {
	RunnerConfig `json:",inline"`

	appsv1.StatefulSetSpec `json:",inline"`
}

type RunnerSetStatus struct {
	// See K8s deployment controller code for reference
	// https://github.com/kubernetes/kubernetes/blob/ea0764452222146c47ec826977f49d7001b0ea8c/pkg/controller/deployment/sync.go#L487-L505

	// AvailableReplicas is the total number of available runners which have been successfully registered to GitHub and still running.
	// This corresponds to the sum of status.availableReplicas of all the runner replica sets.
	// +optional
	CurrentReplicas *int `json:"availableReplicas"`

	// ReadyReplicas is the total number of available runners which have been successfully registered to GitHub and still running.
	// This corresponds to the sum of status.readyReplicas of all the runner replica sets.
	// +optional
	ReadyReplicas *int `json:"readyReplicas"`

	// UpdatedReplicas is the total number of available runners which have been successfully registered to GitHub and still running.
	// This corresponds to status.replicas of the runner replica set that has the desired template hash.
	// +optional
	UpdatedReplicas *int `json:"updatedReplicas"`

	// DesiredReplicas is the total number of desired, non-terminated and latest pods to be set for the primary RunnerSet
	// This doesn't include outdated pods while upgrading the deployment and replacing the runnerset.
	// +optional
	DesiredReplicas *int `json:"desiredReplicas"`

	// Replicas is the total number of replicas
	// +optional
	Replicas *int `json:"replicas"`
}

// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:JSONPath=".spec.replicas",name=Desired,type=number
// +kubebuilder:printcolumn:JSONPath=".status.replicas",name=Current,type=number
// +kubebuilder:printcolumn:JSONPath=".status.updatedReplicas",name=Up-To-Date,type=number
// +kubebuilder:printcolumn:JSONPath=".status.availableReplicas",name=Available,type=number
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"

// RunnerSet is the Schema for the runnersets API
type RunnerSet struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec   RunnerSetSpec   `json:"spec,omitempty"`
	Status RunnerSetStatus `json:"status,omitempty"`
}

// +kubebuilder:object:root=true

// RunnerSetList contains a list of RunnerSet
type RunnerSetList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []RunnerSet `json:"items"`
}

func init() {
	SchemeBuilder.Register(&RunnerSet{}, &RunnerSetList{})
}
@@ -66,6 +66,11 @@ func (in *CheckRunSpec) DeepCopyInto(out *CheckRunSpec) {
 		*out = make([]string, len(*in))
 		copy(*out, *in)
 	}
+	if in.Names != nil {
+		in, out := &in.Names, &out.Names
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CheckRunSpec.
@@ -207,6 +212,13 @@ func (in *HorizontalRunnerAutoscalerSpec) DeepCopyInto(out *HorizontalRunnerAuto
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
 	}
+	if in.ScheduledOverrides != nil {
+		in, out := &in.ScheduledOverrides, &out.ScheduledOverrides
+		*out = make([]ScheduledOverride, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalRunnerAutoscalerSpec.
@@ -238,6 +250,11 @@ func (in *HorizontalRunnerAutoscalerStatus) DeepCopyInto(out *HorizontalRunnerAu
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
 	}
+	if in.ScheduledOverridesSummary != nil {
+		in, out := &in.ScheduledOverridesSummary, &out.ScheduledOverridesSummary
+		*out = new(string)
+		**out = **in
+	}
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalRunnerAutoscalerStatus.
@@ -310,6 +327,22 @@ func (in *PushSpec) DeepCopy() *PushSpec {
 	return out
 }
 
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RecurrenceRule) DeepCopyInto(out *RecurrenceRule) {
+	*out = *in
+	in.UntilTime.DeepCopyInto(&out.UntilTime)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecurrenceRule.
+func (in *RecurrenceRule) DeepCopy() *RecurrenceRule {
+	if in == nil {
+		return nil
+	}
+	out := new(RecurrenceRule)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *Runner) DeepCopyInto(out *Runner) {
 	*out = *in
@@ -337,6 +370,56 @@ func (in *Runner) DeepCopyObject() runtime.Object {
 	return nil
 }
 
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RunnerConfig) DeepCopyInto(out *RunnerConfig) {
+	*out = *in
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.Ephemeral != nil {
+		in, out := &in.Ephemeral, &out.Ephemeral
+		*out = new(bool)
+		**out = **in
+	}
+	if in.DockerdWithinRunnerContainer != nil {
+		in, out := &in.DockerdWithinRunnerContainer, &out.DockerdWithinRunnerContainer
+		*out = new(bool)
+		**out = **in
+	}
+	if in.DockerEnabled != nil {
+		in, out := &in.DockerEnabled, &out.DockerEnabled
+		*out = new(bool)
+		**out = **in
+	}
+	if in.DockerMTU != nil {
+		in, out := &in.DockerMTU, &out.DockerMTU
+		*out = new(int64)
+		**out = **in
+	}
+	if in.DockerRegistryMirror != nil {
+		in, out := &in.DockerRegistryMirror, &out.DockerRegistryMirror
+		*out = new(string)
+		**out = **in
+	}
+	if in.VolumeSizeLimit != nil {
+		in, out := &in.VolumeSizeLimit, &out.VolumeSizeLimit
+		x := (*in).DeepCopy()
+		*out = &x
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunnerConfig.
+func (in *RunnerConfig) DeepCopy() *RunnerConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(RunnerConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *RunnerDeployment) DeepCopyInto(out *RunnerDeployment) {
 	*out = *in
@@ -425,6 +508,26 @@ func (in *RunnerDeploymentSpec) DeepCopy() *RunnerDeploymentSpec {
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *RunnerDeploymentStatus) DeepCopyInto(out *RunnerDeploymentStatus) {
 	*out = *in
+	if in.AvailableReplicas != nil {
+		in, out := &in.AvailableReplicas, &out.AvailableReplicas
+		*out = new(int)
+		**out = **in
+	}
+	if in.ReadyReplicas != nil {
+		in, out := &in.ReadyReplicas, &out.ReadyReplicas
+		*out = new(int)
+		**out = **in
+	}
+	if in.UpdatedReplicas != nil {
+		in, out := &in.UpdatedReplicas, &out.UpdatedReplicas
+		*out = new(int)
+		**out = **in
+	}
+	if in.DesiredReplicas != nil {
+		in, out := &in.DesiredReplicas, &out.DesiredReplicas
+		*out = new(int)
+		**out = **in
+	}
 	if in.Replicas != nil {
 		in, out := &in.Replicas, &out.Replicas
 		*out = new(int)
@@ -474,13 +577,149 @@ func (in *RunnerList) DeepCopyObject() runtime.Object {
 	return nil
 }
 
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RunnerPodSpec) DeepCopyInto(out *RunnerPodSpec) {
+	*out = *in
+	in.DockerdContainerResources.DeepCopyInto(&out.DockerdContainerResources)
+	if in.DockerVolumeMounts != nil {
+		in, out := &in.DockerVolumeMounts, &out.DockerVolumeMounts
+		*out = make([]v1.VolumeMount, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Containers != nil {
+		in, out := &in.Containers, &out.Containers
+		*out = make([]v1.Container, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Env != nil {
+		in, out := &in.Env, &out.Env
+		*out = make([]v1.EnvVar, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.EnvFrom != nil {
+		in, out := &in.EnvFrom, &out.EnvFrom
+		*out = make([]v1.EnvFromSource, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	in.Resources.DeepCopyInto(&out.Resources)
+	if in.VolumeMounts != nil {
+		in, out := &in.VolumeMounts, &out.VolumeMounts
+		*out = make([]v1.VolumeMount, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Volumes != nil {
+		in, out := &in.Volumes, &out.Volumes
+		*out = make([]v1.Volume, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.EnableServiceLinks != nil {
+		in, out := &in.EnableServiceLinks, &out.EnableServiceLinks
+		*out = new(bool)
+		**out = **in
+	}
+	if in.InitContainers != nil {
+		in, out := &in.InitContainers, &out.InitContainers
+		*out = make([]v1.Container, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.NodeSelector != nil {
+		in, out := &in.NodeSelector, &out.NodeSelector
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.AutomountServiceAccountToken != nil {
+		in, out := &in.AutomountServiceAccountToken, &out.AutomountServiceAccountToken
+		*out = new(bool)
+		**out = **in
+	}
+	if in.SidecarContainers != nil {
+		in, out := &in.SidecarContainers, &out.SidecarContainers
+		*out = make([]v1.Container, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SecurityContext != nil {
+		in, out := &in.SecurityContext, &out.SecurityContext
+		*out = new(v1.PodSecurityContext)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ImagePullSecrets != nil {
+		in, out := &in.ImagePullSecrets, &out.ImagePullSecrets
+		*out = make([]v1.LocalObjectReference, len(*in))
+		copy(*out, *in)
+	}
+	if in.Affinity != nil {
+		in, out := &in.Affinity, &out.Affinity
+		*out = new(v1.Affinity)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Tolerations != nil {
+		in, out := &in.Tolerations, &out.Tolerations
+		*out = make([]v1.Toleration, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.TerminationGracePeriodSeconds != nil {
+		in, out := &in.TerminationGracePeriodSeconds, &out.TerminationGracePeriodSeconds
+		*out = new(int64)
+		**out = **in
+	}
+	if in.EphemeralContainers != nil {
+		in, out := &in.EphemeralContainers, &out.EphemeralContainers
+		*out = make([]v1.EphemeralContainer, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.HostAliases != nil {
+		in, out := &in.HostAliases, &out.HostAliases
+		*out = make([]v1.HostAlias, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.RuntimeClassName != nil {
+		in, out := &in.RuntimeClassName, &out.RuntimeClassName
+		*out = new(string)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunnerPodSpec.
+func (in *RunnerPodSpec) DeepCopy() *RunnerPodSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(RunnerPodSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *RunnerReplicaSet) DeepCopyInto(out *RunnerReplicaSet) {
 	*out = *in
 	out.TypeMeta = in.TypeMeta
 	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
 	in.Spec.DeepCopyInto(&out.Spec)
-	out.Status = in.Status
+	in.Status.DeepCopyInto(&out.Status)
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunnerReplicaSet.
@@ -562,6 +801,21 @@ func (in *RunnerReplicaSetSpec) DeepCopy() *RunnerReplicaSetSpec {
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *RunnerReplicaSetStatus) DeepCopyInto(out *RunnerReplicaSetStatus) {
 	*out = *in
+	if in.Replicas != nil {
+		in, out := &in.Replicas, &out.Replicas
+		*out = new(int)
+		**out = **in
+	}
+	if in.ReadyReplicas != nil {
+		in, out := &in.ReadyReplicas, &out.ReadyReplicas
+		*out = new(int)
+		**out = **in
+	}
+	if in.AvailableReplicas != nil {
+		in, out := &in.AvailableReplicas, &out.AvailableReplicas
+		*out = new(int)
+		**out = **in
+	}
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunnerReplicaSetStatus.
@@ -574,121 +828,127 @@ func (in *RunnerReplicaSetStatus) DeepCopy() *RunnerReplicaSetStatus {
 	return out
 }
 
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RunnerSet) DeepCopyInto(out *RunnerSet) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunnerSet.
+func (in *RunnerSet) DeepCopy() *RunnerSet {
+	if in == nil {
+		return nil
+	}
+	out := new(RunnerSet)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *RunnerSet) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RunnerSetList) DeepCopyInto(out *RunnerSetList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]RunnerSet, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunnerSetList.
+func (in *RunnerSetList) DeepCopy() *RunnerSetList {
+	if in == nil {
+		return nil
+	}
+	out := new(RunnerSetList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *RunnerSetList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RunnerSetSpec) DeepCopyInto(out *RunnerSetSpec) {
+	*out = *in
+	in.RunnerConfig.DeepCopyInto(&out.RunnerConfig)
+	in.StatefulSetSpec.DeepCopyInto(&out.StatefulSetSpec)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunnerSetSpec.
+func (in *RunnerSetSpec) DeepCopy() *RunnerSetSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(RunnerSetSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RunnerSetStatus) DeepCopyInto(out *RunnerSetStatus) {
+	*out = *in
+	if in.CurrentReplicas != nil {
+		in, out := &in.CurrentReplicas, &out.CurrentReplicas
+		*out = new(int)
+		**out = **in
+	}
+	if in.ReadyReplicas != nil {
+		in, out := &in.ReadyReplicas, &out.ReadyReplicas
+		*out = new(int)
+		**out = **in
+	}
+	if in.UpdatedReplicas != nil {
+		in, out := &in.UpdatedReplicas, &out.UpdatedReplicas
+		*out = new(int)
+		**out = **in
+	}
+	if in.DesiredReplicas != nil {
+		in, out := &in.DesiredReplicas, &out.DesiredReplicas
+		*out = new(int)
+		**out = **in
+	}
+	if in.Replicas != nil {
+		in, out := &in.Replicas, &out.Replicas
+		*out = new(int)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunnerSetStatus.
+func (in *RunnerSetStatus) DeepCopy() *RunnerSetStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(RunnerSetStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *RunnerSpec) DeepCopyInto(out *RunnerSpec) {
 	*out = *in
-	if in.Labels != nil {
-		in, out := &in.Labels, &out.Labels
-		*out = make([]string, len(*in))
-		copy(*out, *in)
-	}
-	if in.Containers != nil {
-		in, out := &in.Containers, &out.Containers
-		*out = make([]v1.Container, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	in.DockerdContainerResources.DeepCopyInto(&out.DockerdContainerResources)
-	in.Resources.DeepCopyInto(&out.Resources)
-	if in.VolumeMounts != nil {
-		in, out := &in.VolumeMounts, &out.VolumeMounts
-		*out = make([]v1.VolumeMount, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	if in.EnvFrom != nil {
-		in, out := &in.EnvFrom, &out.EnvFrom
-		*out = make([]v1.EnvFromSource, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	if in.Env != nil {
-		in, out := &in.Env, &out.Env
-		*out = make([]v1.EnvVar, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	if in.Volumes != nil {
-		in, out := &in.Volumes, &out.Volumes
-		*out = make([]v1.Volume, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	if in.InitContainers != nil {
-		in, out := &in.InitContainers, &out.InitContainers
-		*out = make([]v1.Container, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	if in.SidecarContainers != nil {
-		in, out := &in.SidecarContainers, &out.SidecarContainers
-		*out = make([]v1.Container, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	if in.NodeSelector != nil {
-		in, out := &in.NodeSelector, &out.NodeSelector
-		*out = make(map[string]string, len(*in))
-		for key, val := range *in {
-			(*out)[key] = val
-		}
-	}
-	if in.AutomountServiceAccountToken != nil {
-		in, out := &in.AutomountServiceAccountToken, &out.AutomountServiceAccountToken
-		*out = new(bool)
-		**out = **in
-	}
-	if in.SecurityContext != nil {
-		in, out := &in.SecurityContext, &out.SecurityContext
-		*out = new(v1.PodSecurityContext)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.ImagePullSecrets != nil {
-		in, out := &in.ImagePullSecrets, &out.ImagePullSecrets
-		*out = make([]v1.LocalObjectReference, len(*in))
-		copy(*out, *in)
-	}
-	if in.Affinity != nil {
-		in, out := &in.Affinity, &out.Affinity
-		*out = new(v1.Affinity)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.Tolerations != nil {
-		in, out := &in.Tolerations, &out.Tolerations
-		*out = make([]v1.Toleration, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	if in.EphemeralContainers != nil {
-		in, out := &in.EphemeralContainers, &out.EphemeralContainers
-		*out = make([]v1.EphemeralContainer, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	if in.TerminationGracePeriodSeconds != nil {
-		in, out := &in.TerminationGracePeriodSeconds, &out.TerminationGracePeriodSeconds
-		*out = new(int64)
-		**out = **in
-	}
-	if in.DockerdWithinRunnerContainer != nil {
-		in, out := &in.DockerdWithinRunnerContainer, &out.DockerdWithinRunnerContainer
-		*out = new(bool)
-		**out = **in
-	}
-	if in.DockerEnabled != nil {
-		in, out := &in.DockerEnabled, &out.DockerEnabled
-		*out = new(bool)
-		**out = **in
-	}
+	in.RunnerConfig.DeepCopyInto(&out.RunnerConfig)
+	in.RunnerPodSpec.DeepCopyInto(&out.RunnerPodSpec)
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunnerSpec.
@@ -705,6 +965,10 @@ func (in *RunnerSpec) DeepCopy() *RunnerSpec {
 func (in *RunnerStatus) DeepCopyInto(out *RunnerStatus) {
 	*out = *in
 	in.Registration.DeepCopyInto(&out.Registration)
+	if in.LastRegistrationCheckTime != nil {
+		in, out := &in.LastRegistrationCheckTime, &out.LastRegistrationCheckTime
+		*out = (*in).DeepCopy()
+	}
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunnerStatus.
@@ -790,3 +1054,26 @@ func (in *ScaleUpTrigger) DeepCopy() *ScaleUpTrigger {
 	in.DeepCopyInto(out)
 	return out
 }
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ScheduledOverride) DeepCopyInto(out *ScheduledOverride) {
+	*out = *in
+	in.StartTime.DeepCopyInto(&out.StartTime)
+	in.EndTime.DeepCopyInto(&out.EndTime)
+	if in.MinReplicas != nil {
+		in, out := &in.MinReplicas, &out.MinReplicas
+		*out = new(int)
+		**out = **in
+	}
+	in.RecurrenceRule.DeepCopyInto(&out.RecurrenceRule)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduledOverride.
+func (in *ScheduledOverride) DeepCopy() *ScheduledOverride {
+	if in == nil {
+		return nil
+	}
+	out := new(ScheduledOverride)
+	in.DeepCopyInto(out)
+	return out
+}
@@ -21,3 +21,5 @@
 .idea/
 *.tmproj
 .vscode/
+# Docs
+docs/
@@ -15,17 +15,16 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.7.0
+version: 0.12.6
 
-home: https://github.com/summerwind/actions-runner-controller
+# Used as the default manager tag value when no tag property is provided in the values.yaml
+appVersion: 0.19.0
+
+home: https://github.com/actions-runner-controller/actions-runner-controller
 
 sources:
-  - https://github.com/summerwind/actions-runner-controller
+  - https://github.com/actions-runner-controller/actions-runner-controller
 
 maintainers:
 - name: summerwind
   email: contact@summerwind.jp
   url: https://github.com/summerwind
 - name: funkypenguin
   email: davidy@funkypenguin.co.nz
   url: https://www.funkypenguin.co.nz
+- name: actions-runner-controller
+  url: https://github.com/actions-runner-controller
charts/actions-runner-controller/README.md (new file, 84 lines)
@@ -0,0 +1,84 @@
|
||||
## Docs

All additional docs are kept in the `docs/` folder; this README is solely for documenting the values.yaml keys and values.

## Values

_The values are documented as of HEAD_

_Default values are the defaults set in the chart's values.yaml; some properties have default configurations in the code for when the property is omitted or invalid._

| Key | Description | Default |
|-----|-------------|---------|
| `labels` | Set labels to apply to all resources in the chart | |
| `replicaCount` | Set the number of controller pods | 1 |
| `syncPeriod` | Set the period in which the controller reconciles the desired runners count | 10m |
| `githubAPICacheDuration` | Set the cache period for API calls | |
| `githubEnterpriseServerURL` | Set the URL for a self-hosted GitHub Enterprise Server | |
| `logLevel` | Set the log level of the controller container | |
| `authSecret.create` | Deploy the controller auth secret | false |
| `authSecret.name` | Set the name of the auth secret | controller-manager |
| `authSecret.github_app_id` | The ID of your GitHub App. **This can't be set at the same time as `authSecret.github_token`** | |
| `authSecret.github_app_installation_id` | The ID of your GitHub App installation. **This can't be set at the same time as `authSecret.github_token`** | |
| `authSecret.github_app_private_key` | The multiline string of your GitHub App's private key. **This can't be set at the same time as `authSecret.github_token`** | |
| `authSecret.github_token` | Your chosen GitHub PAT token. **This can't be set at the same time as the `authSecret.github_app_*`** | |
| `image.repository` | The "repository/image" of the controller container | summerwind/actions-runner-controller |
| `image.tag` | The tag of the controller container | |
| `image.actionsRunnerRepositoryAndTag` | The "repository/image" of the actions runner container | summerwind/actions-runner:latest |
| `image.dindSidecarRepositoryAndTag` | The "repository/image" of the dind sidecar container | docker:dind |
| `image.pullPolicy` | The pull policy of the controller image | IfNotPresent |
| `metrics.serviceMonitor` | Deploy serviceMonitor kind for use with prometheus-operator CRDs | false |
| `metrics.port` | Set port of metrics service | 8443 |
| `metrics.proxy.enabled` | Deploy kube-rbac-proxy container in controller pod | true |
| `metrics.proxy.image.repository` | The "repository/image" of the kube-rbac-proxy container | quay.io/brancz/kube-rbac-proxy |
| `metrics.proxy.image.tag` | The tag of the kube-rbac-proxy image to use when pulling the container | v0.10.0 |
| `imagePullSecrets` | Specifies the secret to be used when pulling the controller pod containers | |
| `fullNameOverride` | Override the full resource names | |
| `nameOverride` | Override the resource name prefix | |
| `serviceAccount.annotations` | Set annotations for the service account | |
| `serviceAccount.create` | Deploy the controller pod under a service account | true |
| `podAnnotations` | Set annotations for the controller pod | |
| `podLabels` | Set labels for the controller pod | |
| `serviceAccount.name` | Set the name of the service account | |
| `securityContext` | Set the security context for each container in the controller pod | |
| `podSecurityContext` | Set the security context for the controller pod | |
| `service.port` | Set the controller service port | |
| `service.type` | Set the controller service type | |
| `topologySpreadConstraints` | Set the controller pod topologySpreadConstraints | |
| `nodeSelector` | Set the controller pod nodeSelector | |
| `resources` | Set the controller pod resources | |
| `affinity` | Set the controller pod affinity rules | |
| `tolerations` | Set the controller pod tolerations | |
| `env` | Set environment variables for the controller container | |
| `priorityClassName` | Set the controller pod priorityClassName | |
| `scope.watchNamespace` | Tells the controller which namespace to watch if `scope.singleNamespace` is true | |
| `scope.singleNamespace` | Limit the controller to watch a single namespace | false |
| `githubWebhookServer.logLevel` | Set the log level of the githubWebhookServer container | |
| `githubWebhookServer.replicaCount` | Set the number of webhook server pods | 1 |
| `githubWebhookServer.syncPeriod` | Set the period in which the controller reconciles the resources | 10m |
| `githubWebhookServer.enabled` | Deploy the webhook server pod | false |
| `githubWebhookServer.secret.create` | Deploy the webhook hook secret | false |
| `githubWebhookServer.secret.name` | Set the name of the webhook hook secret | github-webhook-server |
| `githubWebhookServer.secret.github_webhook_secret_token` | Set the webhook secret token value | |
| `githubWebhookServer.imagePullSecrets` | Specifies the secret to be used when pulling the githubWebhookServer pod containers | |
| `githubWebhookServer.nameOverride` | Override the resource name prefix | |
| `githubWebhookServer.fullNameOverride` | Override the full resource names | |
| `githubWebhookServer.serviceAccount.create` | Deploy the githubWebhookServer under a service account | true |
| `githubWebhookServer.serviceAccount.annotations` | Set annotations for the service account | |
| `githubWebhookServer.serviceAccount.name` | Set the service account name | |
| `githubWebhookServer.podAnnotations` | Set annotations for the githubWebhookServer pod | |
| `githubWebhookServer.podLabels` | Set labels for the githubWebhookServer pod | |
| `githubWebhookServer.podSecurityContext` | Set the security context for the githubWebhookServer pod | |
| `githubWebhookServer.securityContext` | Set the security context for each container in the githubWebhookServer pod | |
| `githubWebhookServer.resources` | Set the githubWebhookServer pod resources | |
| `githubWebhookServer.topologySpreadConstraints` | Set the githubWebhookServer pod topologySpreadConstraints | |
| `githubWebhookServer.nodeSelector` | Set the githubWebhookServer pod nodeSelector | |
| `githubWebhookServer.tolerations` | Set the githubWebhookServer pod tolerations | |
| `githubWebhookServer.affinity` | Set the githubWebhookServer pod affinity rules | |
| `githubWebhookServer.priorityClassName` | Set the githubWebhookServer pod priorityClassName | |
| `githubWebhookServer.service.type` | Set githubWebhookServer service type | |
| `githubWebhookServer.service.ports` | Set githubWebhookServer service ports | `[{"port":80, "targetPort":"http", "protocol":"TCP", "name":"http"}]` |
| `githubWebhookServer.ingress.enabled` | Deploy an ingress kind for the githubWebhookServer | false |
| `githubWebhookServer.ingress.annotations` | Set annotations for the ingress kind | |
| `githubWebhookServer.ingress.hosts` | Set hosts configuration for ingress | `[{"host": "chart-example.local", "paths": []}]` |
| `githubWebhookServer.ingress.tls` | Set tls configuration for ingress | |
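As a worked example of the keys above, here is a minimal values.yaml sketch for a PAT-based install with the optional webhook server enabled; both token values are placeholders, not real defaults:

```yaml
# Minimal sketch: PAT authentication plus the optional webhook server.
# github_token and github_webhook_secret_token are placeholders.
authSecret:
  create: true
  github_token: "<your PAT>"
githubWebhookServer:
  enabled: true
  secret:
    create: true
    github_webhook_secret_token: "<your webhook secret>"
```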
@@ -22,6 +22,9 @@ resources:
    cpu: 100m
    memory: 128Mi

authSecret:
  create: false

# Set the following to true to create a dummy secret, allowing the manager pod to start
# This is only useful in CI
createDummySecret: true
@@ -1,23 +1,13 @@

---
apiVersion: apiextensions.k8s.io/v1beta1
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.3.0
controller-gen.kubebuilder.io/version: v0.6.0
creationTimestamp: null
name: horizontalrunnerautoscalers.actions.summerwind.dev
spec:
additionalPrinterColumns:
- JSONPath: .spec.minReplicas
name: Min
type: number
- JSONPath: .spec.maxReplicas
name: Max
type: number
- JSONPath: .status.desiredReplicas
name: Desired
type: number
group: actions.summerwind.dev
names:
kind: HorizontalRunnerAutoscaler
@@ -25,191 +15,281 @@ spec:
plural: horizontalrunnerautoscalers
singular: horizontalrunnerautoscaler
scope: Namespaced
subresources:
status: {}
validation:
openAPIV3Schema:
description: HorizontalRunnerAutoscaler is the Schema for the horizontalrunnerautoscaler
API
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: HorizontalRunnerAutoscalerSpec defines the desired state of
HorizontalRunnerAutoscaler
properties:
capacityReservations:
items:
description: CapacityReservation specifies the number of replicas
temporarily added to the scale target until ExpirationTime.
versions:
- additionalPrinterColumns:
- jsonPath: .spec.minReplicas
name: Min
type: number
- jsonPath: .spec.maxReplicas
name: Max
type: number
- jsonPath: .status.desiredReplicas
name: Desired
type: number
- jsonPath: .status.scheduledOverridesSummary
name: Schedule
type: string
name: v1alpha1
schema:
openAPIV3Schema:
description: HorizontalRunnerAutoscaler is the Schema for the horizontalrunnerautoscaler
API
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: HorizontalRunnerAutoscalerSpec defines the desired state
of HorizontalRunnerAutoscaler
properties:
capacityReservations:
items:
description: CapacityReservation specifies the number of replicas
temporarily added to the scale target until ExpirationTime.
properties:
expirationTime:
format: date-time
type: string
name:
type: string
replicas:
type: integer
type: object
type: array
maxReplicas:
description: MaxReplicas is the maximum number of replicas the deployment
is allowed to scale
type: integer
metrics:
description: Metrics is the collection of various metric targets to
calculate desired number of runners
items:
properties:
repositoryNames:
description: RepositoryNames is the list of repository names
to be used for calculating the metric. For example, a repository
name is the REPO part of `github.com/USER/REPO`.
items:
type: string
type: array
scaleDownAdjustment:
description: ScaleDownAdjustment is the number of runners removed
on scale-down. You can only specify either ScaleDownFactor
or ScaleDownAdjustment.
type: integer
scaleDownFactor:
description: ScaleDownFactor is the multiplicative factor applied
to the current number of runners used to determine how many
pods should be removed.
type: string
scaleDownThreshold:
description: ScaleDownThreshold is the percentage of busy runners
less than which will trigger the hpa to scale the runners
down.
type: string
scaleUpAdjustment:
description: ScaleUpAdjustment is the number of runners added
on scale-up. You can only specify either ScaleUpFactor or
ScaleUpAdjustment.
type: integer
scaleUpFactor:
description: ScaleUpFactor is the multiplicative factor applied
to the current number of runners used to determine how many
pods should be added.
type: string
scaleUpThreshold:
description: ScaleUpThreshold is the percentage of busy runners
greater than which will trigger the hpa to scale runners up.
type: string
type:
description: Type is the type of metric to be used for autoscaling.
The only supported Type is TotalNumberOfQueuedAndInProgressWorkflowRuns
type: string
type: object
type: array
minReplicas:
description: MinReplicas is the minimum number of replicas the deployment
is allowed to scale
type: integer
scaleDownDelaySecondsAfterScaleOut:
description: ScaleDownDelaySecondsAfterScaleUp is the approximate
delay for a scale down followed by a scale up. Used to prevent flapping
(down->up->down->... loop)
type: integer
scaleTargetRef:
description: ScaleTargetRef is the reference to the scaled resource like
RunnerDeployment
properties:
expirationTime:
format: date-time
kind:
description: Kind is the type of resource being referenced
enum:
- RunnerDeployment
- RunnerSet
type: string
name:
description: Name is the name of resource being referenced
type: string
replicas:
type: integer
type: object
type: array
maxReplicas:
description: MaxReplicas is the maximum number of replicas the deployment
is allowed to scale
type: integer
metrics:
description: Metrics is the collection of various metric targets to
calculate desired number of runners
items:
properties:
repositoryNames:
description: RepositoryNames is the list of repository names to
be used for calculating the metric. For example, a repository
name is the REPO part of `github.com/USER/REPO`.
items:
scaleUpTriggers:
description: "ScaleUpTriggers is an experimental feature to increase
the desired replicas by 1 on each webhook request received by
the webhookBasedAutoscaler. \n This feature requires you to also
enable and deploy the webhookBasedAutoscaler onto your cluster.
\n Note that the added runners remain until the next sync period
at least, and they may or may not be used by GitHub Actions depending
on the timing. They are intended to be used to gain \"resource slack\"
immediately after you receive a webhook from GitHub, so that you
can loosely expect MinReplicas runners to be always available."
items:
properties:
amount:
type: integer
duration:
type: string
type: array
scaleDownAdjustment:
description: ScaleDownAdjustment is the number of runners removed
on scale-down. You can only specify either ScaleDownFactor or
ScaleDownAdjustment.
type: integer
scaleDownFactor:
description: ScaleDownFactor is the multiplicative factor applied
to the current number of runners used to determine how many
pods should be removed.
type: string
scaleDownThreshold:
description: ScaleDownThreshold is the percentage of busy runners
less than which will trigger the hpa to scale the runners down.
type: string
scaleUpAdjustment:
description: ScaleUpAdjustment is the number of runners added
on scale-up. You can only specify either ScaleUpFactor or ScaleUpAdjustment.
type: integer
scaleUpFactor:
description: ScaleUpFactor is the multiplicative factor applied
to the current number of runners used to determine how many
pods should be added.
type: string
scaleUpThreshold:
description: ScaleUpThreshold is the percentage of busy runners
greater than which will trigger the hpa to scale runners up.
type: string
type:
description: Type is the type of metric to be used for autoscaling.
The only supported Type is TotalNumberOfQueuedAndInProgressWorkflowRuns
type: string
type: object
type: array
minReplicas:
description: MinReplicas is the minimum number of replicas the deployment
is allowed to scale
type: integer
scaleDownDelaySecondsAfterScaleOut:
description: ScaleDownDelaySecondsAfterScaleUp is the approximate delay
for a scale down followed by a scale up. Used to prevent flapping (down->up->down->...
loop)
type: integer
scaleTargetRef:
description: ScaleTargetRef is the reference to the scaled resource like
RunnerDeployment
properties:
name:
type: string
type: object
scaleUpTriggers:
description: "ScaleUpTriggers is an experimental feature to increase
the desired replicas by 1 on each webhook request received by the
webhookBasedAutoscaler. \n This feature requires you to also enable
and deploy the webhookBasedAutoscaler onto your cluster. \n Note that
the added runners remain until the next sync period at least, and
they may or may not be used by GitHub Actions depending on the timing.
They are intended to be used to gain \"resource slack\" immediately
after you receive a webhook from GitHub, so that you can loosely expect
MinReplicas runners to be always available."
items:
properties:
amount:
type: integer
duration:
type: string
githubEvent:
properties:
checkRun:
description: https://docs.github.com/en/actions/reference/events-that-trigger-workflows#check_run
properties:
status:
type: string
types:
items:
githubEvent:
properties:
checkRun:
description: https://docs.github.com/en/actions/reference/events-that-trigger-workflows#check_run
properties:
names:
description: Names is a list of GitHub Actions glob
patterns. Any check_run event whose name matches one
of patterns in the list can trigger autoscaling. Note
that check_run name seems to equal to the job name
you've defined in your actions workflow yaml file.
So it is very likely that you can utilize this to
trigger depending on the job.
items:
type: string
type: array
status:
type: string
type: array
type: object
pullRequest:
description: https://docs.github.com/en/actions/reference/events-that-trigger-workflows#pull_request
properties:
branches:
items:
type: string
type: array
types:
items:
type: string
type: array
type: object
push:
description: PushSpec is the condition for triggering scale-up
on push event. Also see https://docs.github.com/en/actions/reference/events-that-trigger-workflows#push
type: object
type: object
type: object
type: array
type: object
status:
properties:
cacheEntries:
items:
properties:
expirationTime:
format: date-time
type: string
key:
type: string
value:
type: integer
type: object
type: array
desiredReplicas:
description: DesiredReplicas is the total number of desired, non-terminated
and latest pods to be set for the primary RunnerSet. This doesn't include
outdated pods while upgrading the deployment and replacing the runnerset.
type: integer
lastSuccessfulScaleOutTime:
format: date-time
type: string
observedGeneration:
description: ObservedGeneration is the most recent generation observed
for the target. It corresponds to e.g. RunnerDeployment's generation,
which is updated on mutation by the API Server.
format: int64
type: integer
type: object
type: object
version: v1alpha1
versions:
- name: v1alpha1
types:
items:
type: string
type: array
type: object
pullRequest:
description: https://docs.github.com/en/actions/reference/events-that-trigger-workflows#pull_request
properties:
branches:
items:
type: string
type: array
types:
items:
type: string
type: array
type: object
push:
description: PushSpec is the condition for triggering scale-up
on push event. Also see https://docs.github.com/en/actions/reference/events-that-trigger-workflows#push
type: object
type: object
type: object
type: array
scheduledOverrides:
description: ScheduledOverrides is the list of ScheduledOverride.
It can be used to override a few fields of HorizontalRunnerAutoscalerSpec
on schedule. The earlier a scheduled override is, the higher it
is prioritized.
items:
description: ScheduledOverride can be used to override a few fields
of HorizontalRunnerAutoscalerSpec on schedule. A schedule can
optionally be recurring, so that the corresponding override happens
every day, week, month, or year.
properties:
endTime:
description: EndTime is the time at which the first override
ends.
format: date-time
type: string
minReplicas:
description: MinReplicas is the number of runners while overriding.
If omitted, it doesn't override minReplicas.
minimum: 0
nullable: true
type: integer
recurrenceRule:
properties:
frequency:
description: Frequency is the name of a predefined interval
of each recurrence. The valid values are "Daily", "Weekly",
"Monthly", and "Yearly". If empty, the corresponding override
happens only once.
enum:
- Daily
- Weekly
- Monthly
- Yearly
type: string
untilTime:
description: UntilTime is the time of the final recurrence.
If empty, the schedule recurs forever.
format: date-time
type: string
type: object
startTime:
description: StartTime is the time at which the first override
starts.
format: date-time
type: string
required:
- endTime
- startTime
type: object
type: array
type: object
status:
properties:
cacheEntries:
items:
properties:
expirationTime:
format: date-time
type: string
key:
type: string
value:
type: integer
type: object
type: array
desiredReplicas:
description: DesiredReplicas is the total number of desired, non-terminated
and latest pods to be set for the primary RunnerSet. This doesn't
include outdated pods while upgrading the deployment and replacing
the runnerset.
type: integer
lastSuccessfulScaleOutTime:
format: date-time
nullable: true
type: string
observedGeneration:
description: ObservedGeneration is the most recent generation observed
for the target. It corresponds to e.g. RunnerDeployment's generation,
which is updated on mutation by the API Server.
format: int64
type: integer
scheduledOverridesSummary:
description: ScheduledOverridesSummary is the summary of active and
upcoming scheduled overrides to be shown in e.g. a column of a `kubectl
get hra` output for observability.
type: string
type: object
type: object
served: true
storage: true
subresources:
status: {}
status:
acceptedNames:
kind: ""
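To make the schema above concrete, here is a hypothetical HorizontalRunnerAutoscaler combining a webhook-driven scale-up trigger with a recurring scheduled override; every name and number is illustrative, but the field names come from the CRD above:

```yaml
apiVersion: actions.summerwind.dev/v1alpha1
kind: HorizontalRunnerAutoscaler
metadata:
  name: example-autoscaler            # illustrative name
spec:
  scaleTargetRef:
    name: example-runnerdeploy        # an existing RunnerDeployment
  minReplicas: 1
  maxReplicas: 10
  scaleUpTriggers:
  - githubEvent:
      checkRun:
        types: ["created"]
        status: "queued"
    amount: 1                         # add one runner per matching webhook
    duration: "5m"                    # keep it for at least five minutes
  scheduledOverrides:
  - startTime: "2021-06-05T00:00:00Z" # first override window
    endTime: "2021-06-07T00:00:00Z"
    recurrenceRule:
      frequency: Weekly               # one of Daily/Weekly/Monthly/Yearly
    minReplicas: 0                    # scale to zero every weekend
```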

File diff suppressed because it is too large
File diff suppressed because it is too large
40
charts/actions-runner-controller/docs/UPGRADING.md
Normal file
@@ -0,0 +1,40 @@

## Upgrading

This project makes extensive use of CRDs to provide much of its functionality. Helm unfortunately does not support [managing](https://helm.sh/docs/chart_best_practices/custom_resource_definitions/) CRDs by design:

_The full breakdown as to how they came to this decision and why they have taken the approach they have for dealing with CRDs can be found in [Helm Improvement Proposal 11](https://github.com/helm/community/blob/main/hips/hip-0011.md)_

```
There is no support at this time for upgrading or deleting CRDs using Helm. This was an explicit decision after much
community discussion due to the danger for unintentional data loss. Furthermore, there is currently no community
consensus around how to handle CRDs and their lifecycle. As this evolves, Helm will add support for those use cases.
```

Helm will do an initial install of CRDs but it will not touch them afterwards (update or delete).

Additionally, because the project leverages CRDs so extensively you **MUST** run the matching controller app container with its matching CRDs, i.e. always redeploy your CRDs if you are changing the app version.

Due to the above you can't just do a `helm upgrade` to release the latest version of the chart; the best-practice steps are recorded below.

## Steps

1. Upgrade CRDs

```shell
# REMEMBER TO UPDATE THE CHART_VERSION TO THE RELEVANT CHART VERSION!!!!
CHART_VERSION=0.11.0

curl -L https://github.com/actions-runner-controller/actions-runner-controller/releases/download/actions-runner-controller-${CHART_VERSION}/actions-runner-controller-${CHART_VERSION}.tgz | tar zxv --strip 1 actions-runner-controller/crds

kubectl apply -f crds/
```

2. Upgrade the Helm release

```shell
helm upgrade --install \
  --namespace actions-runner-system \
  --version ${CHART_VERSION} \
  actions-runner-controller \
  actions-runner-controller/actions-runner-controller
```
@@ -54,3 +54,7 @@ Create the name of the service account to use
{{- define "actions-runner-controller-github-webhook-server.roleName" -}}
{{- include "actions-runner-controller-github-webhook-server.fullname" . }}
{{- end }}

{{- define "actions-runner-controller-github-webhook-server.serviceMonitorName" -}}
{{- include "actions-runner-controller-github-webhook-server.fullname" . | trunc 47 }}-service-monitor
{{- end }}

@@ -92,10 +92,14 @@ Create the name of the service account to use
{{- include "actions-runner-controller.fullname" . | trunc 55 }}-webhook
{{- end }}

{{- define "actions-runner-controller.authProxyServiceName" -}}
{{- define "actions-runner-controller.metricsServiceName" -}}
{{- include "actions-runner-controller.fullname" . | trunc 47 }}-metrics-service
{{- end }}

{{- define "actions-runner-controller.serviceMonitorName" -}}
{{- include "actions-runner-controller.fullname" . | trunc 47 }}-service-monitor
{{- end }}

{{- define "actions-runner-controller.selfsignedIssuerName" -}}
{{- include "actions-runner-controller.fullname" . }}-selfsigned-issuer
{{- end }}

@@ -1,3 +1,4 @@
{{- if .Values.metrics.proxy.enabled }}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
@@ -11,3 +12,4 @@ rules:
  resources:
  - subjectaccessreviews
  verbs: ["create"]
{{- end }}

@@ -1,3 +1,4 @@
{{- if .Values.metrics.proxy.enabled }}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
@@ -10,3 +11,4 @@ subjects:
- kind: ServiceAccount
  name: {{ include "actions-runner-controller.serviceAccountName" . }}
  namespace: {{ .Release.Namespace }}
{{- end }}

@@ -5,7 +5,7 @@ apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
  name: {{ include "actions-runner-controller.selfsignedIssuerName" . }}
  namespace: {{ .Namespace }}
  namespace: {{ .Release.Namespace }}
spec:
  selfSigned: {}
---
@@ -13,7 +13,7 @@ apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: {{ include "actions-runner-controller.servingCertName" . }}
  namespace: {{ .Namespace }}
  namespace: {{ .Release.Namespace }}
spec:
  dnsNames:
  - {{ include "actions-runner-controller.webhookServiceName" . }}.{{ .Release.Namespace }}.svc

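This one-word change fixes a real rendering bug: the root template context has no `.Namespace` field, so `{{ .Namespace }}` rendered as an empty string and the Issuer and Certificate landed in whatever namespace the client defaulted to. A before/after sketch of the rendered metadata (the names are illustrative):

```yaml
# Before: {{ .Namespace }} is undefined on the root context, so it renders empty.
metadata:
  name: arc-selfsigned-issuer     # illustrative release-derived name
  namespace: ""                   # ends up defaulted at apply time
# After: {{ .Release.Namespace }} pins the resource to the release namespace.
metadata:
  name: arc-selfsigned-issuer
  namespace: actions-runner-system
```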
@@ -3,12 +3,12 @@ kind: Service
metadata:
  labels:
    {{- include "actions-runner-controller.labels" . | nindent 4 }}
  name: {{ include "actions-runner-controller.authProxyServiceName" . }}
  name: {{ include "actions-runner-controller.metricsServiceName" . }}
  namespace: {{ .Release.Namespace }}
spec:
  ports:
  - name: https
    port: 8443
    targetPort: https
  - name: metrics-port
    port: {{ .Values.metrics.port }}
    targetPort: metrics-port
  selector:
    {{- include "actions-runner-controller.selectorLabels" . | nindent 4 }}
@@ -0,0 +1,21 @@
{{- if .Values.metrics.serviceMonitor }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  labels:
    {{- include "actions-runner-controller.labels" . | nindent 4 }}
  name: {{ include "actions-runner-controller.serviceMonitorName" . }}
spec:
  endpoints:
  - path: /metrics
    port: metrics-port
    {{- if .Values.metrics.proxy.enabled }}
    bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
    scheme: https
    tlsConfig:
      insecureSkipVerify: true
    {{- end }}
  selector:
    matchLabels:
      {{- include "actions-runner-controller.selectorLabels" . | nindent 6 }}
{{- end }}
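Taken together with the metrics Service above, Prometheus scraping is controlled by just two values. A minimal sketch of the values.yaml overrides involved (key names from the chart's values table; nothing else is required for prometheus-operator discovery):

```yaml
# Hypothetical override: expose metrics on the chart's default port and let
# prometheus-operator discover them via the generated ServiceMonitor.
metrics:
  serviceMonitor: true   # renders the ServiceMonitor template above
  port: 8443             # port of the metrics Service / kube-rbac-proxy listener
  proxy:
    enabled: true        # scrapes go through kube-rbac-proxy over https
```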
@@ -18,6 +18,9 @@ spec:
      {{- end }}
      labels:
        {{- include "actions-runner-controller.selectorLabels" . | nindent 8 }}
        {{- with .Values.podLabels }}
        {{- toYaml . | nindent 8 }}
        {{- end }}
    spec:
      {{- with .Values.imagePullSecrets }}
      imagePullSecrets:
@@ -31,13 +34,29 @@ spec:
      {{- end }}
      containers:
      - args:
        - "--metrics-addr=127.0.0.1:8080"
        {{- $metricsHost := .Values.metrics.proxy.enabled | ternary "127.0.0.1" "0.0.0.0" }}
        {{- $metricsPort := .Values.metrics.proxy.enabled | ternary "8080" .Values.metrics.port }}
        - "--metrics-addr={{ $metricsHost }}:{{ $metricsPort }}"
        - "--enable-leader-election"
        - "--sync-period={{ .Values.syncPeriod }}"
        - "--docker-image={{ .Values.image.dindSidecarRepositoryAndTag }}"
        - "--runner-image={{ .Values.image.actionsRunnerRepositoryAndTag }}"
        {{- if .Values.scope.singleNamespace }}
        - "--watch-namespace={{ default .Release.Namespace .Values.scope.watchNamespace }}"
        {{- end }}
        {{- if .Values.githubAPICacheDuration }}
        - "--github-api-cache-duration={{ .Values.githubAPICacheDuration }}"
        {{- end }}
        {{- if .Values.logLevel }}
        - "--log-level={{ .Values.logLevel }}"
        {{- end }}
        command:
        - "/manager"
        env:
        {{- if .Values.githubEnterpriseServerURL }}
        - name: GITHUB_ENTERPRISE_URL
          value: {{ .Values.githubEnterpriseServerURL }}
        {{- end }}
        - name: GITHUB_TOKEN
          valueFrom:
            secretKeyRef:
@@ -62,13 +81,18 @@ spec:
        - name: {{ $key }}
          value: {{ $val | quote }}
        {{- end }}
        image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
        image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default (cat "v" .Chart.AppVersion | replace " " "") }}"
        name: manager
        imagePullPolicy: {{ .Values.image.pullPolicy }}
        ports:
        - containerPort: 9443
          name: webhook-server
          protocol: TCP
        {{- if not .Values.metrics.proxy.enabled }}
        - containerPort: {{ .Values.metrics.port }}
          name: metrics-port
          protocol: TCP
        {{- end }}
        resources:
          {{- toYaml .Values.resources | nindent 12 }}
        securityContext:
@@ -82,21 +106,23 @@ spec:
        - mountPath: /tmp/k8s-webhook-server/serving-certs
          name: cert
          readOnly: true
      {{- if .Values.metrics.proxy.enabled }}
      - args:
        - "--secure-listen-address=0.0.0.0:8443"
        - "--secure-listen-address=0.0.0.0:{{ .Values.metrics.port }}"
        - "--upstream=http://127.0.0.1:8080/"
        - "--logtostderr=true"
        - "--v=10"
        image: "{{ .Values.kube_rbac_proxy.image.repository }}:{{ .Values.kube_rbac_proxy.image.tag }}"
        image: "{{ .Values.metrics.proxy.image.repository }}:{{ .Values.metrics.proxy.image.tag }}"
        name: kube-rbac-proxy
        imagePullPolicy: {{ .Values.image.pullPolicy }}
        ports:
        - containerPort: 8443
          name: https
        - containerPort: {{ .Values.metrics.port }}
          name: metrics-port
        resources:
          {{- toYaml .Values.resources | nindent 12 }}
        securityContext:
          {{- toYaml .Values.securityContext | nindent 12 }}
      {{- end }}
      terminationGracePeriodSeconds: 10
      volumes:
      - name: secret
@@ -120,3 +146,7 @@ spec:
      tolerations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.topologySpreadConstraints }}
      topologySpreadConstraints:
        {{- toYaml . | nindent 8 }}
      {{- end }}

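The ternary pair above is the heart of the metrics rewiring: with the proxy enabled the manager binds metrics to localhost and kube-rbac-proxy fronts them over TLS; with it disabled the manager listens on the pod IP at `metrics.port` directly. A sketch of the two rendered arg lists, assuming the chart defaults:

```yaml
# With metrics.proxy.enabled=true (the default): metrics stay behind the proxy.
- args:
  - "--metrics-addr=127.0.0.1:8080"
# With metrics.proxy.enabled=false and metrics.port=8443: the manager serves
# metrics itself on the pod IP.
- args:
  - "--metrics-addr=0.0.0.0:8443"
```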
@@ -19,6 +19,9 @@ spec:
      {{- end }}
      labels:
        {{- include "actions-runner-controller-github-webhook-server.selectorLabels" . | nindent 8 }}
        {{- with .Values.githubWebhookServer.podLabels }}
        {{- toYaml . | nindent 8 }}
        {{- end }}
    spec:
      {{- with .Values.githubWebhookServer.imagePullSecrets }}
      imagePullSecrets:
@@ -32,8 +35,13 @@ spec:
      {{- end }}
      containers:
      - args:
        - "--metrics-addr=127.0.0.1:8080"
        {{- $metricsHost := .Values.metrics.proxy.enabled | ternary "127.0.0.1" "0.0.0.0" }}
        {{- $metricsPort := .Values.metrics.proxy.enabled | ternary "8080" .Values.metrics.port }}
        - "--metrics-addr={{ $metricsHost }}:{{ $metricsPort }}"
        - "--sync-period={{ .Values.githubWebhookServer.syncPeriod }}"
        {{- if .Values.githubWebhookServer.logLevel }}
        - "--log-level={{ .Values.githubWebhookServer.logLevel }}"
        {{- end }}
        command:
        - "/github-webhook-server"
        env:
@@ -47,32 +55,39 @@ spec:
        - name: {{ $key }}
          value: {{ $val | quote }}
        {{- end }}
        image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
        image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default (cat "v" .Chart.AppVersion | replace " " "") }}"
        name: github-webhook-server
        imagePullPolicy: {{ .Values.image.pullPolicy }}
        ports:
        - containerPort: 8000
          name: http
          protocol: TCP
        {{- if not .Values.metrics.proxy.enabled }}
        - containerPort: {{ .Values.metrics.port }}
          name: metrics-port
          protocol: TCP
        {{- end }}
        resources:
          {{- toYaml .Values.githubWebhookServer.resources | nindent 12 }}
        securityContext:
          {{- toYaml .Values.githubWebhookServer.securityContext | nindent 12 }}
      {{- if .Values.metrics.proxy.enabled }}
      - args:
        - "--secure-listen-address=0.0.0.0:8443"
        - "--secure-listen-address=0.0.0.0:{{ .Values.metrics.port }}"
        - "--upstream=http://127.0.0.1:8080/"
        - "--logtostderr=true"
        - "--v=10"
        image: "{{ .Values.kube_rbac_proxy.image.repository }}:{{ .Values.kube_rbac_proxy.image.tag }}"
        image: "{{ .Values.metrics.proxy.image.repository }}:{{ .Values.metrics.proxy.image.tag }}"
        name: kube-rbac-proxy
        imagePullPolicy: {{ .Values.image.pullPolicy }}
        ports:
        - containerPort: 8443
          name: https
        - containerPort: {{ .Values.metrics.port }}
          name: metrics-port
        resources:
          {{- toYaml .Values.resources | nindent 12 }}
        securityContext:
          {{- toYaml .Values.securityContext | nindent 12 }}
      {{- end }}
      terminationGracePeriodSeconds: 10
      {{- with .Values.githubWebhookServer.nodeSelector }}
      nodeSelector:
@@ -86,4 +101,8 @@ spec:
      tolerations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.githubWebhookServer.topologySpreadConstraints }}
      topologySpreadConstraints:
        {{- toYaml . | nindent 8 }}
      {{- end }}
{{- end }}

@@ -1,6 +1,6 @@
{{- if .Values.githubWebhookServer.ingress.enabled -}}
{{- $fullName := include "actions-runner-controller-github-webhook-server.fullname" . -}}
{{- $svcPort := .Values.githubWebhookServer.service.port -}}
{{- $svcPort := (index .Values.githubWebhookServer.service.ports 0).port -}}
{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
apiVersion: networking.k8s.io/v1beta1
{{- else -}}

@@ -6,12 +6,21 @@ metadata:
  namespace: {{ .Release.Namespace }}
  labels:
    {{- include "actions-runner-controller.labels" . | nindent 4 }}
  {{- if .Values.githubWebhookServer.service.annotations }}
  annotations:
    {{ toYaml .Values.githubWebhookServer.service.annotations | nindent 4 }}
  {{- end }}
spec:
  type: {{ .Values.githubWebhookServer.service.type }}
  ports:
    {{ range $_, $port := .Values.githubWebhookServer.service.ports -}}
    - {{ $port | toYaml | nindent 6 }}
    {{- end }}
    {{- if .Values.metrics.serviceMonitor }}
    - name: metrics-port
      port: {{ .Values.metrics.port }}
      targetPort: metrics-port
    {{- end }}
  selector:
    {{- include "actions-runner-controller-github-webhook-server.selectorLabels" . | nindent 4 }}
{{- end }}

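Because the service now takes a `ports` list instead of a single `port` (and the ingress reads `$svcPort` from the first entry), exposing the webhook server differently becomes a values-only change. A hypothetical NodePort override:

```yaml
# Hypothetical override: NodePort service for the webhook server; the first
# entry in `ports` is also what the ingress template picks up as $svcPort.
githubWebhookServer:
  enabled: true
  service:
    type: NodePort
    ports:
    - port: 80
      targetPort: http
      protocol: TCP
      name: http
      nodePort: 30080
```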
@@ -0,0 +1,15 @@
{{- if and .Values.githubWebhookServer.enabled .Values.metrics.serviceMonitor }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  labels:
    {{- include "actions-runner-controller.labels" . | nindent 4 }}
  name: {{ include "actions-runner-controller-github-webhook-server.serviceMonitorName" . }}
spec:
  endpoints:
  - path: /metrics
    port: metrics-port
  selector:
    matchLabels:
      {{- include "actions-runner-controller-github-webhook-server.selectorLabels" . | nindent 6 }}
{{- end }}
@@ -132,6 +132,62 @@ rules:
  - get
  - patch
  - update
- apiGroups:
  - actions.summerwind.dev
  resources:
  - runnersets
  verbs:
  - create
  - delete
  - get
  - list
  - patch
  - update
  - watch
- apiGroups:
  - actions.summerwind.dev
  resources:
  - runnersets/finalizers
  verbs:
  - create
  - delete
  - get
  - list
  - patch
  - update
  - watch
- apiGroups:
  - actions.summerwind.dev
  resources:
  - runnersets/status
  verbs:
  - get
  - patch
  - update
- apiGroups:
  - "apps"
  resources:
  - statefulsets
  verbs:
  - create
  - delete
  - get
  - list
  - patch
  - update
  - watch
- apiGroups:
  - "apps"
  resources:
  - statefulsets/finalizers
  verbs:
  - create
  - delete
  - get
  - list
  - patch
  - update
  - watch
- apiGroups:
  - ""
  resources:
@@ -139,6 +195,15 @@ rules:
  verbs:
  - create
  - patch
- apiGroups:
  - coordination.k8s.io
  resources:
  - leases
  verbs:
  - create
  - get
  - list
  - update
- apiGroups:
  - ""
  resources:

@@ -9,7 +9,6 @@ metadata:
    cert-manager.io/inject-ca-from: {{ .Release.Namespace }}/{{ include "actions-runner-controller.servingCertName" . }}
webhooks:
- clientConfig:
    caBundle: Cg==
    service:
      name: {{ include "actions-runner-controller.webhookServiceName" . }}
      namespace: {{ .Release.Namespace }}
@@ -26,8 +25,8 @@ webhooks:
    - UPDATE
    resources:
    - runners
  sideEffects: None
- clientConfig:
    caBundle: Cg==
    service:
      name: {{ include "actions-runner-controller.webhookServiceName" . }}
      namespace: {{ .Release.Namespace }}
@@ -44,8 +43,8 @@ webhooks:
    - UPDATE
    resources:
    - runnerdeployments
  sideEffects: None
- clientConfig:
    caBundle: Cg==
    service:
      name: {{ include "actions-runner-controller.webhookServiceName" . }}
      namespace: {{ .Release.Namespace }}
@@ -62,7 +61,27 @@ webhooks:
    - UPDATE
    resources:
    - runnerreplicasets

  sideEffects: None
- clientConfig:
    service:
      name: {{ include "actions-runner-controller.webhookServiceName" . }}
      namespace: {{ .Release.Namespace }}
      path: /mutate-runner-set-pod
  failurePolicy: Fail
  name: mutate-runner-pod.webhook.actions.summerwind.dev
  rules:
  - apiGroups:
    - ""
    apiVersions:
    - v1
    operations:
    - CREATE
    resources:
    - pods
  sideEffects: None
  objectSelector:
    matchLabels:
      "actions-runner-controller/inject-registration-token": "true"
---
apiVersion: admissionregistration.k8s.io/v1beta1
kind: ValidatingWebhookConfiguration
@@ -73,7 +92,6 @@ metadata:
    cert-manager.io/inject-ca-from: {{ .Release.Namespace }}/{{ include "actions-runner-controller.servingCertName" . }}
webhooks:
- clientConfig:
    caBundle: Cg==
    service:
      name: {{ include "actions-runner-controller.webhookServiceName" . }}
      namespace: {{ .Release.Namespace }}
@@ -90,8 +108,8 @@ webhooks:
    - UPDATE
    resources:
    - runners
  sideEffects: None
- clientConfig:
    caBundle: Cg==
    service:
      name: {{ include "actions-runner-controller.webhookServiceName" . }}
      namespace: {{ .Release.Namespace }}
@@ -108,8 +126,8 @@ webhooks:
    - UPDATE
    resources:
    - runnerdeployments
  sideEffects: None
- clientConfig:
    caBundle: Cg==
    service:
      name: {{ include "actions-runner-controller.webhookServiceName" . }}
      namespace: {{ .Release.Namespace }}
@@ -126,3 +144,4 @@ webhooks:
    - UPDATE
    resources:
    - runnerreplicasets
  sideEffects: None

@@ -8,10 +8,18 @@ replicaCount: 1

syncPeriod: 10m

# The controller tries its best not to repeat the duplicate GitHub API call
# within this duration.
# Defaults to syncPeriod - 10s.
#githubAPICacheDuration: 30s

# The URL of your GitHub Enterprise server, if you're using one.
#githubEnterpriseServerURL: https://github.example.com

# Only 1 authentication method can be deployed at a time
# Uncomment the configuration you are applying and fill in the details
authSecret:
  create: true
  create: false
  name: "controller-manager"
  ### GitHub Apps Configuration
  #github_app_id: ""
@@ -21,16 +29,11 @@ authSecret:
  #github_token: ""

image:
  repository: summerwind/actions-runner-controller
  tag: "v0.17.0"
  repository: "summerwind/actions-runner-controller"
  actionsRunnerRepositoryAndTag: "summerwind/actions-runner:latest"
  dindSidecarRepositoryAndTag: "docker:dind"
  pullPolicy: IfNotPresent

kube_rbac_proxy:
  image:
    repository: quay.io/brancz/kube-rbac-proxy
    tag: v0.8.0

imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
@@ -46,6 +49,8 @@ serviceAccount:

podAnnotations: {}

podLabels: {}

podSecurityContext:
  {}
  # fsGroup: 2000
@@ -63,6 +68,15 @@ service:
  type: ClusterIP
  port: 443

metrics:
  serviceMonitor: false
  port: 8443
  proxy:
    enabled: true
    image:
      repository: quay.io/brancz/kube-rbac-proxy
      tag: v0.10.0

resources:
  {}
  # We usually recommend not to specify default resources and to leave this as a conscious
@@ -76,13 +90,6 @@ resources:
  # cpu: 100m
  # memory: 128Mi

autoscaling:
  enabled: false
  minReplicas: 1
  maxReplicas: 100
  targetCPUUtilizationPercentage: 80
  # targetMemoryUtilizationPercentage: 80

nodeSelector: {}

tolerations: []
@@ -100,16 +107,22 @@ env:
  # https_proxy: "proxy.com:8080"
  # no_proxy: ""

scope:
  # If true, the controller will only watch custom resources in a single namespace
  singleNamespace: false
  # If `scope.singleNamespace=true`, the controller will only watch custom resources in this namespace
  # The default value is "", which means the namespace of the controller
  watchNamespace: ""

githubWebhookServer:
  enabled: false
  labels: {}
  replicaCount: 1
  syncPeriod: 10m
  secret:
    create: true
    create: false
    name: "github-webhook-server"
    ### GitHub Webhook Configuration
    #github_webhook_secret_token: ""
    github_webhook_secret_token: ""
  imagePullSecrets: []
  nameOverride: ""
  fullnameOverride: ""
@@ -122,6 +135,7 @@ githubWebhookServer:
    # If not set and create is true, a name is generated using the fullname template
    name: ""
  podAnnotations: {}
  podLabels: {}
  podSecurityContext: {}
    # fsGroup: 2000
  securityContext: {}
@@ -132,6 +146,7 @@ githubWebhookServer:
  priorityClassName: ""
  service:
    type: ClusterIP
    annotations: {}
    ports:
    - port: 80
      targetPort: http

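The `scope` block above feeds the `--watch-namespace` flag emitted by the deployment template earlier in this diff. A hypothetical override confining the controller to a single namespace:

```yaml
# Hypothetical override: watch only the "ci-runners" namespace instead of the
# whole cluster; leave watchNamespace empty to fall back to the release namespace.
scope:
  singleNamespace: true
  watchNamespace: "ci-runners"
```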
@@ -25,8 +25,9 @@ import (
	"sync"
	"time"

	actionsv1alpha1 "github.com/summerwind/actions-runner-controller/api/v1alpha1"
	"github.com/summerwind/actions-runner-controller/controllers"
	actionsv1alpha1 "github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
	"github.com/actions-runner-controller/actions-runner-controller/controllers"
	zaplib "go.uber.org/zap"
	"k8s.io/apimachinery/pkg/runtime"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
	_ "k8s.io/client-go/plugin/pkg/client/auth/exec"
@@ -42,6 +43,13 @@ var (
	setupLog = ctrl.Log.WithName("setup")
)

const (
	logLevelDebug = "debug"
	logLevelInfo  = "info"
	logLevelWarn  = "warn"
	logLevelError = "error"
)

func init() {
	_ = clientgoscheme.AddToScheme(scheme)

@@ -63,6 +71,7 @@ func main() {

		enableLeaderElection bool
		syncPeriod           time.Duration
		logLevel             string
	)

	webhookSecretToken = os.Getenv("GITHUB_WEBHOOK_SECRET_TOKEN")
@@ -73,6 +82,7 @@ func main() {
	flag.BoolVar(&enableLeaderElection, "enable-leader-election", false,
		"Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager.")
	flag.DurationVar(&syncPeriod, "sync-period", 10*time.Minute, "Determines the minimum frequency at which K8s resources managed by this controller are reconciled. When you use autoscaling, set to a lower value like 10 minutes, because this corresponds to the minimum time to react on demand change")
	flag.StringVar(&logLevel, "log-level", logLevelDebug, `The verbosity of the logging. Valid values are "debug", "info", "warn", "error". Defaults to "debug".`)
	flag.Parse()

	if webhookSecretToken == "" {
@@ -86,7 +96,19 @@ func main() {
	}

	logger := zap.New(func(o *zap.Options) {
		o.Development = true
		switch logLevel {
		case logLevelDebug:
			o.Development = true
		case logLevelInfo:
			lvl := zaplib.NewAtomicLevelAt(zaplib.InfoLevel)
			o.Level = &lvl
		case logLevelWarn:
			lvl := zaplib.NewAtomicLevelAt(zaplib.WarnLevel)
			o.Level = &lvl
		case logLevelError:
			lvl := zaplib.NewAtomicLevelAt(zaplib.ErrorLevel)
			o.Level = &lvl
		}
	})

	ctrl.SetLogger(logger)
@@ -110,7 +132,7 @@ func main() {
		Recorder:       nil,
		Scheme:         mgr.GetScheme(),
		SecretKeyBytes: []byte(webhookSecretToken),
		WatchNamespace: watchNamespace,
		Namespace:      watchNamespace,
	}

	if err = hraGitHubWebhook.SetupWithManager(mgr); err != nil {
@@ -128,7 +150,7 @@ func main() {
		defer wg.Done()

		setupLog.Info("starting webhook server")
		if err := mgr.Start(ctx.Done()); err != nil {
		if err := mgr.Start(ctx); err != nil {
			setupLog.Error(err, "problem running manager")
			os.Exit(1)
		}
@@ -161,7 +183,7 @@ func main() {
	}()

	go func() {
		<-ctrl.SetupSignalHandler()
		<-ctrl.SetupSignalHandler().Done()
		cancel()
	}()

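The new `--log-level` flag above is what the chart's `logLevel` values feed into; both deployment templates only emit the flag when the value is set. A hypothetical override quieting both containers:

```yaml
# Hypothetical override: run the manager at "warn" and the webhook server at
# "info"; unset values fall back to the binary's default of "debug".
logLevel: warn
githubWebhookServer:
  logLevel: info
```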
@@ -1,23 +1,13 @@

---
apiVersion: apiextensions.k8s.io/v1beta1
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.3.0
controller-gen.kubebuilder.io/version: v0.6.0
creationTimestamp: null
name: horizontalrunnerautoscalers.actions.summerwind.dev
spec:
additionalPrinterColumns:
- JSONPath: .spec.minReplicas
name: Min
type: number
- JSONPath: .spec.maxReplicas
name: Max
type: number
- JSONPath: .status.desiredReplicas
name: Desired
type: number
group: actions.summerwind.dev
names:
kind: HorizontalRunnerAutoscaler
@@ -25,191 +15,281 @@ spec:
plural: horizontalrunnerautoscalers
singular: horizontalrunnerautoscaler
scope: Namespaced
subresources:
status: {}
validation:
openAPIV3Schema:
description: HorizontalRunnerAutoscaler is the Schema for the horizontalrunnerautoscaler
API
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: HorizontalRunnerAutoscalerSpec defines the desired state of
HorizontalRunnerAutoscaler
properties:
capacityReservations:
items:
description: CapacityReservation specifies the number of replicas
temporarily added to the scale target until ExpirationTime.
versions:
- additionalPrinterColumns:
- jsonPath: .spec.minReplicas
name: Min
type: number
- jsonPath: .spec.maxReplicas
name: Max
type: number
- jsonPath: .status.desiredReplicas
name: Desired
type: number
- jsonPath: .status.scheduledOverridesSummary
name: Schedule
type: string
name: v1alpha1
schema:
openAPIV3Schema:
description: HorizontalRunnerAutoscaler is the Schema for the horizontalrunnerautoscaler
API
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: HorizontalRunnerAutoscalerSpec defines the desired state
of HorizontalRunnerAutoscaler
properties:
capacityReservations:
items:
description: CapacityReservation specifies the number of replicas
temporarily added to the scale target until ExpirationTime.
properties:
expirationTime:
format: date-time
type: string
name:
type: string
replicas:
type: integer
type: object
type: array
maxReplicas:
description: MaxReplicas is the maximum number of replicas the deployment
is allowed to scale
type: integer
metrics:
description: Metrics is the collection of various metric targets to
calculate desired number of runners
items:
properties:
repositoryNames:
description: RepositoryNames is the list of repository names
to be used for calculating the metric. For example, a repository
name is the REPO part of `github.com/USER/REPO`.
items:
type: string
type: array
scaleDownAdjustment:
description: ScaleDownAdjustment is the number of runners removed
on scale-down. You can only specify either ScaleDownFactor
or ScaleDownAdjustment.
type: integer
scaleDownFactor:
description: ScaleDownFactor is the multiplicative factor applied
to the current number of runners used to determine how many
pods should be removed.
type: string
scaleDownThreshold:
description: ScaleDownThreshold is the percentage of busy runners
less than which will trigger the hpa to scale the runners
down.
type: string
scaleUpAdjustment:
description: ScaleUpAdjustment is the number of runners added
on scale-up. You can only specify either ScaleUpFactor or
ScaleUpAdjustment.
type: integer
scaleUpFactor:
description: ScaleUpFactor is the multiplicative factor applied
to the current number of runners used to determine how many
pods should be added.
type: string
scaleUpThreshold:
description: ScaleUpThreshold is the percentage of busy runners
greater than which will trigger the hpa to scale runners up.
type: string
type:
description: Type is the type of metric to be used for autoscaling.
The only supported Type is TotalNumberOfQueuedAndInProgressWorkflowRuns
type: string
type: object
type: array
minReplicas:
description: MinReplicas is the minimum number of replicas the deployment
is allowed to scale
type: integer
scaleDownDelaySecondsAfterScaleOut:
description: ScaleDownDelaySecondsAfterScaleUp is the approximate
delay for a scale down followed by a scale up. Used to prevent flapping
(down->up->down->... loop)
type: integer
scaleTargetRef:
description: ScaleTargetRef is the reference to the scaled resource like
RunnerDeployment
properties:
expirationTime:
format: date-time
kind:
description: Kind is the type of resource being referenced
enum:
- RunnerDeployment
- RunnerSet
type: string
name:
description: Name is the name of resource being referenced
type: string
replicas:
type: integer
type: object
type: array
maxReplicas:
description: MaxReplicas is the maximum number of replicas the deployment
is allowed to scale
type: integer
metrics:
description: Metrics is the collection of various metric targets to
calculate desired number of runners
items:
properties:
repositoryNames:
description: RepositoryNames is the list of repository names to
be used for calculating the metric. For example, a repository
name is the REPO part of `github.com/USER/REPO`.
items:
scaleUpTriggers:
description: "ScaleUpTriggers is an experimental feature to increase
the desired replicas by 1 on each webhook request received by
the webhookBasedAutoscaler. \n This feature requires you to also
enable and deploy the webhookBasedAutoscaler onto your cluster.
\n Note that the added runners remain until the next sync period
at least, and they may or may not be used by GitHub Actions depending
on the timing. They are intended to be used to gain \"resource slack\"
immediately after you receive a webhook from GitHub, so that you
can loosely expect MinReplicas runners to be always available."
items:
properties:
amount:
type: integer
duration:
type: string
type: array
scaleDownAdjustment:
description: ScaleDownAdjustment is the number of runners removed
on scale-down. You can only specify either ScaleDownFactor or
ScaleDownAdjustment.
type: integer
scaleDownFactor:
description: ScaleDownFactor is the multiplicative factor applied
to the current number of runners used to determine how many
pods should be removed.
type: string
scaleDownThreshold:
description: ScaleDownThreshold is the percentage of busy runners
less than which will trigger the hpa to scale the runners down.
type: string
scaleUpAdjustment:
description: ScaleUpAdjustment is the number of runners added
on scale-up. You can only specify either ScaleUpFactor or ScaleUpAdjustment.
type: integer
scaleUpFactor:
description: ScaleUpFactor is the multiplicative factor applied
to the current number of runners used to determine how many
pods should be added.
type: string
scaleUpThreshold:
description: ScaleUpThreshold is the percentage of busy runners
greater than which will trigger the hpa to scale runners up.
type: string
type:
description: Type is the type of metric to be used for autoscaling.
The only supported Type is TotalNumberOfQueuedAndInProgressWorkflowRuns
type: string
type: object
type: array
minReplicas:
description: MinReplicas is the minimum number of replicas the deployment
is allowed to scale
type: integer
scaleDownDelaySecondsAfterScaleOut:
description: ScaleDownDelaySecondsAfterScaleUp is the approximate delay
for a scale down followed by a scale up. Used to prevent flapping (down->up->down->...
loop)
type: integer
scaleTargetRef:
description: ScaleTargetRef is the reference to the scaled resource like
RunnerDeployment
properties:
name:
type: string
type: object
scaleUpTriggers:
description: "ScaleUpTriggers is an experimental feature to increase
the desired replicas by 1 on each webhook request received by the
webhookBasedAutoscaler. \n This feature requires you to also enable
and deploy the webhookBasedAutoscaler onto your cluster. \n Note that
the added runners remain until the next sync period at least, and
they may or may not be used by GitHub Actions depending on the timing.
|
||||
They are intended to be used to gain \"resource slack\" immediately
|
||||
after you receive a webhook from GitHub, so that you can loosely expect
|
||||
MinReplicas runners to be always available."
|
||||
items:
|
||||
properties:
|
||||
amount:
|
||||
type: integer
|
||||
duration:
|
||||
type: string
|
||||
githubEvent:
|
||||
properties:
|
||||
checkRun:
|
||||
description: https://docs.github.com/en/actions/reference/events-that-trigger-workflows#check_run
|
||||
properties:
|
||||
status:
|
||||
type: string
|
||||
types:
|
||||
items:
|
||||
githubEvent:
|
||||
properties:
|
||||
checkRun:
|
||||
description: https://docs.github.com/en/actions/reference/events-that-trigger-workflows#check_run
|
||||
properties:
|
||||
names:
|
||||
description: Names is a list of GitHub Actions glob
|
||||
patterns. Any check_run event whose name matches one
|
||||
of patterns in the list can trigger autoscaling. Note
|
||||
that check_run name seem to equal to the job name
|
||||
you've defined in your actions workflow yaml file.
|
||||
So it is very likely that you can utilize this to
|
||||
trigger depending on the job.
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
status:
|
||||
type: string
|
||||
type: array
|
||||
type: object
|
||||
pullRequest:
|
||||
description: https://docs.github.com/en/actions/reference/events-that-trigger-workflows#pull_request
|
||||
properties:
|
||||
branches:
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
types:
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
type: object
|
||||
push:
|
||||
description: PushSpec is the condition for triggering scale-up
|
||||
on push event Also see https://docs.github.com/en/actions/reference/events-that-trigger-workflows#push
|
||||
type: object
|
||||
type: object
|
||||
type: object
|
||||
type: array
|
||||
type: object
|
||||
status:
|
||||
properties:
|
||||
cacheEntries:
|
||||
items:
|
||||
properties:
|
||||
expirationTime:
|
||||
format: date-time
|
||||
type: string
|
||||
key:
|
||||
type: string
|
||||
value:
|
||||
type: integer
|
||||
type: object
|
||||
type: array
|
||||
desiredReplicas:
|
||||
description: DesiredReplicas is the total number of desired, non-terminated
|
||||
and latest pods to be set for the primary RunnerSet This doesn't include
|
||||
outdated pods while upgrading the deployment and replacing the runnerset.
|
||||
type: integer
|
||||
lastSuccessfulScaleOutTime:
|
||||
format: date-time
|
||||
type: string
|
||||
observedGeneration:
|
||||
description: ObservedGeneration is the most recent generation observed
|
||||
for the target. It corresponds to e.g. RunnerDeployment's generation,
|
||||
which is updated on mutation by the API Server.
|
||||
format: int64
|
||||
type: integer
|
||||
type: object
|
||||
type: object
|
||||
version: v1alpha1
|
||||
versions:
|
||||
- name: v1alpha1
|
||||
types:
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
type: object
|
||||
pullRequest:
|
||||
description: https://docs.github.com/en/actions/reference/events-that-trigger-workflows#pull_request
|
||||
properties:
|
||||
branches:
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
types:
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
type: object
|
||||
push:
|
||||
description: PushSpec is the condition for triggering scale-up
|
||||
on push event Also see https://docs.github.com/en/actions/reference/events-that-trigger-workflows#push
|
||||
type: object
|
||||
type: object
|
||||
type: object
|
||||
type: array
|
||||
scheduledOverrides:
|
||||
description: ScheduledOverrides is the list of ScheduledOverride.
|
||||
It can be used to override a few fields of HorizontalRunnerAutoscalerSpec
|
||||
on schedule. The earlier a scheduled override is, the higher it
|
||||
is prioritized.
|
||||
items:
|
||||
description: ScheduledOverride can be used to override a few fields
|
||||
of HorizontalRunnerAutoscalerSpec on schedule. A schedule can
|
||||
optionally be recurring, so that the correspoding override happens
|
||||
every day, week, month, or year.
|
||||
properties:
|
||||
endTime:
|
||||
description: EndTime is the time at which the first override
|
||||
ends.
|
||||
format: date-time
|
||||
type: string
|
||||
minReplicas:
|
||||
description: MinReplicas is the number of runners while overriding.
|
||||
If omitted, it doesn't override minReplicas.
|
||||
minimum: 0
|
||||
nullable: true
|
||||
type: integer
|
||||
recurrenceRule:
|
||||
properties:
|
||||
frequency:
|
||||
description: Frequency is the name of a predefined interval
|
||||
of each recurrence. The valid values are "Daily", "Weekly",
|
||||
"Monthly", and "Yearly". If empty, the corresponding override
|
||||
happens only once.
|
||||
enum:
|
||||
- Daily
|
||||
- Weekly
|
||||
- Monthly
|
||||
- Yearly
|
||||
type: string
|
||||
untilTime:
|
||||
description: UntilTime is the time of the final recurrence.
|
||||
If empty, the schedule recurs forever.
|
||||
format: date-time
|
||||
type: string
|
||||
type: object
|
||||
startTime:
|
||||
description: StartTime is the time at which the first override
|
||||
starts.
|
||||
format: date-time
|
||||
type: string
|
||||
required:
|
||||
- endTime
|
||||
- startTime
|
||||
type: object
|
||||
type: array
|
||||
type: object
|
||||
status:
|
||||
properties:
|
||||
cacheEntries:
|
||||
items:
|
||||
properties:
|
||||
expirationTime:
|
||||
format: date-time
|
||||
type: string
|
||||
key:
|
||||
type: string
|
||||
value:
|
||||
type: integer
|
||||
type: object
|
||||
type: array
|
||||
desiredReplicas:
|
||||
description: DesiredReplicas is the total number of desired, non-terminated
|
||||
and latest pods to be set for the primary RunnerSet This doesn't
|
||||
include outdated pods while upgrading the deployment and replacing
|
||||
the runnerset.
|
||||
type: integer
|
||||
lastSuccessfulScaleOutTime:
|
||||
format: date-time
|
||||
nullable: true
|
||||
type: string
|
||||
observedGeneration:
|
||||
description: ObservedGeneration is the most recent generation observed
|
||||
for the target. It corresponds to e.g. RunnerDeployment's generation,
|
||||
which is updated on mutation by the API Server.
|
||||
format: int64
|
||||
type: integer
|
||||
scheduledOverridesSummary:
|
||||
description: ScheduledOverridesSummary is the summary of active and
|
||||
upcoming scheduled overrides to be shown in e.g. a column of a `kubectl
|
||||
get hra` output for observability.
|
||||
type: string
|
||||
type: object
|
||||
type: object
|
||||
served: true
|
||||
storage: true
|
||||
subresources:
|
||||
status: {}
|
||||
status:
|
||||
acceptedNames:
|
||||
kind: ""
|
||||
|
||||
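Read together, these fields compose as in the following minimal sketch of a HorizontalRunnerAutoscaler that exercises this schema; all resource and repository names are hypothetical placeholders and the values are illustrative only:

apiVersion: actions.summerwind.dev/v1alpha1
kind: HorizontalRunnerAutoscaler
metadata:
  name: example-hra                  # hypothetical name
spec:
  scaleTargetRef:
    kind: RunnerDeployment           # RunnerSet is the other accepted kind
    name: example-runnerdeploy       # hypothetical scale target
  minReplicas: 1
  maxReplicas: 5
  metrics:
  - type: TotalNumberOfQueuedAndInProgressWorkflowRuns
    repositoryNames:
    - example-repo                   # the REPO part of github.com/USER/REPO
  scheduledOverrides:
  - startTime: "2021-05-01T00:00:00Z"
    endTime: "2021-05-01T12:00:00Z"
    minReplicas: 0
    recurrenceRule:
      frequency: Weekly

The scheduledOverrides entry above would shrink the fleet to zero for twelve hours every week starting from the given startTime.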
File diff suppressed because it is too large.
File diff suppressed because it is too large.
File diff suppressed because it is too large.

7163  config/crd/bases/actions.summerwind.dev_runnersets.yaml  (new file; diff suppressed because it is too large)
@@ -10,7 +10,7 @@ spec:
     spec:
       containers:
       - name: kube-rbac-proxy
-        image: quay.io/brancz/kube-rbac-proxy:v0.8.0
+        image: quay.io/brancz/kube-rbac-proxy:v0.10.0
         args:
         - "--secure-listen-address=0.0.0.0:8443"
         - "--upstream=http://127.0.0.1:8080/"

@@ -4,5 +4,5 @@ apiVersion: kustomize.config.k8s.io/v1beta1
 kind: Kustomization
 images:
 - name: controller
-  newName: summerwind/actions-runner-controller
-  newTag: latest
+  newName: mumoshu/actions-runner-controller
+  newTag: controller1
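The same images override mechanism can be used to pin any custom build of the controller. A sketch, assuming a hypothetical registry and tag:

apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
images:
- name: controller
  newName: registry.example.com/actions-runner-controller   # hypothetical registry/name
  newTag: mybuild                                            # hypothetical tag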
@@ -134,6 +134,67 @@ rules:
   - get
   - patch
   - update
+- apiGroups:
+  - actions.summerwind.dev
+  resources:
+  - runnersets
+  verbs:
+  - create
+  - delete
+  - get
+  - list
+  - patch
+  - update
+  - watch
+- apiGroups:
+  - actions.summerwind.dev
+  resources:
+  - runnersets/finalizers
+  verbs:
+  - create
+  - delete
+  - get
+  - list
+  - patch
+  - update
+  - watch
+- apiGroups:
+  - actions.summerwind.dev
+  resources:
+  - runnersets/status
+  verbs:
+  - get
+  - patch
+  - update
+- apiGroups:
+  - apps
+  resources:
+  - statefulsets
+  verbs:
+  - create
+  - delete
+  - get
+  - list
+  - patch
+  - update
+  - watch
+- apiGroups:
+  - apps
+  resources:
+  - statefulsets/status
+  verbs:
+  - get
+  - patch
+  - update
+- apiGroups:
+  - coordination.k8s.io
+  resources:
+  - leases
+  verbs:
+  - create
+  - get
+  - list
+  - update
 - apiGroups:
   - ""
   resources:

@@ -3,4 +3,4 @@ kind: Runner
 metadata:
   name: summerwind-actions-runner-controller
 spec:
-  repository: summerwind/actions-runner-controller
+  repository: actions-runner-controller/actions-runner-controller

@@ -6,4 +6,4 @@ spec:
   replicas: 2
   template:
     spec:
-      repository: summerwind/actions-runner-controller
+      repository: actions-runner-controller/actions-runner-controller

@@ -6,4 +6,4 @@ spec:
   replicas: 2
   template:
     spec:
-      repository: summerwind/actions-runner-controller
+      repository: actions-runner-controller/actions-runner-controller
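For reference, a complete manifest built from the fields shown in these example hunks might look like the following sketch (metadata.name is a hypothetical placeholder):

apiVersion: actions.summerwind.dev/v1alpha1
kind: RunnerDeployment
metadata:
  name: example-runnerdeploy          # hypothetical name
spec:
  replicas: 2
  template:
    spec:
      repository: actions-runner-controller/actions-runner-controller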
@@ -1,5 +1,6 @@
 resources:
 - manifests.yaml
+- manifests.v1beta1.yaml
 - service.yaml
 
 configurations:

130  config/webhook/manifests.v1beta1.yaml  (new file)
@@ -0,0 +1,130 @@

---
apiVersion: admissionregistration.k8s.io/v1beta1
kind: MutatingWebhookConfiguration
metadata:
  creationTimestamp: null
  name: mutating-webhook-configuration
webhooks:
- admissionReviewVersions: null
  clientConfig:
    service:
      name: webhook-service
      namespace: system
      path: /mutate-actions-summerwind-dev-v1alpha1-runner
  failurePolicy: Fail
  name: mutate.runner.actions.summerwind.dev
  rules:
  - apiGroups:
    - actions.summerwind.dev
    apiVersions:
    - v1alpha1
    operations:
    - CREATE
    - UPDATE
    resources:
    - runners
  sideEffects: None
- admissionReviewVersions: null
  clientConfig:
    service:
      name: webhook-service
      namespace: system
      path: /mutate-actions-summerwind-dev-v1alpha1-runnerdeployment
  failurePolicy: Fail
  name: mutate.runnerdeployment.actions.summerwind.dev
  rules:
  - apiGroups:
    - actions.summerwind.dev
    apiVersions:
    - v1alpha1
    operations:
    - CREATE
    - UPDATE
    resources:
    - runnerdeployments
  sideEffects: null
- admissionReviewVersions: null
  clientConfig:
    service:
      name: webhook-service
      namespace: system
      path: /mutate-actions-summerwind-dev-v1alpha1-runnerreplicaset
  failurePolicy: Fail
  name: mutate.runnerreplicaset.actions.summerwind.dev
  rules:
  - apiGroups:
    - actions.summerwind.dev
    apiVersions:
    - v1alpha1
    operations:
    - CREATE
    - UPDATE
    resources:
    - runnerreplicasets
  sideEffects: None

---
apiVersion: admissionregistration.k8s.io/v1beta1
kind: ValidatingWebhookConfiguration
metadata:
  creationTimestamp: null
  name: validating-webhook-configuration
webhooks:
- admissionReviewVersions: null
  clientConfig:
    service:
      name: webhook-service
      namespace: system
      path: /validate-actions-summerwind-dev-v1alpha1-runner
  failurePolicy: Fail
  name: validate.runner.actions.summerwind.dev
  rules:
  - apiGroups:
    - actions.summerwind.dev
    apiVersions:
    - v1alpha1
    operations:
    - CREATE
    - UPDATE
    resources:
    - runners
  sideEffects: None
- admissionReviewVersions: null
  clientConfig:
    service:
      name: webhook-service
      namespace: system
      path: /validate-actions-summerwind-dev-v1alpha1-runnerdeployment
  failurePolicy: Fail
  name: validate.runnerdeployment.actions.summerwind.dev
  rules:
  - apiGroups:
    - actions.summerwind.dev
    apiVersions:
    - v1alpha1
    operations:
    - CREATE
    - UPDATE
    resources:
    - runnerdeployments
  sideEffects: null
- admissionReviewVersions: null
  clientConfig:
    service:
      name: webhook-service
      namespace: system
      path: /validate-actions-summerwind-dev-v1alpha1-runnerreplicaset
  failurePolicy: Fail
  name: validate.runnerreplicaset.actions.summerwind.dev
  rules:
  - apiGroups:
    - actions.summerwind.dev
    apiVersions:
    - v1alpha1
    operations:
    - CREATE
    - UPDATE
    resources:
    - runnerreplicasets
  sideEffects: None
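All of these webhook configurations point at a `webhook-service` Service in the `system` namespace. As a sketch only — the port numbers and selector below are assumptions, not taken from this diff — a matching Service could look like:

apiVersion: v1
kind: Service
metadata:
  name: webhook-service
  namespace: system
spec:
  ports:
  - port: 443
    targetPort: 9443                      # assumed controller webhook port
  selector:
    control-plane: controller-manager     # assumed pod label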
@@ -1,124 +1,27 @@
 
 ---
-apiVersion: admissionregistration.k8s.io/v1beta1
+apiVersion: admissionregistration.k8s.io/v1
 kind: MutatingWebhookConfiguration
 metadata:
   creationTimestamp: null
   name: mutating-webhook-configuration
 webhooks:
-- clientConfig:
-    caBundle: Cg==
+- admissionReviewVersions:
+  - v1beta1
+  clientConfig:
     service:
       name: webhook-service
       namespace: system
-      path: /mutate-actions-summerwind-dev-v1alpha1-runner
-  failurePolicy: Fail
-  name: mutate.runner.actions.summerwind.dev
+      path: /mutate-runner-set-pod
+  failurePolicy: Ignore
+  name: mutate-runner-pod.webhook.actions.summerwind.dev
   rules:
   - apiGroups:
-    - actions.summerwind.dev
+    - ""
     apiVersions:
-    - v1alpha1
+    - v1
     operations:
     - CREATE
     - UPDATE
     resources:
-    - runners
-- clientConfig:
-    caBundle: Cg==
-    service:
-      name: webhook-service
-      namespace: system
-      path: /mutate-actions-summerwind-dev-v1alpha1-runnerdeployment
-  failurePolicy: Fail
-  name: mutate.runnerdeployment.actions.summerwind.dev
-  rules:
-  - apiGroups:
-    - actions.summerwind.dev
-    apiVersions:
-    - v1alpha1
-    operations:
-    - CREATE
-    - UPDATE
-    resources:
-    - runnerdeployments
-- clientConfig:
-    caBundle: Cg==
-    service:
-      name: webhook-service
-      namespace: system
-      path: /mutate-actions-summerwind-dev-v1alpha1-runnerreplicaset
-  failurePolicy: Fail
-  name: mutate.runnerreplicaset.actions.summerwind.dev
-  rules:
-  - apiGroups:
-    - actions.summerwind.dev
-    apiVersions:
-    - v1alpha1
-    operations:
-    - CREATE
-    - UPDATE
-    resources:
-    - runnerreplicasets
-
----
-apiVersion: admissionregistration.k8s.io/v1beta1
-kind: ValidatingWebhookConfiguration
-metadata:
-  creationTimestamp: null
-  name: validating-webhook-configuration
-webhooks:
-- clientConfig:
-    caBundle: Cg==
-    service:
-      name: webhook-service
-      namespace: system
-      path: /validate-actions-summerwind-dev-v1alpha1-runner
-  failurePolicy: Fail
-  name: validate.runner.actions.summerwind.dev
-  rules:
-  - apiGroups:
-    - actions.summerwind.dev
-    apiVersions:
-    - v1alpha1
-    operations:
-    - CREATE
-    - UPDATE
-    resources:
-    - runners
-- clientConfig:
-    caBundle: Cg==
-    service:
-      name: webhook-service
-      namespace: system
-      path: /validate-actions-summerwind-dev-v1alpha1-runnerdeployment
-  failurePolicy: Fail
-  name: validate.runnerdeployment.actions.summerwind.dev
-  rules:
-  - apiGroups:
-    - actions.summerwind.dev
-    apiVersions:
-    - v1alpha1
-    operations:
-    - CREATE
-    - UPDATE
-    resources:
-    - runnerdeployments
-- clientConfig:
-    caBundle: Cg==
-    service:
-      name: webhook-service
-      namespace: system
-      path: /validate-actions-summerwind-dev-v1alpha1-runnerreplicaset
-  failurePolicy: Fail
-  name: validate.runnerreplicaset.actions.summerwind.dev
-  rules:
-  - apiGroups:
-    - actions.summerwind.dev
-    apiVersions:
-    - v1alpha1
-    operations:
-    - CREATE
-    - UPDATE
-    resources:
-    - runnerreplicasets
+    - pods
+  sideEffects: None

@@ -9,10 +9,7 @@ import (
 	"strings"
 	"time"
 
-	"github.com/summerwind/actions-runner-controller/api/v1alpha1"
-	kerrors "k8s.io/apimachinery/pkg/api/errors"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"sigs.k8s.io/controller-runtime/pkg/client"
+	"github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
 )
 
 const (
@@ -34,7 +31,7 @@ func getValueAvailableAt(now time.Time, from, to *time.Time, reservedValue int)
 	return &reservedValue
 }
 
-func (r *HorizontalRunnerAutoscalerReconciler) getDesiredReplicasFromCache(hra v1alpha1.HorizontalRunnerAutoscaler) *int {
+func (r *HorizontalRunnerAutoscalerReconciler) fetchSuggestedReplicasFromCache(hra v1alpha1.HorizontalRunnerAutoscaler) *int {
 	var entry *v1alpha1.CacheEntry
 
 	for i := range hra.Status.CacheEntries {
@@ -63,7 +60,7 @@ func (r *HorizontalRunnerAutoscalerReconciler) getDesiredReplicasFromCache(hra v
 	return nil
 }
 
-func (r *HorizontalRunnerAutoscalerReconciler) determineDesiredReplicas(rd v1alpha1.RunnerDeployment, hra v1alpha1.HorizontalRunnerAutoscaler) (*int, error) {
+func (r *HorizontalRunnerAutoscalerReconciler) suggestDesiredReplicas(st scaleTarget, hra v1alpha1.HorizontalRunnerAutoscaler) (*int, error) {
 	if hra.Spec.MinReplicas == nil {
 		return nil, fmt.Errorf("horizontalrunnerautoscaler %s/%s is missing minReplicas", hra.Namespace, hra.Name)
 	} else if hra.Spec.MaxReplicas == nil {
@@ -71,31 +68,87 @@ func (r *HorizontalRunnerAutoscalerReconciler) determineDesiredReplicas(rd v1alp
 	}
 
 	metrics := hra.Spec.Metrics
-	if len(metrics) == 0 || metrics[0].Type == v1alpha1.AutoscalingMetricTypeTotalNumberOfQueuedAndInProgressWorkflowRuns {
-		return r.calculateReplicasByQueuedAndInProgressWorkflowRuns(rd, hra)
-	} else if metrics[0].Type == v1alpha1.AutoscalingMetricTypePercentageRunnersBusy {
-		return r.calculateReplicasByPercentageRunnersBusy(rd, hra)
-	} else {
-		return nil, fmt.Errorf("validting autoscaling metrics: unsupported metric type %q", metrics[0].Type)
+	numMetrics := len(metrics)
+	if numMetrics == 0 {
+		if len(hra.Spec.ScaleUpTriggers) == 0 {
+			return r.suggestReplicasByQueuedAndInProgressWorkflowRuns(st, hra, nil)
+		}
+
+		return nil, nil
+	} else if numMetrics > 2 {
+		return nil, fmt.Errorf("Too many autoscaling metrics configured: It must be 0 to 2, but got %d", numMetrics)
 	}
 
+	primaryMetric := metrics[0]
+	primaryMetricType := primaryMetric.Type
+
+	var (
+		suggested *int
+		err       error
+	)
+
+	switch primaryMetricType {
+	case v1alpha1.AutoscalingMetricTypeTotalNumberOfQueuedAndInProgressWorkflowRuns:
+		suggested, err = r.suggestReplicasByQueuedAndInProgressWorkflowRuns(st, hra, &primaryMetric)
+	case v1alpha1.AutoscalingMetricTypePercentageRunnersBusy:
+		suggested, err = r.suggestReplicasByPercentageRunnersBusy(st, hra, primaryMetric)
+	default:
+		return nil, fmt.Errorf("validting autoscaling metrics: unsupported metric type %q", primaryMetric)
+	}
+
+	if err != nil {
+		return nil, err
+	}
+
+	if suggested != nil && *suggested > 0 {
+		return suggested, nil
+	}
+
+	if len(metrics) == 1 {
+		// This is never supposed to happen but anyway-
+		// Fall-back to `minReplicas + capacityReservedThroughWebhook`.
+		return nil, nil
+	}
+
+	// At this point, we are sure that there are exactly 2 Metrics entries.
+
+	fallbackMetric := metrics[1]
+	fallbackMetricType := fallbackMetric.Type
+
+	if primaryMetricType != v1alpha1.AutoscalingMetricTypePercentageRunnersBusy ||
+		fallbackMetricType != v1alpha1.AutoscalingMetricTypeTotalNumberOfQueuedAndInProgressWorkflowRuns {
+
+		return nil, fmt.Errorf(
+			"invalid HRA Spec: Metrics[0] of %s cannot be combined with Metrics[1] of %s: The only allowed combination is 0=PercentageRunnersBusy and 1=TotalNumberOfQueuedAndInProgressWorkflowRuns",
+			primaryMetricType, fallbackMetricType,
+		)
+	}
+
+	return r.suggestReplicasByQueuedAndInProgressWorkflowRuns(st, hra, &fallbackMetric)
 }
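The new dispatch logic above permits at most two metrics, and only in one order: PercentageRunnersBusy as the primary metric with TotalNumberOfQueuedAndInProgressWorkflowRuns as the fallback. A sketch of an HRA spec using that single allowed combination (the names and the threshold/factor values are illustrative assumptions):

spec:
  scaleTargetRef:
    name: example-runnerdeploy                           # hypothetical
  minReplicas: 1
  maxReplicas: 10
  metrics:
  - type: PercentageRunnersBusy                          # primary metric
    scaleUpThreshold: "0.75"                             # illustrative value
    scaleDownThreshold: "0.25"                           # illustrative value
    scaleUpFactor: "2"
    scaleDownFactor: "0.5"
  - type: TotalNumberOfQueuedAndInProgressWorkflowRuns   # fallback metric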
-func (r *HorizontalRunnerAutoscalerReconciler) calculateReplicasByQueuedAndInProgressWorkflowRuns(rd v1alpha1.RunnerDeployment, hra v1alpha1.HorizontalRunnerAutoscaler) (*int, error) {
+func (r *HorizontalRunnerAutoscalerReconciler) suggestReplicasByQueuedAndInProgressWorkflowRuns(st scaleTarget, hra v1alpha1.HorizontalRunnerAutoscaler, metrics *v1alpha1.MetricSpec) (*int, error) {
 
 	var repos [][]string
-	metrics := hra.Spec.Metrics
-	repoID := rd.Spec.Template.Spec.Repository
+	repoID := st.repo
 	if repoID == "" {
-		orgName := rd.Spec.Template.Spec.Organization
+		orgName := st.org
 		if orgName == "" {
 			return nil, fmt.Errorf("asserting runner deployment spec to detect bug: spec.template.organization should not be empty on this code path")
 		}
 
-		if len(metrics[0].RepositoryNames) == 0 {
+		// In case it's an organizational runners deployment without any scaling metrics defined,
+		// we assume that the desired replicas should always be `minReplicas + capacityReservedThroughWebhook`.
+		// See https://github.com/actions-runner-controller/actions-runner-controller/issues/377#issuecomment-793372693
+		if metrics == nil {
+			return nil, nil
+		}
+
+		if len(metrics.RepositoryNames) == 0 {
 			return nil, errors.New("validating autoscaling metrics: spec.autoscaling.metrics[].repositoryNames is required and must have one more more entries for organizational runner deployment")
 		}
 
-		for _, repoName := range metrics[0].RepositoryNames {
+		for _, repoName := range metrics.RepositoryNames {
 			repos = append(repos, []string{orgName, repoName})
 		}
 	} else {
@@ -165,45 +218,25 @@ func (r *HorizontalRunnerAutoscalerReconciler) calculateReplicasByQueuedAndInPro
 		}
 	}
 
-	minReplicas := *hra.Spec.MinReplicas
-	maxReplicas := *hra.Spec.MaxReplicas
 	necessaryReplicas := queued + inProgress
 
-	var desiredReplicas int
-
-	if necessaryReplicas < minReplicas {
-		desiredReplicas = minReplicas
-	} else if necessaryReplicas > maxReplicas {
-		desiredReplicas = maxReplicas
-	} else {
-		desiredReplicas = necessaryReplicas
-	}
-
-	rd.Status.Replicas = &desiredReplicas
-	replicas := desiredReplicas
-
 	r.Log.V(1).Info(
-		"Calculated desired replicas",
-		"computed_replicas_desired", desiredReplicas,
-		"spec_replicas_min", minReplicas,
-		"spec_replicas_max", maxReplicas,
+		fmt.Sprintf("Suggested desired replicas of %d by TotalNumberOfQueuedAndInProgressWorkflowRuns", necessaryReplicas),
 		"workflow_runs_completed", completed,
 		"workflow_runs_in_progress", inProgress,
 		"workflow_runs_queued", queued,
 		"workflow_runs_unknown", unknown,
 		"namespace", hra.Namespace,
-		"runner_deployment", rd.Name,
+		"kind", st.kind,
+		"name", st.st,
 		"horizontal_runner_autoscaler", hra.Name,
 	)
 
-	return &replicas, nil
+	return &necessaryReplicas, nil
 }
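As the error message above indicates, organizational runner deployments must enumerate repositories for this metric. A sketch, with hypothetical organization and repository names:

spec:
  scaleTargetRef:
    name: org-runnerdeploy           # hypothetical organizational RunnerDeployment
  minReplicas: 1
  maxReplicas: 5
  metrics:
  - type: TotalNumberOfQueuedAndInProgressWorkflowRuns
    repositoryNames:                 # required for organizational runners
    - repo-a                         # hypothetical; the REPO part of github.com/ORG/REPO
    - repo-b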
-func (r *HorizontalRunnerAutoscalerReconciler) calculateReplicasByPercentageRunnersBusy(rd v1alpha1.RunnerDeployment, hra v1alpha1.HorizontalRunnerAutoscaler) (*int, error) {
+func (r *HorizontalRunnerAutoscalerReconciler) suggestReplicasByPercentageRunnersBusy(st scaleTarget, hra v1alpha1.HorizontalRunnerAutoscaler, metrics v1alpha1.MetricSpec) (*int, error) {
 	ctx := context.Background()
-	minReplicas := *hra.Spec.MinReplicas
-	maxReplicas := *hra.Spec.MaxReplicas
-	metrics := hra.Spec.Metrics[0]
 	scaleUpThreshold := defaultScaleUpThreshold
 	scaleDownThreshold := defaultScaleDownThreshold
 	scaleUpFactor := defaultScaleUpFactor
@@ -259,32 +292,15 @@ func (r *HorizontalRunnerAutoscalerReconciler) calculateReplicasByPercentageRunn
 		scaleDownFactor = sdf
 	}
 
-	selector, err := metav1.LabelSelectorAsSelector(rd.Spec.Selector)
+	runnerMap, err := st.getRunnerMap()
 	if err != nil {
 		return nil, err
 	}
-	// return the list of runners in namespace. Horizontal Runner Autoscaler should only be responsible for scaling resources in its own ns.
-	var runnerList v1alpha1.RunnerList
-	if err := r.List(
-		ctx,
-		&runnerList,
-		client.InNamespace(rd.Namespace),
-		client.MatchingLabelsSelector{Selector: selector},
-	); err != nil {
-		if !kerrors.IsNotFound(err) {
-			return nil, err
-		}
-	}
-
-	runnerMap := make(map[string]struct{})
-	for _, items := range runnerList.Items {
-		runnerMap[items.Name] = struct{}{}
-	}
 
 	var (
-		enterprise   = rd.Spec.Template.Spec.Enterprise
-		organization = rd.Spec.Template.Spec.Organization
-		repository   = rd.Spec.Template.Spec.Repository
+		enterprise   = st.enterprise
+		organization = st.org
+		repository   = st.repo
 	)
 
 	// ListRunners will return all runners managed by GitHub - not restricted to ns
@@ -299,7 +315,7 @@ func (r *HorizontalRunnerAutoscalerReconciler) calculateReplicasByPercentageRunn
 
 	var desiredReplicasBefore int
 
-	if v := rd.Spec.Replicas; v == nil {
+	if v := st.replicas; v == nil {
 		desiredReplicasBefore = 1
 	} else {
 		desiredReplicasBefore = *v
@@ -311,7 +327,7 @@ func (r *HorizontalRunnerAutoscalerReconciler) calculateReplicasByPercentageRunn
 		numRunnersBusy int
 	)
 
-	numRunners = len(runnerList.Items)
+	numRunners = len(runnerMap)
 
 	for _, runner := range runners {
 		if _, ok := runnerMap[*runner.Name]; ok {
@@ -338,13 +354,7 @@ func (r *HorizontalRunnerAutoscalerReconciler) calculateReplicasByPercentageRunn
 			desiredReplicas = int(float64(desiredReplicasBefore) * scaleDownFactor)
 		}
 	} else {
-		desiredReplicas = *rd.Spec.Replicas
-	}
-
-	if desiredReplicas < minReplicas {
-		desiredReplicas = minReplicas
-	} else if desiredReplicas > maxReplicas {
-		desiredReplicas = maxReplicas
+		desiredReplicas = *st.replicas
 	}
 
 	// NOTES for operators:
@@ -353,24 +363,20 @@ func (r *HorizontalRunnerAutoscalerReconciler) calculateReplicasByPercentageRunn
 	// the runnerdeployment controller is replacing RunnerReplicaSet for runner update.
 
 	r.Log.V(1).Info(
-		"Calculated desired replicas",
-		"replicas_min", minReplicas,
-		"replicas_max", maxReplicas,
+		fmt.Sprintf("Suggested desired replicas of %d by PercentageRunnersBusy", desiredReplicas),
 		"replicas_desired_before", desiredReplicasBefore,
 		"replicas_desired", desiredReplicas,
 		"num_runners", numRunners,
 		"num_runners_registered", numRunnersRegistered,
 		"num_runners_busy", numRunnersBusy,
 		"namespace", hra.Namespace,
-		"runner_deployment", rd.Name,
+		"kind", st.kind,
+		"name", st.st,
 		"horizontal_runner_autoscaler", hra.Name,
 		"enterprise", enterprise,
 		"organization", organization,
 		"repository", repository,
 	)
 
-	rd.Status.Replicas = &desiredReplicas
-	replicas := desiredReplicas
-
-	return &replicas, nil
+	return &desiredReplicas, nil
 }
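For orientation, the thresholds consumed by this function map to HRA fields like the following sketch (values are illustrative; per the CRD descriptions, an adjustment and a factor are mutually exclusive for each direction):

  metrics:
  - type: PercentageRunnersBusy
    scaleUpThreshold: "0.75"     # scale up when more than 75% of runners are busy
    scaleDownThreshold: "0.25"   # scale down when fewer than 25% are busy
    scaleUpAdjustment: 2         # add a fixed 2 runners instead of multiplying by a factor
    scaleDownAdjustment: 1       # remove a fixed 1 runner instead of multiplying by a factor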
@@ -1,14 +1,15 @@
 package controllers
 
 import (
+	"context"
 	"fmt"
 	"net/http/httptest"
 	"net/url"
 	"testing"
 
-	"github.com/summerwind/actions-runner-controller/api/v1alpha1"
-	"github.com/summerwind/actions-runner-controller/github"
-	"github.com/summerwind/actions-runner-controller/github/fake"
+	"github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
+	"github.com/actions-runner-controller/actions-runner-controller/github"
+	"github.com/actions-runner-controller/actions-runner-controller/github/fake"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
@@ -203,13 +204,15 @@ func TestDetermineDesiredReplicas_RepositoryRunner(t *testing.T) {
 			Spec: v1alpha1.RunnerDeploymentSpec{
 				Template: v1alpha1.RunnerTemplate{
 					Spec: v1alpha1.RunnerSpec{
-						Repository: tc.repo,
+						RunnerConfig: v1alpha1.RunnerConfig{
+							Repository: tc.repo,
+						},
 					},
 				},
 				Replicas: tc.fixed,
 			},
 			Status: v1alpha1.RunnerDeploymentStatus{
-				Replicas: tc.sReplicas,
+				DesiredReplicas: tc.sReplicas,
 			},
 		}
 
@@ -224,7 +227,14 @@ func TestDetermineDesiredReplicas_RepositoryRunner(t *testing.T) {
 			},
 		}
 
-		got, err := h.computeReplicas(rd, hra)
+		minReplicas, _, _, err := h.getMinReplicas(log, metav1Now.Time, hra)
+		if err != nil {
+			t.Fatalf("unexpected error: %v", err)
+		}
+
+		st := h.scaleTargetFromRD(context.Background(), rd)
+
+		got, _, _, err := h.computeReplicasWithCache(log, metav1Now.Time, st, hra, minReplicas)
 		if err != nil {
 			if tc.err == "" {
 				t.Fatalf("unexpected error: expected none, got %v", err)
@@ -234,12 +244,8 @@ func TestDetermineDesiredReplicas_RepositoryRunner(t *testing.T) {
 			return
 		}
 
-		if got == nil {
-			t.Fatalf("unexpected value of rs.Spec.Replicas: nil")
-		}
-
-		if *got != tc.want {
-			t.Errorf("%d: incorrect desired replicas: want %d, got %d", i, tc.want, *got)
+		if got != tc.want {
+			t.Errorf("%d: incorrect desired replicas: want %d, got %d", i, tc.want, got)
 		}
 		})
 	}
@@ -424,6 +430,8 @@ func TestDetermineDesiredReplicas_OrganizationalRunner(t *testing.T) {
 	_ = v1alpha1.AddToScheme(scheme)
 
 	t.Run(fmt.Sprintf("case %d", i), func(t *testing.T) {
+		t.Helper()
+
 		server := fake.NewServer(
 			fake.WithListRepositoryWorkflowRunsResponse(200, tc.workflowRuns, tc.workflowRuns_queued, tc.workflowRuns_in_progress),
 			fake.WithListWorkflowJobsResponse(200, tc.workflowJobs),
@@ -455,13 +463,15 @@ func TestDetermineDesiredReplicas_OrganizationalRunner(t *testing.T) {
 				},
 			},
 				Spec: v1alpha1.RunnerSpec{
-					Organization: tc.org,
+					RunnerConfig: v1alpha1.RunnerConfig{
+						Organization: tc.org,
+					},
 				},
 			},
 			Replicas: tc.fixed,
 		},
 		Status: v1alpha1.RunnerDeploymentStatus{
-			Replicas: tc.sReplicas,
+			DesiredReplicas: tc.sReplicas,
 		},
 	}
 
@@ -485,7 +495,14 @@ func TestDetermineDesiredReplicas_OrganizationalRunner(t *testing.T) {
 			},
 		}
 
-		got, err := h.computeReplicas(rd, hra)
+		minReplicas, _, _, err := h.getMinReplicas(log, metav1Now.Time, hra)
+		if err != nil {
+			t.Fatalf("unexpected error: %v", err)
+		}
+
+		st := h.scaleTargetFromRD(context.Background(), rd)
+
+		got, _, _, err := h.computeReplicasWithCache(log, metav1Now.Time, st, hra, minReplicas)
 		if err != nil {
 			if tc.err == "" {
 				t.Fatalf("unexpected error: expected none, got %v", err)
@@ -495,12 +512,8 @@ func TestDetermineDesiredReplicas_OrganizationalRunner(t *testing.T) {
 			return
 		}
 
-		if got == nil {
-			t.Fatalf("unexpected value of rs.Spec.Replicas: nil, wanted %v", tc.want)
-		}
-
-		if *got != tc.want {
-			t.Errorf("%d: incorrect desired replicas: want %d, got %d", i, tc.want, *got)
+		if got != tc.want {
+			t.Errorf("%d: incorrect desired replicas: want %d, got %d", i, tc.want, got)
 		}
 	})
 	}

@@ -20,13 +20,14 @@ import (
 	"context"
 	"fmt"
 	"io/ioutil"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/types"
 	"net/http"
-	"sigs.k8s.io/controller-runtime/pkg/reconcile"
 	"strings"
 	"time"
 
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
+	"sigs.k8s.io/controller-runtime/pkg/reconcile"
+
 	"github.com/go-logr/logr"
 	gogithub "github.com/google/go-github/v33/github"
 	"k8s.io/apimachinery/pkg/runtime"
@@ -34,7 +35,7 @@ import (
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 
-	"github.com/summerwind/actions-runner-controller/api/v1alpha1"
+	"github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
 )
 
 const (
@@ -53,14 +54,14 @@ type HorizontalRunnerAutoscalerGitHubWebhook struct {
 	// the administrator is generated and specified in GitHub Web UI.
 	SecretKeyBytes []byte
 
-	// WatchNamespace is the namespace to watch for HorizontalRunnerAutoscaler's to be
+	// Namespace is the namespace to watch for HorizontalRunnerAutoscaler's to be
 	// scaled on Webhook.
 	// Set to empty for letting it watch for all namespaces.
-	WatchNamespace string
-	Name           string
+	Namespace string
+	Name      string
 }
 
-func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) Reconcile(request reconcile.Request) (reconcile.Result, error) {
+func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) Reconcile(_ context.Context, request reconcile.Request) (reconcile.Result, error) {
 	return ctrl.Result{}, nil
 }
@@ -95,6 +96,12 @@ func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) Handle(w http.Respons
 		}
 	}()
 
+	// respond ok to GET / e.g. for health check
+	if r.Method == http.MethodGet {
+		fmt.Fprintln(w, "webhook server is running")
+		return
+	}
+
 	var payload []byte
 
 	if len(autoscaler.SecretKeyBytes) > 0 {
@@ -153,6 +160,13 @@ func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) Handle(w http.Respons
 			e.Repo.Owner.GetType(),
 			autoscaler.MatchPullRequestEvent(e),
 		)
+
+		if pullRequest := e.PullRequest; pullRequest != nil {
+			log = log.WithValues(
+				"pullRequest.base.ref", e.PullRequest.Base.GetRef(),
+				"action", e.GetAction(),
+			)
+		}
 	case *gogithub.CheckRunEvent:
 		target, err = autoscaler.getScaleUpTarget(
 			context.TODO(),
@@ -162,6 +176,13 @@ func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) Handle(w http.Respons
 			e.Repo.Owner.GetType(),
 			autoscaler.MatchCheckRunEvent(e),
 		)
+
+		if checkRun := e.GetCheckRun(); checkRun != nil {
+			log = log.WithValues(
+				"checkRun.status", checkRun.GetStatus(),
+				"action", e.GetAction(),
+			)
+		}
 	case *gogithub.PingEvent:
 		ok = true
 
@@ -189,9 +210,11 @@ func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) Handle(w http.Respons
 	}
 
 	if target == nil {
-		msg := "no horizontalrunnerautoscaler to scale for this github event"
+		log.Info(
+			"Scale target not found. If this is unexpected, ensure that there is exactly one repository-wide or organizational runner deployment that matches this webhook event",
+		)
 
-		log.Info(msg, "eventType", webhookType)
+		msg := "no horizontalrunnerautoscaler to scale for this github event"
 
 		ok = true
 
@@ -224,7 +247,7 @@ func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) Handle(w http.Respons
 }
 
 func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) findHRAsByKey(ctx context.Context, value string) ([]v1alpha1.HorizontalRunnerAutoscaler, error) {
-	ns := autoscaler.WatchNamespace
+	ns := autoscaler.Namespace
 
 	var defaultListOpts []client.ListOption
 
@@ -238,8 +261,8 @@ func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) findHRAsByKey(ctx con
 	opts := append([]client.ListOption{}, defaultListOpts...)
 	opts = append(opts, client.MatchingFields{scaleTargetKey: value})
 
-	if autoscaler.WatchNamespace != "" {
-		opts = append(opts, client.InNamespace(autoscaler.WatchNamespace))
+	if autoscaler.Namespace != "" {
+		opts = append(opts, client.InNamespace(autoscaler.Namespace))
 	}
 
 	var hraList v1alpha1.HorizontalRunnerAutoscalerList
@@ -308,6 +331,8 @@ func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) getScaleTarget(ctx co
 		return nil, err
 	}
 
+	autoscaler.Log.V(1).Info(fmt.Sprintf("Found %d HRAs by key", len(hras)), "key", name)
+
 	targets := autoscaler.searchScaleTargets(hras, f)
 
 	n := len(targets)
@@ -326,7 +351,7 @@ func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) getScaleTarget(ctx co
 		autoscaler.Log.Info(
 			"Found too many scale targets: "+
 				"It must be exactly one to avoid ambiguity. "+
-				"Either set WatchNamespace for the webhook-based autoscaler to let it only find HRAs in the namespace, "+
+				"Either set Namespace for the webhook-based autoscaler to let it only find HRAs in the namespace, "+
 				"or update Repository or Organization fields in your RunnerDeployment resources to fix the ambiguity.",
 			"scaleTargets", strings.Join(scaleTargetIDs, ","))
 
@@ -340,14 +365,16 @@ func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) getScaleUpTarget(ctx
 	repositoryRunnerKey := owner + "/" + repo
 
 	if target, err := autoscaler.getScaleTarget(ctx, repositoryRunnerKey, f); err != nil {
-		autoscaler.Log.Info("finding repository-wide runner", "repository", repositoryRunnerKey)
+		log.Info("finding repository-wide runner", "repository", repositoryRunnerKey)
 		return nil, err
 	} else if target != nil {
-		autoscaler.Log.Info("scale up target is repository-wide runners", "repository", repo)
+		log.Info("scale up target is repository-wide runners", "repository", repo)
 		return target, nil
 	}
 
 	if ownerType == "User" {
+		log.V(1).Info("no repository runner found", "organization", owner)
+
 		return nil, nil
 	}
 
@@ -357,12 +384,13 @@ func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) getScaleUpTarget(ctx
 	} else if target != nil {
 		log.Info("scale up target is organizational runners", "organization", owner)
 		return target, nil
+	} else {
+		log.V(1).Info("no repository runner or organizational runner found",
+			"repository", repositoryRunnerKey,
+			"organization", owner,
+		)
 	}
 
+	log.Info(
+		"Scale target not found. If this is unexpected, ensure that there is exactly one repository-wide or organizational runner deployment that matches this webhook event",
+	)
+
 	return nil, nil
 }
@@ -415,7 +443,7 @@ func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) SetupWithManager(mgr
 
 	autoscaler.Recorder = mgr.GetEventRecorderFor(name)
 
-	if err := mgr.GetFieldIndexer().IndexField(&v1alpha1.HorizontalRunnerAutoscaler{}, scaleTargetKey, func(rawObj runtime.Object) []string {
+	if err := mgr.GetFieldIndexer().IndexField(context.TODO(), &v1alpha1.HorizontalRunnerAutoscaler{}, scaleTargetKey, func(rawObj client.Object) []string {
 		hra := rawObj.(*v1alpha1.HorizontalRunnerAutoscaler)
 
 		if hra.Spec.ScaleTargetRef.Name == "" {

@@ -1,8 +1,9 @@
 package controllers
 
 import (
+	"github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
+	"github.com/actions-runner-controller/actions-runner-controller/pkg/actionsglob"
 	"github.com/google/go-github/v33/github"
-	"github.com/summerwind/actions-runner-controller/api/v1alpha1"
 )
 
 func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) MatchCheckRunEvent(event *github.CheckRunEvent) func(scaleUpTrigger v1alpha1.ScaleUpTrigger) bool {
@@ -27,6 +28,16 @@ func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) MatchCheckRunEvent(ev
 			return false
 		}
 
+		if checkRun := event.CheckRun; checkRun != nil && len(cr.Names) > 0 {
+			for _, pat := range cr.Names {
+				if r := actionsglob.Match(pat, checkRun.GetName()); r {
+					return true
+				}
+			}
+
+			return false
+		}
+
 		return true
 	}
 }
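The glob matching added above is driven from the `names` list under a check_run scale-up trigger. A sketch of the corresponding HRA configuration (the pattern, amount, duration, and event filters are illustrative assumptions):

spec:
  scaleTargetRef:
    name: example-runnerdeploy     # hypothetical
  minReplicas: 1
  maxReplicas: 5
  scaleUpTriggers:
  - amount: 1
    duration: "5m"                 # illustrative duration for the capacity reservation
    githubEvent:
      checkRun:
        types: ["created"]         # illustrative filter
        status: "queued"           # illustrative filter
        names:
        - "build-*"                # hypothetical glob matching workflow job names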
||||
@@ -1,8 +1,8 @@
|
||||
package controllers
|
||||
|
||||
import (
|
||||
"github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
|
||||
"github.com/google/go-github/v33/github"
|
||||
"github.com/summerwind/actions-runner-controller/api/v1alpha1"
|
||||
)
|
||||
|
||||
func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) MatchPullRequestEvent(event *github.PullRequestEvent) func(scaleUpTrigger v1alpha1.ScaleUpTrigger) bool {
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
package controllers
|
||||
|
||||
import (
|
||||
"github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
|
||||
"github.com/google/go-github/v33/github"
|
||||
"github.com/summerwind/actions-runner-controller/api/v1alpha1"
|
||||
)
|
||||
|
||||
func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) MatchPushEvent(event *github.PushEvent) func(scaleUpTrigger v1alpha1.ScaleUpTrigger) bool {
|
||||
|
||||
@@ -4,21 +4,22 @@ import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"github.com/go-logr/logr"
|
||||
"github.com/google/go-github/v33/github"
|
||||
actionsv1alpha1 "github.com/summerwind/actions-runner-controller/api/v1alpha1"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"net/url"
|
||||
"os"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client/fake"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
actionsv1alpha1 "github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
|
||||
"github.com/go-logr/logr"
|
||||
"github.com/google/go-github/v33/github"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client/fake"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -113,6 +114,19 @@ func TestWebhookPing(t *testing.T) {
|
||||
)
|
||||
}
|
||||
|
||||
func TestGetRequest(t *testing.T) {
|
||||
hra := HorizontalRunnerAutoscalerGitHubWebhook{}
|
||||
request, _ := http.NewRequest(http.MethodGet, "/", nil)
|
||||
recorder := httptest.ResponseRecorder{}
|
||||
|
||||
hra.Handle(&recorder, request)
|
||||
response := recorder.Result()
|
||||
|
||||
if response.StatusCode != http.StatusOK {
|
||||
t.Errorf("want %d, got %d", http.StatusOK, response.StatusCode)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetValidCapacityReservations(t *testing.T) {
|
||||
now := time.Now()
|
||||
|
||||
|
||||
@@ -19,9 +19,13 @@ package controllers
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"time"
|
||||
|
||||
"github.com/summerwind/actions-runner-controller/github"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
|
||||
"github.com/actions-runner-controller/actions-runner-controller/github"
|
||||
kerrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
|
||||
"github.com/go-logr/logr"
|
||||
@@ -30,10 +34,10 @@ import (
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
"github.com/summerwind/actions-runner-controller/api/v1alpha1"
|
||||
"github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
|
||||
"github.com/actions-runner-controller/actions-runner-controller/controllers/metrics"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -52,14 +56,15 @@ type HorizontalRunnerAutoscalerReconciler struct {
|
||||
Name string
|
||||
}
|
||||
|
||||
const defaultReplicas = 1
|
||||
|
||||
// +kubebuilder:rbac:groups=actions.summerwind.dev,resources=runnerdeployments,verbs=get;list;watch;update;patch
|
||||
// +kubebuilder:rbac:groups=actions.summerwind.dev,resources=horizontalrunnerautoscalers,verbs=get;list;watch;create;update;patch;delete
|
||||
// +kubebuilder:rbac:groups=actions.summerwind.dev,resources=horizontalrunnerautoscalers/finalizers,verbs=get;list;watch;create;update;patch;delete
|
||||
// +kubebuilder:rbac:groups=actions.summerwind.dev,resources=horizontalrunnerautoscalers/status,verbs=get;update;patch
|
||||
// +kubebuilder:rbac:groups=core,resources=events,verbs=create;patch
|
||||
|
||||
func (r *HorizontalRunnerAutoscalerReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
|
||||
ctx := context.Background()
|
||||
func (r *HorizontalRunnerAutoscalerReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
|
||||
log := r.Log.WithValues("horizontalrunnerautoscaler", req.NamespacedName)
|
||||
|
||||
var hra v1alpha1.HorizontalRunnerAutoscaler
|
||||
@@ -71,69 +76,208 @@ func (r *HorizontalRunnerAutoscalerReconciler) Reconcile(req ctrl.Request) (ctrl
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
var rd v1alpha1.RunnerDeployment
|
||||
if err := r.Get(ctx, types.NamespacedName{
|
||||
Namespace: req.Namespace,
|
||||
Name: hra.Spec.ScaleTargetRef.Name,
|
||||
}, &rd); err != nil {
|
||||
return ctrl.Result{}, client.IgnoreNotFound(err)
|
||||
}
|
||||
metrics.SetHorizontalRunnerAutoscalerSpec(hra.ObjectMeta, hra.Spec)
|
||||
|
||||
if !rd.ObjectMeta.DeletionTimestamp.IsZero() {
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
kind := hra.Spec.ScaleTargetRef.Kind
|
||||
|
||||
var replicas *int
|
||||
|
||||
replicasFromCache := r.getDesiredReplicasFromCache(hra)
|
||||
|
||||
if replicasFromCache != nil {
|
||||
replicas = replicasFromCache
|
||||
} else {
|
||||
var err error
|
||||
|
||||
replicas, err = r.computeReplicas(rd, hra)
|
||||
if err != nil {
|
||||
r.Recorder.Event(&hra, corev1.EventTypeNormal, "RunnerAutoscalingFailure", err.Error())
|
||||
|
||||
log.Error(err, "Could not compute replicas")
|
||||
|
||||
return ctrl.Result{}, err
|
||||
switch kind {
|
||||
case "", "RunnerDeployment":
|
||||
var rd v1alpha1.RunnerDeployment
|
||||
if err := r.Get(ctx, types.NamespacedName{
|
||||
Namespace: req.Namespace,
|
||||
Name: hra.Spec.ScaleTargetRef.Name,
|
||||
}, &rd); err != nil {
|
||||
return ctrl.Result{}, client.IgnoreNotFound(err)
|
||||
}
|
||||
|
||||
if !rd.ObjectMeta.DeletionTimestamp.IsZero() {
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
st := r.scaleTargetFromRD(ctx, rd)
|
||||
|
||||
return r.reconcile(ctx, req, log, hra, st, func(newDesiredReplicas int) error {
|
||||
currentDesiredReplicas := getIntOrDefault(rd.Spec.Replicas, defaultReplicas)
|
||||
|
||||
// Please add more conditions that we can in-place update the newest runnerreplicaset without disruption
|
||||
if currentDesiredReplicas != newDesiredReplicas {
|
||||
copy := rd.DeepCopy()
|
||||
copy.Spec.Replicas = &newDesiredReplicas
|
||||
|
||||
if err := r.Client.Patch(ctx, copy, client.MergeFrom(&rd)); err != nil {
|
||||
return fmt.Errorf("patching runnerdeployment to have %d replicas: %w", newDesiredReplicas, err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
case "RunnerSet":
|
||||
var rs v1alpha1.RunnerSet
|
||||
if err := r.Get(ctx, types.NamespacedName{
|
||||
Namespace: req.Namespace,
|
||||
Name: hra.Spec.ScaleTargetRef.Name,
|
||||
}, &rs); err != nil {
|
||||
return ctrl.Result{}, client.IgnoreNotFound(err)
|
||||
}
|
||||
|
||||
if !rs.ObjectMeta.DeletionTimestamp.IsZero() {
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
var replicas *int
|
||||
|
||||
if rs.Spec.Replicas != nil {
|
||||
v := int(*rs.Spec.Replicas)
|
||||
replicas = &v
|
||||
}
|
||||
|
||||
st := scaleTarget{
|
||||
st: rs.Name,
|
||||
kind: "runnerset",
|
||||
enterprise: rs.Spec.Enterprise,
|
||||
org: rs.Spec.Organization,
|
||||
repo: rs.Spec.Repository,
|
||||
replicas: replicas,
|
||||
getRunnerMap: func() (map[string]struct{}, error) {
|
||||
// return the list of runners in namespace. Horizontal Runner Autoscaler should only be responsible for scaling resources in its own ns.
|
||||
var runnerPodList corev1.PodList
|
||||
|
||||
var opts []client.ListOption
|
||||
|
||||
opts = append(opts, client.InNamespace(rs.Namespace))
|
||||
|
||||
selector, err := metav1.LabelSelectorAsSelector(rs.Spec.Selector)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
opts = append(opts, client.MatchingLabelsSelector{Selector: selector})
|
||||
|
||||
r.Log.V(2).Info("Finding runnerset's runner pods with selector", "ns", rs.Namespace)
|
||||
|
||||
if err := r.List(
|
||||
ctx,
|
||||
&runnerPodList,
|
||||
opts...,
|
||||
); err != nil {
|
||||
if !kerrors.IsNotFound(err) {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
runnerMap := make(map[string]struct{})
|
||||
for _, items := range runnerPodList.Items {
|
||||
runnerMap[items.Name] = struct{}{}
|
||||
}
|
||||
|
||||
return runnerMap, nil
|
||||
},
|
||||
}
|
||||
|
||||
return r.reconcile(ctx, req, log, hra, st, func(newDesiredReplicas int) error {
|
||||
var replicas *int
|
||||
if rs.Spec.Replicas != nil {
|
||||
v := int(*rs.Spec.Replicas)
|
||||
replicas = &v
|
||||
}
|
||||
currentDesiredReplicas := getIntOrDefault(replicas, defaultReplicas)
|
||||
|
||||
if currentDesiredReplicas != newDesiredReplicas {
|
||||
copy := rs.DeepCopy()
|
||||
v := int32(newDesiredReplicas)
|
||||
copy.Spec.Replicas = &v
|
||||
|
||||
if err := r.Client.Patch(ctx, copy, client.MergeFrom(&rs)); err != nil {
|
||||
return fmt.Errorf("patching runnerset to have %d replicas: %w", newDesiredReplicas, err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
const defaultReplicas = 1
|
||||
log.Info(fmt.Sprintf("Unsupported scale target %s %s: kind %s is not supported. valid kinds are %s and %s", kind, hra.Spec.ScaleTargetRef.Name, kind, "RunnerDeployment", "RunnerSet"))
|
||||
|
||||
currentDesiredReplicas := getIntOrDefault(rd.Spec.Replicas, defaultReplicas)
|
||||
newDesiredReplicas := getIntOrDefault(replicas, defaultReplicas)
|
||||
return ctrl.Result{}, nil
|
||||
}
func (r *HorizontalRunnerAutoscalerReconciler) scaleTargetFromRD(ctx context.Context, rd v1alpha1.RunnerDeployment) scaleTarget {
	st := scaleTarget{
		st:         rd.Name,
		kind:       "runnerdeployment",
		enterprise: rd.Spec.Template.Spec.Enterprise,
		org:        rd.Spec.Template.Spec.Organization,
		repo:       rd.Spec.Template.Spec.Repository,
		replicas:   rd.Spec.Replicas,
		getRunnerMap: func() (map[string]struct{}, error) {
			// return the list of runners in namespace. Horizontal Runner Autoscaler should only be responsible for scaling resources in its own ns.
			var runnerList v1alpha1.RunnerList

			var opts []client.ListOption

			opts = append(opts, client.InNamespace(rd.Namespace))

			selector, err := metav1.LabelSelectorAsSelector(getSelector(&rd))
			if err != nil {
				return nil, err
			}

			opts = append(opts, client.MatchingLabelsSelector{Selector: selector})

			r.Log.V(2).Info("Finding runners with selector", "ns", rd.Namespace)

			if err := r.List(
				ctx,
				&runnerList,
				opts...,
			); err != nil {
				if !kerrors.IsNotFound(err) {
					return nil, err
				}
			}
			runnerMap := make(map[string]struct{})
			for _, items := range runnerList.Items {
				runnerMap[items.Name] = struct{}{}
			}

			return runnerMap, nil
		},
	}

	return st
}
type scaleTarget struct {
	st, kind              string
	enterprise, repo, org string
	replicas              *int

	getRunnerMap func() (map[string]struct{}, error)
}
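Aside: scaleTarget is the seam between the two supported scale-target kinds (RunnerDeployment and RunnerSet) and the shared reconcile path; getRunnerMap hides how registered runners are looked up. A minimal, self-contained sketch of that seam with simplified stand-in names (not the diff's own types):

package main

import "fmt"

// scaleTargetSketch mirrors the shape of scaleTarget above, keeping only the
// parts needed to show the pattern: a kind plus a pluggable runner lookup.
type scaleTargetSketch struct {
	kind         string
	getRunnerMap func() (map[string]struct{}, error)
}

func main() {
	st := scaleTargetSketch{
		kind: "runnerdeployment",
		// Stubbed lookup, e.g. what a unit test could inject in place of a
		// Kubernetes List call like the closures above.
		getRunnerMap: func() (map[string]struct{}, error) {
			return map[string]struct{}{"runner-0": {}, "runner-1": {}}, nil
		},
	}

	m, err := st.getRunnerMap()
	if err != nil {
		panic(err)
	}
	fmt.Println(st.kind, "has", len(m), "registered runners")
}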
func (r *HorizontalRunnerAutoscalerReconciler) reconcile(ctx context.Context, req ctrl.Request, log logr.Logger, hra v1alpha1.HorizontalRunnerAutoscaler, st scaleTarget, updatedDesiredReplicas func(int) error) (ctrl.Result, error) {
	now := time.Now()

	for _, reservation := range hra.Spec.CapacityReservations {
		if reservation.ExpirationTime.Time.After(now) {
			newDesiredReplicas += reservation.Replicas
		}
	minReplicas, active, upcoming, err := r.getMinReplicas(log, now, hra)
	if err != nil {
		log.Error(err, "Could not compute min replicas")

		return ctrl.Result{}, err
	}

	if hra.Spec.MaxReplicas != nil && *hra.Spec.MaxReplicas < newDesiredReplicas {
		newDesiredReplicas = *hra.Spec.MaxReplicas
	newDesiredReplicas, computedReplicas, computedReplicasFromCache, err := r.computeReplicasWithCache(log, now, st, hra, minReplicas)
	if err != nil {
		r.Recorder.Event(&hra, corev1.EventTypeNormal, "RunnerAutoscalingFailure", err.Error())

		log.Error(err, "Could not compute replicas")

		return ctrl.Result{}, err
	}
	// Please add more conditions that we can in-place update the newest runnerreplicaset without disruption
	if currentDesiredReplicas != newDesiredReplicas {
		copy := rd.DeepCopy()
		copy.Spec.Replicas = &newDesiredReplicas

		if err := r.Client.Patch(ctx, copy, client.MergeFrom(&rd)); err != nil {
			return ctrl.Result{}, fmt.Errorf("patching runnerdeployment to have %d replicas: %w", newDesiredReplicas, err)
		}
	if err := updatedDesiredReplicas(newDesiredReplicas); err != nil {
		return ctrl.Result{}, err
	}

	var updated *v1alpha1.HorizontalRunnerAutoscaler
	updated := hra.DeepCopy()

	if hra.Status.DesiredReplicas == nil || *hra.Status.DesiredReplicas != newDesiredReplicas {
		updated = hra.DeepCopy()

		if (hra.Status.DesiredReplicas == nil && newDesiredReplicas > 1) ||
			(hra.Status.DesiredReplicas != nil && newDesiredReplicas > *hra.Status.DesiredReplicas) {
@@ -143,11 +287,7 @@ func (r *HorizontalRunnerAutoscalerReconciler) Reconcile(req ctrl.Request) (ctrl
		updated.Status.DesiredReplicas = &newDesiredReplicas
	}

	if replicasFromCache == nil {
		if updated == nil {
			updated = hra.DeepCopy()
		}

	if computedReplicasFromCache == nil {
		cacheEntries := getValidCacheEntries(updated, now)

		var cacheDuration time.Duration
@@ -160,14 +300,39 @@ func (r *HorizontalRunnerAutoscalerReconciler) Reconcile(req ctrl.Request) (ctrl

		updated.Status.CacheEntries = append(cacheEntries, v1alpha1.CacheEntry{
			Key:            v1alpha1.CacheEntryKeyDesiredReplicas,
			Value:          *replicas,
			Value:          computedReplicas,
			ExpirationTime: metav1.Time{Time: time.Now().Add(cacheDuration)},
		})
	}
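Aside: the cache handling above keeps the last computed replica count alive until its ExpirationTime. getValidCacheEntries itself is not part of this excerpt, so the filtering below is an assumption inferred from how the fresh entry is appended; a self-contained sketch:

package main

import (
	"fmt"
	"time"
)

// cacheEntry is a simplified stand-in for v1alpha1.CacheEntry.
type cacheEntry struct {
	Key            string
	Value          int
	ExpirationTime time.Time
}

// validCacheEntries drops entries that have already expired, mirroring what a
// getValidCacheEntries-style helper is assumed to do before a fresh entry is appended.
func validCacheEntries(entries []cacheEntry, now time.Time) []cacheEntry {
	var valid []cacheEntry
	for _, e := range entries {
		if e.ExpirationTime.After(now) {
			valid = append(valid, e)
		}
	}
	return valid
}

func main() {
	now := time.Now()
	entries := []cacheEntry{
		{Key: "desiredReplicas", Value: 3, ExpirationTime: now.Add(-time.Minute)}, // expired, dropped
		{Key: "desiredReplicas", Value: 5, ExpirationTime: now.Add(10 * time.Minute)},
	}
	fmt.Println(len(validCacheEntries(entries, now))) // 1
}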
	if updated != nil {
		var overridesSummary string

		if (active != nil && upcoming == nil) || (active != nil && upcoming != nil && active.Period.EndTime.Before(upcoming.Period.StartTime)) {
			after := defaultReplicas
			if hra.Spec.MinReplicas != nil && *hra.Spec.MinReplicas >= 0 {
				after = *hra.Spec.MinReplicas
			}

			overridesSummary = fmt.Sprintf("min=%d time=%s", after, active.Period.EndTime)
		}

		if active == nil && upcoming != nil || (active != nil && upcoming != nil && active.Period.EndTime.After(upcoming.Period.StartTime)) {
			if upcoming.ScheduledOverride.MinReplicas != nil {
				overridesSummary = fmt.Sprintf("min=%d time=%s", *upcoming.ScheduledOverride.MinReplicas, upcoming.Period.StartTime)
			}
		}

		if overridesSummary != "" {
			updated.Status.ScheduledOverridesSummary = &overridesSummary
		} else {
			updated.Status.ScheduledOverridesSummary = nil
		}

		if !reflect.DeepEqual(hra.Status, updated.Status) {
			metrics.SetHorizontalRunnerAutoscalerStatus(updated.ObjectMeta, updated.Status)

			if err := r.Status().Patch(ctx, updated, client.MergeFrom(&hra)); err != nil {
				return ctrl.Result{}, fmt.Errorf("patching horizontalrunnerautoscaler status to add cache entry: %w", err)
				return ctrl.Result{}, fmt.Errorf("patching horizontalrunnerautoscaler status: %w", err)
			}
		}
@@ -200,14 +365,132 @@ func (r *HorizontalRunnerAutoscalerReconciler) SetupWithManager(mgr ctrl.Manager
		Complete(r)
}

func (r *HorizontalRunnerAutoscalerReconciler) computeReplicas(rd v1alpha1.RunnerDeployment, hra v1alpha1.HorizontalRunnerAutoscaler) (*int, error) {
	var computedReplicas *int
type Override struct {
	ScheduledOverride v1alpha1.ScheduledOverride
	Period            Period
}

	replicas, err := r.determineDesiredReplicas(rd, hra)
	if err != nil {
		return nil, err
func (r *HorizontalRunnerAutoscalerReconciler) matchScheduledOverrides(log logr.Logger, now time.Time, hra v1alpha1.HorizontalRunnerAutoscaler) (*int, *Override, *Override, error) {
	var minReplicas *int
	var active, upcoming *Override

	for _, o := range hra.Spec.ScheduledOverrides {
		log.V(1).Info(
			"Checking scheduled override",
			"now", now,
			"startTime", o.StartTime,
			"endTime", o.EndTime,
			"frequency", o.RecurrenceRule.Frequency,
			"untilTime", o.RecurrenceRule.UntilTime,
		)

		a, u, err := MatchSchedule(
			now, o.StartTime.Time, o.EndTime.Time,
			RecurrenceRule{
				Frequency: o.RecurrenceRule.Frequency,
				UntilTime: o.RecurrenceRule.UntilTime.Time,
			},
		)
		if err != nil {
			return minReplicas, nil, nil, err
		}

		// Use the first when there are two or more active scheduled overrides,
		// as the spec defines that the earlier scheduled override is prioritized higher than later ones.
		if a != nil && active == nil {
			active = &Override{Period: *a, ScheduledOverride: o}

			if o.MinReplicas != nil {
				minReplicas = o.MinReplicas

				log.V(1).Info(
					"Found active scheduled override",
					"activeStartTime", a.StartTime,
					"activeEndTime", a.EndTime,
					"activeMinReplicas", minReplicas,
				)
			}
		}

		if u != nil && (upcoming == nil || u.StartTime.Before(upcoming.Period.StartTime)) {
			upcoming = &Override{Period: *u, ScheduledOverride: o}

			log.V(1).Info(
				"Found upcoming scheduled override",
				"upcomingStartTime", u.StartTime,
				"upcomingEndTime", u.EndTime,
				"upcomingMinReplicas", o.MinReplicas,
			)
		}
	}

	return minReplicas, active, upcoming, nil
}

func (r *HorizontalRunnerAutoscalerReconciler) getMinReplicas(log logr.Logger, now time.Time, hra v1alpha1.HorizontalRunnerAutoscaler) (int, *Override, *Override, error) {
	minReplicas := defaultReplicas
	if hra.Spec.MinReplicas != nil && *hra.Spec.MinReplicas >= 0 {
		minReplicas = *hra.Spec.MinReplicas
	}

	m, active, upcoming, err := r.matchScheduledOverrides(log, now, hra)
	if err != nil {
		return 0, nil, nil, err
	} else if m != nil {
		minReplicas = *m
	}

	return minReplicas, active, upcoming, nil
}
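Aside: getMinReplicas resolves the replica floor in a fixed order: the built-in default, then spec.minReplicas when set and non-negative, then the active scheduled override's minReplicas when one matched. A self-contained sketch of that precedence with simplified types:

package main

import "fmt"

// resolveMinReplicas applies the precedence shown in getMinReplicas above:
// default < spec.minReplicas < active scheduled override.
func resolveMinReplicas(defaultReplicas int, specMin, overrideMin *int) int {
	min := defaultReplicas
	if specMin != nil && *specMin >= 0 {
		min = *specMin
	}
	if overrideMin != nil {
		min = *overrideMin
	}
	return min
}

func main() {
	specMin := 2
	overrideMin := 0 // e.g. a weekend override scaling the fleet to zero
	fmt.Println(resolveMinReplicas(1, &specMin, nil))          // 2
	fmt.Println(resolveMinReplicas(1, &specMin, &overrideMin)) // 0
}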
func (r *HorizontalRunnerAutoscalerReconciler) computeReplicasWithCache(log logr.Logger, now time.Time, st scaleTarget, hra v1alpha1.HorizontalRunnerAutoscaler, minReplicas int) (int, int, *int, error) {
	var suggestedReplicas int

	suggestedReplicasFromCache := r.fetchSuggestedReplicasFromCache(hra)

	var cached *int

	if suggestedReplicasFromCache != nil {
		cached = suggestedReplicasFromCache

		if cached == nil {
			suggestedReplicas = minReplicas
		} else {
			suggestedReplicas = *cached
		}
	} else {
		v, err := r.suggestDesiredReplicas(st, hra)
		if err != nil {
			return 0, 0, nil, err
		}

		if v == nil {
			suggestedReplicas = minReplicas
		} else {
			suggestedReplicas = *v
		}
	}

	var reserved int

	for _, reservation := range hra.Spec.CapacityReservations {
		if reservation.ExpirationTime.Time.After(now) {
			reserved += reservation.Replicas
		}
	}

	newDesiredReplicas := suggestedReplicas + reserved

	if newDesiredReplicas < minReplicas {
		newDesiredReplicas = minReplicas
	} else if hra.Spec.MaxReplicas != nil && newDesiredReplicas > *hra.Spec.MaxReplicas {
		newDesiredReplicas = *hra.Spec.MaxReplicas
	}

	//
	// Delay scaling-down for ScaleDownDelaySecondsAfterScaleUp or DefaultScaleDownDelay
	//

	var scaleDownDelay time.Duration

	if hra.Spec.ScaleDownDelaySecondsAfterScaleUp != nil {
@@ -216,17 +499,50 @@ func (r *HorizontalRunnerAutoscalerReconciler) computeReplicas(rd v1alpha1.Runne
		scaleDownDelay = DefaultScaleDownDelay
	}

	now := time.Now()
	var scaleDownDelayUntil *time.Time

	if hra.Status.DesiredReplicas == nil ||
		*hra.Status.DesiredReplicas < *replicas ||
		hra.Status.LastSuccessfulScaleOutTime == nil ||
		hra.Status.LastSuccessfulScaleOutTime.Add(scaleDownDelay).Before(now) {
		*hra.Status.DesiredReplicas < newDesiredReplicas ||
		hra.Status.LastSuccessfulScaleOutTime == nil {

		computedReplicas = replicas
	} else if hra.Status.LastSuccessfulScaleOutTime != nil {
		t := hra.Status.LastSuccessfulScaleOutTime.Add(scaleDownDelay)

		// ScaleDownDelay is not passed
		if t.After(now) {
			scaleDownDelayUntil = &t
			newDesiredReplicas = *hra.Status.DesiredReplicas
		}
	} else {
		computedReplicas = hra.Status.DesiredReplicas
		newDesiredReplicas = *hra.Status.DesiredReplicas
	}

	return computedReplicas, nil
	//
	// Logs various numbers for monitoring and debugging purpose
	//

	kvs := []interface{}{
		"suggested", suggestedReplicas,
		"reserved", reserved,
		"min", minReplicas,
	}

	if cached != nil {
		kvs = append(kvs, "cached", *cached)
	}

	if scaleDownDelayUntil != nil {
		kvs = append(kvs, "last_scale_up_time", *hra.Status.LastSuccessfulScaleOutTime)
		kvs = append(kvs, "scale_down_delay_until", scaleDownDelayUntil)
	}

	if maxReplicas := hra.Spec.MaxReplicas; maxReplicas != nil {
		kvs = append(kvs, "max", *maxReplicas)
	}

	log.V(1).Info(fmt.Sprintf("Calculated desired replicas of %d", newDesiredReplicas),
		kvs...,
	)

	return newDesiredReplicas, suggestedReplicas, suggestedReplicasFromCache, nil
}
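Aside: the replica arithmetic in computeReplicasWithCache boils down to: take the suggested count (from the cache or the metric-based suggester), add the non-expired capacity reservations, then clamp to [minReplicas, maxReplicas]. A self-contained sketch with a worked example:

package main

import "fmt"

// desiredReplicas mirrors the arithmetic above: suggested + reserved, clamped.
func desiredReplicas(suggested, reserved, min int, max *int) int {
	n := suggested + reserved
	if n < min {
		n = min
	} else if max != nil && n > *max {
		n = *max
	}
	return n
}

func main() {
	max := 4
	// suggested=3 from metrics, reserved=2 from webhook capacity reservations,
	// min=1, max=4 -> the sum 5 is clamped down to 4.
	fmt.Println(desiredReplicas(3, 2, 1, &max)) // 4
}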
@@ -1,11 +1,12 @@
package controllers

import (
	"github.com/google/go-cmp/cmp"
	actionsv1alpha1 "github.com/summerwind/actions-runner-controller/api/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"testing"
	"time"

	actionsv1alpha1 "github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
	"github.com/google/go-cmp/cmp"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func TestGetValidCacheEntries(t *testing.T) {
@@ -7,11 +7,10 @@ import (
	"net/http/httptest"
	"time"

	github2 "github.com/actions-runner-controller/actions-runner-controller/github"
	"github.com/google/go-github/v33/github"
	github2 "github.com/summerwind/actions-runner-controller/github"
	"k8s.io/apimachinery/pkg/runtime"

	"github.com/summerwind/actions-runner-controller/github/fake"
	"github.com/actions-runner-controller/actions-runner-controller/github/fake"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"
@@ -24,7 +23,7 @@ import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"

	actionsv1alpha1 "github.com/summerwind/actions-runner-controller/api/v1alpha1"
	actionsv1alpha1 "github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
)

type testEnvironment struct {
@@ -52,8 +51,9 @@ var (
// * starting all the reconcilers
// * stopping all the reconcilers after the test ends
// Call this function at the start of each of your tests.
func SetupIntegrationTest(ctx context.Context) *testEnvironment {
	var stopCh chan struct{}
func SetupIntegrationTest(ctx2 context.Context) *testEnvironment {
	var ctx context.Context
	var cancel func()
	ns := &corev1.Namespace{}

	env := &testEnvironment{
@@ -63,7 +63,7 @@ func SetupIntegrationTest(ctx context.Context) *testEnvironment {
	}

	BeforeEach(func() {
		stopCh = make(chan struct{})
		ctx, cancel = context.WithCancel(ctx2)
		*ns = corev1.Namespace{
			ObjectMeta: metav1.ObjectMeta{Name: "testns-" + randStringRunes(5)},
		}
@@ -71,7 +71,9 @@ func SetupIntegrationTest(ctx context.Context) *testEnvironment {
		err := k8sClient.Create(ctx, ns)
		Expect(err).NotTo(HaveOccurred(), "failed to create test namespace")

		mgr, err := ctrl.NewManager(cfg, ctrl.Options{})
		mgr, err := ctrl.NewManager(cfg, ctrl.Options{
			Namespace: ns.Name,
		})
		Expect(err).NotTo(HaveOccurred(), "failed to create manager")

		responses := &fake.FixedResponses{}
@@ -97,6 +99,21 @@ func SetupIntegrationTest(ctx context.Context) *testEnvironment {
			return fmt.Sprintf("%s%s", ns.Name, name)
		}

		runnerController := &RunnerReconciler{
			Client:                      mgr.GetClient(),
			Scheme:                      scheme.Scheme,
			Log:                         logf.Log,
			Recorder:                    mgr.GetEventRecorderFor("runnerreplicaset-controller"),
			GitHubClient:                env.ghClient,
			RunnerImage:                 "example/runner:test",
			DockerImage:                 "example/docker:test",
			Name:                        controllerName("runner"),
			RegistrationRecheckInterval: time.Millisecond,
			RegistrationRecheckJitter:   time.Millisecond,
		}
		err = runnerController.SetupWithManager(mgr)
		Expect(err).NotTo(HaveOccurred(), "failed to setup runner controller")

		replicasetController := &RunnerReplicaSetReconciler{
			Client: mgr.GetClient(),
			Scheme: scheme.Scheme,
@@ -106,7 +123,7 @@ func SetupIntegrationTest(ctx context.Context) *testEnvironment {
			Name: controllerName("runnerreplicaset"),
		}
		err = replicasetController.SetupWithManager(mgr)
		Expect(err).NotTo(HaveOccurred(), "failed to setup controller")
		Expect(err).NotTo(HaveOccurred(), "failed to setup runnerreplicaset controller")

		deploymentsController := &RunnerDeploymentReconciler{
			Client: mgr.GetClient(),
@@ -116,7 +133,7 @@ func SetupIntegrationTest(ctx context.Context) *testEnvironment {
			Name: controllerName("runnnerdeployment"),
		}
		err = deploymentsController.SetupWithManager(mgr)
		Expect(err).NotTo(HaveOccurred(), "failed to setup controller")
		Expect(err).NotTo(HaveOccurred(), "failed to setup runnerdeployment controller")

		autoscalerController := &HorizontalRunnerAutoscalerReconciler{
			Client: mgr.GetClient(),
@@ -128,15 +145,15 @@ func SetupIntegrationTest(ctx context.Context) *testEnvironment {
			Name: controllerName("horizontalrunnerautoscaler"),
		}
		err = autoscalerController.SetupWithManager(mgr)
		Expect(err).NotTo(HaveOccurred(), "failed to setup controller")
		Expect(err).NotTo(HaveOccurred(), "failed to setup autoscaler controller")

		autoscalerWebhook := &HorizontalRunnerAutoscalerGitHubWebhook{
			Client:         mgr.GetClient(),
			Scheme:         scheme.Scheme,
			Log:            logf.Log,
			Recorder:       mgr.GetEventRecorderFor("horizontalrunnerautoscaler-controller"),
			Name:           controllerName("horizontalrunnerautoscalergithubwebhook"),
			WatchNamespace: ns.Name,
			Client:    mgr.GetClient(),
			Scheme:    scheme.Scheme,
			Log:       logf.Log,
			Recorder:  mgr.GetEventRecorderFor("horizontalrunnerautoscaler-controller"),
			Name:      controllerName("horizontalrunnerautoscalergithubwebhook"),
			Namespace: ns.Name,
		}
		err = autoscalerWebhook.SetupWithManager(mgr)
		Expect(err).NotTo(HaveOccurred(), "failed to setup autoscaler webhook")
@@ -149,13 +166,13 @@ func SetupIntegrationTest(ctx context.Context) *testEnvironment {
		go func() {
			defer GinkgoRecover()

			err := mgr.Start(stopCh)
			err := mgr.Start(ctx)
			Expect(err).NotTo(HaveOccurred(), "failed to start manager")
		}()
	})

	AfterEach(func() {
		close(stopCh)
		defer cancel()

		env.fakeGithubServer.Close()
		env.webhookServer.Close()
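Aside: the stopCh-to-context migration above tracks controller-runtime's API change: newer versions of Manager.Start take a context.Context instead of a stop channel (the exact version cutoff is not stated in this diff; it changed around controller-runtime v0.7). A minimal self-contained sketch of the new start/stop pattern:

package main

import (
	"context"
	"fmt"
	"time"
)

// start mimics manager.Start(ctx): it runs until the context is cancelled.
func start(ctx context.Context) error {
	<-ctx.Done()
	return ctx.Err()
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())

	done := make(chan error, 1)
	go func() { done <- start(ctx) }() // was: mgr.Start(stopCh)

	time.Sleep(10 * time.Millisecond)
	cancel() // was: close(stopCh)
	fmt.Println("manager stopped:", <-done)
}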
@@ -174,6 +191,97 @@ var _ = Context("INTEGRATION: Inside of a new namespace", func() {

	Describe("when no existing resources exist", func() {

		It("should create and scale organizational runners without any scaling metrics on pull_request event", func() {
			name := "example-runnerdeploy"

			{
				rd := &actionsv1alpha1.RunnerDeployment{
					ObjectMeta: metav1.ObjectMeta{
						Name:      name,
						Namespace: ns.Name,
					},
					Spec: actionsv1alpha1.RunnerDeploymentSpec{
						Replicas: intPtr(1),
						Selector: &metav1.LabelSelector{
							MatchLabels: map[string]string{
								"foo": "bar",
							},
						},
						Template: actionsv1alpha1.RunnerTemplate{
							ObjectMeta: metav1.ObjectMeta{
								Labels: map[string]string{
									"foo": "bar",
								},
							},
							Spec: actionsv1alpha1.RunnerSpec{
								RunnerConfig: actionsv1alpha1.RunnerConfig{
									Organization: "test",
									Image:        "bar",
									Group:        "baz",
								},
								RunnerPodSpec: actionsv1alpha1.RunnerPodSpec{
									Env: []corev1.EnvVar{
										{Name: "FOO", Value: "FOOVALUE"},
									},
								},
							},
						},
					},
				}

				ExpectCreate(ctx, rd, "test RunnerDeployment")
				ExpectRunnerSetsCountEventuallyEquals(ctx, ns.Name, 1)
				ExpectRunnerSetsManagedReplicasCountEventuallyEquals(ctx, ns.Name, 1)
			}

			// Scale-up to 2 replicas
			{
				hra := &actionsv1alpha1.HorizontalRunnerAutoscaler{
					ObjectMeta: metav1.ObjectMeta{
						Name:      name,
						Namespace: ns.Name,
					},
					Spec: actionsv1alpha1.HorizontalRunnerAutoscalerSpec{
						ScaleTargetRef: actionsv1alpha1.ScaleTargetRef{
							Name: name,
						},
						MinReplicas:                       intPtr(2),
						MaxReplicas:                       intPtr(5),
						ScaleDownDelaySecondsAfterScaleUp: intPtr(1),
						Metrics:                           nil,
						ScaleUpTriggers: []actionsv1alpha1.ScaleUpTrigger{
							{
								GitHubEvent: &actionsv1alpha1.GitHubEventScaleUpTriggerSpec{
									PullRequest: &actionsv1alpha1.PullRequestSpec{
										Types:    []string{"created"},
										Branches: []string{"main"},
									},
								},
								Amount:   1,
								Duration: metav1.Duration{Duration: time.Minute},
							},
						},
					},
				}

				ExpectCreate(ctx, hra, "test HorizontalRunnerAutoscaler")

				ExpectRunnerSetsCountEventuallyEquals(ctx, ns.Name, 1)
				ExpectRunnerSetsManagedReplicasCountEventuallyEquals(ctx, ns.Name, 2)
				ExpectHRAStatusCacheEntryLengthEventuallyEquals(ctx, ns.Name, name, 1)
			}

			{
				env.ExpectRegisteredNumberCountEventuallyEquals(2, "count of fake runners after HRA creation")
			}

			// Scale-up to 3 replicas on second pull_request create webhook event
			{
				env.SendOrgPullRequestEvent("test", "valid", "main", "created")
				ExpectRunnerSetsManagedReplicasCountEventuallyEquals(ctx, ns.Name, 3, "runners after second webhook event")
			}
		})
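Aside: each ScaleUpTrigger in the test above pairs a GitHub event filter with an Amount and a Duration; on a matching webhook delivery the autoscaler adds a capacity reservation of that amount which expires after the duration (the reservation storage was shown earlier in hra.Spec.CapacityReservations). A self-contained sketch of that bookkeeping with simplified types:

package main

import (
	"fmt"
	"time"
)

// reservation is a simplified stand-in for a capacity reservation created by a
// webhook-based ScaleUpTrigger: Amount extra runners until ExpirationTime.
type reservation struct {
	Replicas       int
	ExpirationTime time.Time
}

// onWebhookEvent appends a reservation for a matched trigger
// (amount=1, duration=1m in the test above).
func onWebhookEvent(rs []reservation, now time.Time, amount int, d time.Duration) []reservation {
	return append(rs, reservation{Replicas: amount, ExpirationTime: now.Add(d)})
}

func main() {
	now := time.Now()
	var rs []reservation
	rs = onWebhookEvent(rs, now, 1, time.Minute) // first pull_request event
	rs = onWebhookEvent(rs, now, 1, time.Minute) // second pull_request event

	var reserved int
	for _, r := range rs {
		if r.ExpirationTime.After(now) {
			reserved += r.Replicas
		}
	}
	fmt.Println("reserved:", reserved) // 2 extra runners on top of the suggested/min count
}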
It("should create and scale organization's repository runners on pull_request event", func() {
|
||||
name := "example-runnerdeploy"
|
||||
|
||||
@@ -197,11 +305,15 @@ var _ = Context("INTEGRATION: Inside of a new namespace", func() {
|
||||
},
|
||||
},
|
||||
Spec: actionsv1alpha1.RunnerSpec{
|
||||
Repository: "test/valid",
|
||||
Image: "bar",
|
||||
Group: "baz",
|
||||
Env: []corev1.EnvVar{
|
||||
{Name: "FOO", Value: "FOOVALUE"},
|
||||
RunnerConfig: actionsv1alpha1.RunnerConfig{
|
||||
Repository: "test/valid",
|
||||
Image: "bar",
|
||||
Group: "baz",
|
||||
},
|
||||
RunnerPodSpec: actionsv1alpha1.RunnerPodSpec{
|
||||
Env: []corev1.EnvVar{
|
||||
{Name: "FOO", Value: "FOOVALUE"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -235,7 +347,11 @@ var _ = Context("INTEGRATION: Inside of a new namespace", func() {
|
||||
MinReplicas: intPtr(1),
|
||||
MaxReplicas: intPtr(3),
|
||||
ScaleDownDelaySecondsAfterScaleUp: intPtr(1),
|
||||
Metrics: nil,
|
||||
Metrics: []actionsv1alpha1.MetricSpec{
|
||||
{
|
||||
Type: actionsv1alpha1.AutoscalingMetricTypeTotalNumberOfQueuedAndInProgressWorkflowRuns,
|
||||
},
|
||||
},
|
||||
ScaleUpTriggers: []actionsv1alpha1.ScaleUpTrigger{
|
||||
{
|
||||
GitHubEvent: &actionsv1alpha1.GitHubEventScaleUpTriggerSpec{
|
||||
@@ -324,11 +440,15 @@ var _ = Context("INTEGRATION: Inside of a new namespace", func() {
|
||||
},
|
||||
},
|
||||
Spec: actionsv1alpha1.RunnerSpec{
|
||||
Repository: "test/valid",
|
||||
Image: "bar",
|
||||
Group: "baz",
|
||||
Env: []corev1.EnvVar{
|
||||
{Name: "FOO", Value: "FOOVALUE"},
|
||||
RunnerConfig: actionsv1alpha1.RunnerConfig{
|
||||
Repository: "test/valid",
|
||||
Image: "bar",
|
||||
Group: "baz",
|
||||
},
|
||||
RunnerPodSpec: actionsv1alpha1.RunnerPodSpec{
|
||||
Env: []corev1.EnvVar{
|
||||
{Name: "FOO", Value: "FOOVALUE"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -338,9 +458,6 @@ var _ = Context("INTEGRATION: Inside of a new namespace", func() {
|
||||
ExpectCreate(ctx, rd, "test RunnerDeployment")
|
||||
ExpectRunnerSetsCountEventuallyEquals(ctx, ns.Name, 1)
|
||||
ExpectRunnerSetsManagedReplicasCountEventuallyEquals(ctx, ns.Name, 1)
|
||||
}
|
||||
|
||||
{
|
||||
env.ExpectRegisteredNumberCountEventuallyEquals(1, "count of fake list runners")
|
||||
}
|
||||
|
||||
@@ -360,7 +477,11 @@ var _ = Context("INTEGRATION: Inside of a new namespace", func() {
|
||||
MinReplicas: intPtr(1),
|
||||
MaxReplicas: intPtr(5),
|
||||
ScaleDownDelaySecondsAfterScaleUp: intPtr(1),
|
||||
Metrics: nil,
|
||||
Metrics: []actionsv1alpha1.MetricSpec{
|
||||
{
|
||||
Type: actionsv1alpha1.AutoscalingMetricTypeTotalNumberOfQueuedAndInProgressWorkflowRuns,
|
||||
},
|
||||
},
|
||||
ScaleUpTriggers: []actionsv1alpha1.ScaleUpTrigger{
|
||||
{
|
||||
GitHubEvent: &actionsv1alpha1.GitHubEventScaleUpTriggerSpec{
|
||||
@@ -380,10 +501,9 @@ var _ = Context("INTEGRATION: Inside of a new namespace", func() {
|
||||
|
||||
ExpectRunnerSetsCountEventuallyEquals(ctx, ns.Name, 1)
|
||||
ExpectRunnerSetsManagedReplicasCountEventuallyEquals(ctx, ns.Name, 3)
|
||||
}
|
||||
|
||||
{
|
||||
env.ExpectRegisteredNumberCountEventuallyEquals(3, "count of fake list runners")
|
||||
env.SyncRunnerRegistrations()
|
||||
ExpectRunnerCountEventuallyEquals(ctx, ns.Name, 3)
|
||||
}
|
||||
|
||||
// Scale-up to 4 replicas on first check_run create webhook event
|
||||
@@ -391,19 +511,117 @@ var _ = Context("INTEGRATION: Inside of a new namespace", func() {
|
||||
env.SendOrgCheckRunEvent("test", "valid", "pending", "created")
|
||||
ExpectRunnerSetsCountEventuallyEquals(ctx, ns.Name, 1, "runner sets after webhook")
|
||||
ExpectRunnerSetsManagedReplicasCountEventuallyEquals(ctx, ns.Name, 4, "runners after first webhook event")
|
||||
}
|
||||
|
||||
{
|
||||
env.ExpectRegisteredNumberCountEventuallyEquals(4, "count of fake list runners")
|
||||
env.SyncRunnerRegistrations()
|
||||
ExpectRunnerCountEventuallyEquals(ctx, ns.Name, 4)
|
||||
}
|
||||
|
||||
// Scale-up to 5 replicas on second check_run create webhook event
|
||||
{
|
||||
env.SendOrgCheckRunEvent("test", "valid", "pending", "created")
|
||||
ExpectRunnerSetsManagedReplicasCountEventuallyEquals(ctx, ns.Name, 5, "runners after second webhook event")
|
||||
env.ExpectRegisteredNumberCountEventuallyEquals(5, "count of fake list runners")
|
||||
env.SyncRunnerRegistrations()
|
||||
ExpectRunnerCountEventuallyEquals(ctx, ns.Name, 5)
|
||||
}
|
||||
})
|
||||
|
||||
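Aside: the diff above replaces `Metrics: nil` with an explicit TotalNumberOfQueuedAndInProgressWorkflowRuns MetricSpec. The metric's real computation lives in suggestDesiredReplicas, which is not part of this excerpt; the self-contained sketch below only illustrates the counting idea the metric's name describes:

package main

import "fmt"

// workflowRun is a simplified stand-in for a GitHub Actions workflow run.
type workflowRun struct{ Status string }

// suggestReplicas counts queued and in-progress runs, the idea behind the
// TotalNumberOfQueuedAndInProgressWorkflowRuns metric used in these tests.
func suggestReplicas(runs []workflowRun) int {
	var n int
	for _, r := range runs {
		if r.Status == "queued" || r.Status == "in_progress" {
			n++
		}
	}
	return n
}

func main() {
	runs := []workflowRun{{"queued"}, {"in_progress"}, {"completed"}, {"queued"}}
	fmt.Println(suggestReplicas(runs)) // 3
}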
It("should create and scale organization's repository runners only on check_run event", func() {
|
||||
name := "example-runnerdeploy"
|
||||
|
||||
{
|
||||
rd := &actionsv1alpha1.RunnerDeployment{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Namespace: ns.Name,
|
||||
},
|
||||
Spec: actionsv1alpha1.RunnerDeploymentSpec{
|
||||
Replicas: intPtr(1),
|
||||
Selector: &metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
},
|
||||
Template: actionsv1alpha1.RunnerTemplate{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
},
|
||||
Spec: actionsv1alpha1.RunnerSpec{
|
||||
RunnerConfig: actionsv1alpha1.RunnerConfig{
|
||||
Repository: "test/valid",
|
||||
Image: "bar",
|
||||
Group: "baz",
|
||||
},
|
||||
RunnerPodSpec: actionsv1alpha1.RunnerPodSpec{
|
||||
Env: []corev1.EnvVar{
|
||||
{Name: "FOO", Value: "FOOVALUE"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
ExpectCreate(ctx, rd, "test RunnerDeployment")
|
||||
ExpectRunnerSetsCountEventuallyEquals(ctx, ns.Name, 1)
|
||||
ExpectRunnerSetsManagedReplicasCountEventuallyEquals(ctx, ns.Name, 1)
|
||||
env.ExpectRegisteredNumberCountEventuallyEquals(1, "count of fake list runners")
|
||||
}
|
||||
|
||||
env.ExpectRegisteredNumberCountEventuallyEquals(5, "count of fake list runners")
|
||||
// Scale-up to 3 replicas by the default TotalNumberOfQueuedAndInProgressWorkflowRuns-based scaling
|
||||
// See workflowRunsFor3Replicas_queued and workflowRunsFor3Replicas_in_progress for GitHub List-Runners API responses
|
||||
// used while testing.
|
||||
{
|
||||
hra := &actionsv1alpha1.HorizontalRunnerAutoscaler{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Namespace: ns.Name,
|
||||
},
|
||||
Spec: actionsv1alpha1.HorizontalRunnerAutoscalerSpec{
|
||||
ScaleTargetRef: actionsv1alpha1.ScaleTargetRef{
|
||||
Name: name,
|
||||
},
|
||||
MinReplicas: intPtr(1),
|
||||
MaxReplicas: intPtr(5),
|
||||
ScaleDownDelaySecondsAfterScaleUp: intPtr(1),
|
||||
ScaleUpTriggers: []actionsv1alpha1.ScaleUpTrigger{
|
||||
{
|
||||
GitHubEvent: &actionsv1alpha1.GitHubEventScaleUpTriggerSpec{
|
||||
CheckRun: &actionsv1alpha1.CheckRunSpec{
|
||||
Types: []string{"created"},
|
||||
Status: "pending",
|
||||
},
|
||||
},
|
||||
Amount: 1,
|
||||
Duration: metav1.Duration{Duration: time.Minute},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
ExpectCreate(ctx, hra, "test HorizontalRunnerAutoscaler")
|
||||
|
||||
ExpectRunnerSetsCountEventuallyEquals(ctx, ns.Name, 1)
|
||||
ExpectRunnerSetsManagedReplicasCountEventuallyEquals(ctx, ns.Name, 1)
|
||||
env.ExpectRegisteredNumberCountEventuallyEquals(1, "count of fake list runners")
|
||||
}
|
||||
|
||||
// Scale-up to 2 replicas on first check_run create webhook event
|
||||
{
|
||||
env.SendOrgCheckRunEvent("test", "valid", "pending", "created")
|
||||
ExpectRunnerSetsCountEventuallyEquals(ctx, ns.Name, 1, "runner sets after webhook")
|
||||
ExpectRunnerSetsManagedReplicasCountEventuallyEquals(ctx, ns.Name, 2, "runners after first webhook event")
|
||||
env.ExpectRegisteredNumberCountEventuallyEquals(2, "count of fake list runners")
|
||||
}
|
||||
|
||||
// Scale-up to 3 replicas on second check_run create webhook event
|
||||
{
|
||||
env.SendOrgCheckRunEvent("test", "valid", "pending", "created")
|
||||
ExpectRunnerSetsManagedReplicasCountEventuallyEquals(ctx, ns.Name, 3, "runners after second webhook event")
|
||||
env.ExpectRegisteredNumberCountEventuallyEquals(3, "count of fake list runners")
|
||||
}
|
||||
})
|
||||
|
||||
It("should create and scale user's repository runners on pull_request event", func() {
|
||||
@@ -429,11 +647,15 @@ var _ = Context("INTEGRATION: Inside of a new namespace", func() {
|
||||
},
|
||||
},
|
||||
Spec: actionsv1alpha1.RunnerSpec{
|
||||
Repository: "test/valid",
|
||||
Image: "bar",
|
||||
Group: "baz",
|
||||
Env: []corev1.EnvVar{
|
||||
{Name: "FOO", Value: "FOOVALUE"},
|
||||
RunnerConfig: actionsv1alpha1.RunnerConfig{
|
||||
Repository: "test/valid",
|
||||
Image: "bar",
|
||||
Group: "baz",
|
||||
},
|
||||
RunnerPodSpec: actionsv1alpha1.RunnerPodSpec{
|
||||
Env: []corev1.EnvVar{
|
||||
{Name: "FOO", Value: "FOOVALUE"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -467,7 +689,11 @@ var _ = Context("INTEGRATION: Inside of a new namespace", func() {
|
||||
MinReplicas: intPtr(1),
|
||||
MaxReplicas: intPtr(3),
|
||||
ScaleDownDelaySecondsAfterScaleUp: intPtr(1),
|
||||
Metrics: nil,
|
||||
Metrics: []actionsv1alpha1.MetricSpec{
|
||||
{
|
||||
Type: actionsv1alpha1.AutoscalingMetricTypeTotalNumberOfQueuedAndInProgressWorkflowRuns,
|
||||
},
|
||||
},
|
||||
ScaleUpTriggers: []actionsv1alpha1.ScaleUpTrigger{
|
||||
{
|
||||
GitHubEvent: &actionsv1alpha1.GitHubEventScaleUpTriggerSpec{
|
||||
@@ -535,6 +761,103 @@ var _ = Context("INTEGRATION: Inside of a new namespace", func() {
|
||||
}
|
||||
})
|
||||
|
||||
It("should create and scale user's repository runners only on pull_request event", func() {
|
||||
name := "example-runnerdeploy"
|
||||
|
||||
{
|
||||
rd := &actionsv1alpha1.RunnerDeployment{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Namespace: ns.Name,
|
||||
},
|
||||
Spec: actionsv1alpha1.RunnerDeploymentSpec{
|
||||
Replicas: intPtr(1),
|
||||
Selector: &metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
},
|
||||
Template: actionsv1alpha1.RunnerTemplate{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
},
|
||||
Spec: actionsv1alpha1.RunnerSpec{
|
||||
RunnerConfig: actionsv1alpha1.RunnerConfig{
|
||||
Repository: "test/valid",
|
||||
Image: "bar",
|
||||
Group: "baz",
|
||||
},
|
||||
RunnerPodSpec: actionsv1alpha1.RunnerPodSpec{
|
||||
Env: []corev1.EnvVar{
|
||||
{Name: "FOO", Value: "FOOVALUE"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
ExpectCreate(ctx, rd, "test RunnerDeployment")
|
||||
ExpectRunnerSetsCountEventuallyEquals(ctx, ns.Name, 1)
|
||||
ExpectRunnerSetsManagedReplicasCountEventuallyEquals(ctx, ns.Name, 1)
|
||||
}
|
||||
|
||||
{
|
||||
hra := &actionsv1alpha1.HorizontalRunnerAutoscaler{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Namespace: ns.Name,
|
||||
},
|
||||
Spec: actionsv1alpha1.HorizontalRunnerAutoscalerSpec{
|
||||
ScaleTargetRef: actionsv1alpha1.ScaleTargetRef{
|
||||
Name: name,
|
||||
},
|
||||
MinReplicas: intPtr(1),
|
||||
MaxReplicas: intPtr(3),
|
||||
ScaleDownDelaySecondsAfterScaleUp: intPtr(1),
|
||||
ScaleUpTriggers: []actionsv1alpha1.ScaleUpTrigger{
|
||||
{
|
||||
GitHubEvent: &actionsv1alpha1.GitHubEventScaleUpTriggerSpec{
|
||||
PullRequest: &actionsv1alpha1.PullRequestSpec{
|
||||
Types: []string{"created"},
|
||||
Branches: []string{"main"},
|
||||
},
|
||||
},
|
||||
Amount: 1,
|
||||
Duration: metav1.Duration{Duration: time.Minute},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
ExpectCreate(ctx, hra, "test HorizontalRunnerAutoscaler")
|
||||
|
||||
ExpectRunnerSetsCountEventuallyEquals(ctx, ns.Name, 1)
|
||||
ExpectRunnerSetsManagedReplicasCountEventuallyEquals(ctx, ns.Name, 1)
|
||||
}
|
||||
|
||||
{
|
||||
env.ExpectRegisteredNumberCountEventuallyEquals(1, "count of fake runners after HRA creation")
|
||||
}
|
||||
|
||||
// Scale-up to 2 replicas on first pull_request create webhook event
|
||||
{
|
||||
env.SendUserPullRequestEvent("test", "valid", "main", "created")
|
||||
ExpectRunnerSetsCountEventuallyEquals(ctx, ns.Name, 1, "runner sets after webhook")
|
||||
ExpectRunnerSetsManagedReplicasCountEventuallyEquals(ctx, ns.Name, 2, "runners after first webhook event")
|
||||
ExpectHRADesiredReplicasEquals(ctx, ns.Name, name, 2, "runner deployment desired replicas")
|
||||
}
|
||||
|
||||
// Scale-up to 3 replicas on second pull_request create webhook event
|
||||
{
|
||||
env.SendUserPullRequestEvent("test", "valid", "main", "created")
|
||||
ExpectRunnerSetsManagedReplicasCountEventuallyEquals(ctx, ns.Name, 3, "runners after second webhook event")
|
||||
ExpectHRADesiredReplicasEquals(ctx, ns.Name, name, 3, "runner deployment desired replicas")
|
||||
}
|
||||
})
|
||||
|
||||
It("should create and scale user's repository runners on check_run event", func() {
|
||||
name := "example-runnerdeploy"
|
||||
|
||||
@@ -558,11 +881,118 @@ var _ = Context("INTEGRATION: Inside of a new namespace", func() {
|
||||
},
|
||||
},
|
||||
Spec: actionsv1alpha1.RunnerSpec{
|
||||
Repository: "test/valid",
|
||||
Image: "bar",
|
||||
Group: "baz",
|
||||
Env: []corev1.EnvVar{
|
||||
{Name: "FOO", Value: "FOOVALUE"},
|
||||
RunnerConfig: actionsv1alpha1.RunnerConfig{
|
||||
Repository: "test/valid",
|
||||
Image: "bar",
|
||||
Group: "baz",
|
||||
},
|
||||
RunnerPodSpec: actionsv1alpha1.RunnerPodSpec{
|
||||
Env: []corev1.EnvVar{
|
||||
{Name: "FOO", Value: "FOOVALUE"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
ExpectCreate(ctx, rd, "test RunnerDeployment")
|
||||
ExpectRunnerSetsCountEventuallyEquals(ctx, ns.Name, 1)
|
||||
ExpectRunnerSetsManagedReplicasCountEventuallyEquals(ctx, ns.Name, 1)
|
||||
env.ExpectRegisteredNumberCountEventuallyEquals(1, "count of fake list runners")
|
||||
}
|
||||
|
||||
// Scale-up to 3 replicas by the default TotalNumberOfQueuedAndInProgressWorkflowRuns-based scaling
|
||||
// See workflowRunsFor3Replicas_queued and workflowRunsFor3Replicas_in_progress for GitHub List-Runners API responses
|
||||
// used while testing.
|
||||
{
|
||||
hra := &actionsv1alpha1.HorizontalRunnerAutoscaler{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Namespace: ns.Name,
|
||||
},
|
||||
Spec: actionsv1alpha1.HorizontalRunnerAutoscalerSpec{
|
||||
ScaleTargetRef: actionsv1alpha1.ScaleTargetRef{
|
||||
Name: name,
|
||||
},
|
||||
MinReplicas: intPtr(1),
|
||||
MaxReplicas: intPtr(5),
|
||||
ScaleDownDelaySecondsAfterScaleUp: intPtr(1),
|
||||
Metrics: []actionsv1alpha1.MetricSpec{
|
||||
{
|
||||
Type: actionsv1alpha1.AutoscalingMetricTypeTotalNumberOfQueuedAndInProgressWorkflowRuns,
|
||||
},
|
||||
},
|
||||
ScaleUpTriggers: []actionsv1alpha1.ScaleUpTrigger{
|
||||
{
|
||||
GitHubEvent: &actionsv1alpha1.GitHubEventScaleUpTriggerSpec{
|
||||
CheckRun: &actionsv1alpha1.CheckRunSpec{
|
||||
Types: []string{"created"},
|
||||
Status: "pending",
|
||||
},
|
||||
},
|
||||
Amount: 1,
|
||||
Duration: metav1.Duration{Duration: time.Minute},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
ExpectCreate(ctx, hra, "test HorizontalRunnerAutoscaler")
|
||||
|
||||
ExpectRunnerSetsCountEventuallyEquals(ctx, ns.Name, 1)
|
||||
ExpectRunnerSetsManagedReplicasCountEventuallyEquals(ctx, ns.Name, 3)
|
||||
env.ExpectRegisteredNumberCountEventuallyEquals(3, "count of fake list runners")
|
||||
}
|
||||
|
||||
// Scale-up to 4 replicas on first check_run create webhook event
|
||||
{
|
||||
env.SendUserCheckRunEvent("test", "valid", "pending", "created")
|
||||
ExpectRunnerSetsCountEventuallyEquals(ctx, ns.Name, 1, "runner sets after webhook")
|
||||
ExpectRunnerSetsManagedReplicasCountEventuallyEquals(ctx, ns.Name, 4, "runners after first webhook event")
|
||||
env.ExpectRegisteredNumberCountEventuallyEquals(4, "count of fake list runners")
|
||||
}
|
||||
|
||||
// Scale-up to 5 replicas on second check_run create webhook event
|
||||
{
|
||||
env.SendUserCheckRunEvent("test", "valid", "pending", "created")
|
||||
ExpectRunnerSetsManagedReplicasCountEventuallyEquals(ctx, ns.Name, 5, "runners after second webhook event")
|
||||
env.ExpectRegisteredNumberCountEventuallyEquals(5, "count of fake list runners")
|
||||
}
|
||||
})
|
||||
|
||||
It("should create and scale user's repository runners only on check_run event", func() {
|
||||
name := "example-runnerdeploy"
|
||||
|
||||
{
|
||||
rd := &actionsv1alpha1.RunnerDeployment{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Namespace: ns.Name,
|
||||
},
|
||||
Spec: actionsv1alpha1.RunnerDeploymentSpec{
|
||||
Replicas: intPtr(1),
|
||||
Selector: &metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
},
|
||||
Template: actionsv1alpha1.RunnerTemplate{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
},
|
||||
Spec: actionsv1alpha1.RunnerSpec{
|
||||
RunnerConfig: actionsv1alpha1.RunnerConfig{
|
||||
Repository: "test/valid",
|
||||
Image: "bar",
|
||||
Group: "baz",
|
||||
},
|
||||
RunnerPodSpec: actionsv1alpha1.RunnerPodSpec{
|
||||
Env: []corev1.EnvVar{
|
||||
{Name: "FOO", Value: "FOOVALUE"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -594,7 +1024,6 @@ var _ = Context("INTEGRATION: Inside of a new namespace", func() {
|
||||
MinReplicas: intPtr(1),
|
||||
MaxReplicas: intPtr(5),
|
||||
ScaleDownDelaySecondsAfterScaleUp: intPtr(1),
|
||||
Metrics: nil,
|
||||
ScaleUpTriggers: []actionsv1alpha1.ScaleUpTrigger{
|
||||
{
|
||||
GitHubEvent: &actionsv1alpha1.GitHubEventScaleUpTriggerSpec{
|
||||
@@ -613,31 +1042,27 @@ var _ = Context("INTEGRATION: Inside of a new namespace", func() {
|
||||
ExpectCreate(ctx, hra, "test HorizontalRunnerAutoscaler")
|
||||
|
||||
ExpectRunnerSetsCountEventuallyEquals(ctx, ns.Name, 1)
|
||||
ExpectRunnerSetsManagedReplicasCountEventuallyEquals(ctx, ns.Name, 3)
|
||||
ExpectRunnerSetsManagedReplicasCountEventuallyEquals(ctx, ns.Name, 1)
|
||||
}
|
||||
|
||||
{
|
||||
env.ExpectRegisteredNumberCountEventuallyEquals(3, "count of fake list runners")
|
||||
env.ExpectRegisteredNumberCountEventuallyEquals(1, "count of fake list runners")
|
||||
}
|
||||
|
||||
// Scale-up to 4 replicas on first check_run create webhook event
|
||||
// Scale-up to 2 replicas on first check_run create webhook event
|
||||
{
|
||||
env.SendUserCheckRunEvent("test", "valid", "pending", "created")
|
||||
ExpectRunnerSetsCountEventuallyEquals(ctx, ns.Name, 1, "runner sets after webhook")
|
||||
ExpectRunnerSetsManagedReplicasCountEventuallyEquals(ctx, ns.Name, 4, "runners after first webhook event")
|
||||
ExpectRunnerSetsManagedReplicasCountEventuallyEquals(ctx, ns.Name, 2, "runners after first webhook event")
|
||||
env.ExpectRegisteredNumberCountEventuallyEquals(2, "count of fake list runners")
|
||||
}
|
||||
|
||||
{
|
||||
env.ExpectRegisteredNumberCountEventuallyEquals(4, "count of fake list runners")
|
||||
}
|
||||
|
||||
// Scale-up to 5 replicas on second check_run create webhook event
|
||||
// Scale-up to 3 replicas on second check_run create webhook event
|
||||
{
|
||||
env.SendUserCheckRunEvent("test", "valid", "pending", "created")
|
||||
ExpectRunnerSetsManagedReplicasCountEventuallyEquals(ctx, ns.Name, 5, "runners after second webhook event")
|
||||
ExpectRunnerSetsManagedReplicasCountEventuallyEquals(ctx, ns.Name, 3, "runners after second webhook event")
|
||||
env.ExpectRegisteredNumberCountEventuallyEquals(3, "count of fake list runners")
|
||||
}
|
||||
|
||||
env.ExpectRegisteredNumberCountEventuallyEquals(5, "count of fake list runners")
|
||||
})
|
||||
|
||||
})
|
||||
@@ -782,7 +1207,7 @@ func (env *testEnvironment) SyncRunnerRegistrations() {
	env.fakeRunnerList.Sync(runnerList.Items)
}

func ExpectCreate(ctx context.Context, rd runtime.Object, s string) {
func ExpectCreate(ctx context.Context, rd client.Object, s string) {
	err := k8sClient.Create(ctx, rd)

	ExpectWithOffset(1, err).NotTo(HaveOccurred(), fmt.Sprintf("failed to create %s resource", s))
@@ -824,6 +1249,44 @@ func ExpectRunnerSetsCountEventuallyEquals(ctx context.Context, ns string, count
		time.Second*10, time.Millisecond*500).Should(BeEquivalentTo(count), optionalDescription...)
}

func ExpectRunnerCountEventuallyEquals(ctx context.Context, ns string, count int, optionalDescription ...interface{}) {
	runners := actionsv1alpha1.RunnerList{Items: []actionsv1alpha1.Runner{}}

	EventuallyWithOffset(
		1,
		func() int {
			err := k8sClient.List(ctx, &runners, client.InNamespace(ns))
			if err != nil {
				logf.Log.Error(err, "list runner sets")
			}

			var running int

			for _, r := range runners.Items {
				if r.Status.Phase == string(corev1.PodRunning) {
					running++
				} else {
					var pod corev1.Pod
					if err := k8sClient.Get(ctx, types.NamespacedName{Namespace: ns, Name: r.Name}, &pod); err != nil {
						logf.Log.Error(err, "simulating pod controller")
						continue
					}

					copy := pod.DeepCopy()
					copy.Status.Phase = corev1.PodRunning

					if err := k8sClient.Status().Patch(ctx, copy, client.MergeFrom(&pod)); err != nil {
						logf.Log.Error(err, "simulating pod controller")
						continue
					}
				}
			}

			return running
		},
		time.Second*10, time.Millisecond*500).Should(BeEquivalentTo(count), optionalDescription...)
}

func ExpectRunnerSetsManagedReplicasCountEventuallyEquals(ctx context.Context, ns string, count int, optionalDescription ...interface{}) {
	runnerSets := actionsv1alpha1.RunnerReplicaSetList{Items: []actionsv1alpha1.RunnerReplicaSet{}}
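Aside: the ExpectCreate signature change from runtime.Object to client.Object tracks controller-runtime's client API, which in newer versions (around v0.7 and later; the cutoff is not stated in this diff) requires objects that also expose their metadata. A self-contained sketch of why the narrower interface helps, with simplified stand-in interfaces rather than controller-runtime's own:

package main

import "fmt"

// object is a simplified stand-in for controller-runtime's client.Object:
// unlike a bare runtime.Object, it guarantees access to object metadata.
type object interface {
	GetName() string
	GetNamespace() string
}

type pod struct{ name, namespace string }

func (p pod) GetName() string      { return p.name }
func (p pod) GetNamespace() string { return p.namespace }

// create can report name/namespace without a type assertion, which is the
// practical benefit of the client.Object-style parameter in ExpectCreate above.
func create(o object) error {
	fmt.Printf("creating %s/%s\n", o.GetNamespace(), o.GetName())
	return nil
}

func main() {
	_ = create(pod{name: "example-runnerdeploy", namespace: "testns-abcde"})
}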
controllers/metrics/horizontalrunnerautoscaler.go (new file, 67 lines)
@@ -0,0 +1,67 @@
package metrics

import (
	"github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
	"github.com/prometheus/client_golang/prometheus"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

const (
	hraName      = "horizontalrunnerautoscaler"
	hraNamespace = "namespace"
)

var (
	horizontalRunnerAutoscalerMetrics = []prometheus.Collector{
		horizontalRunnerAutoscalerMinReplicas,
		horizontalRunnerAutoscalerMaxReplicas,
		horizontalRunnerAutoscalerDesiredReplicas,
	}
)

var (
	horizontalRunnerAutoscalerMinReplicas = prometheus.NewGaugeVec(
		prometheus.GaugeOpts{
			Name: "horizontalrunnerautoscaler_spec_min_replicas",
			Help: "minReplicas of HorizontalRunnerAutoscaler",
		},
		[]string{hraName, hraNamespace},
	)
	horizontalRunnerAutoscalerMaxReplicas = prometheus.NewGaugeVec(
		prometheus.GaugeOpts{
			Name: "horizontalrunnerautoscaler_spec_max_replicas",
			Help: "maxReplicas of HorizontalRunnerAutoscaler",
		},
		[]string{hraName, hraNamespace},
	)
	horizontalRunnerAutoscalerDesiredReplicas = prometheus.NewGaugeVec(
		prometheus.GaugeOpts{
			Name: "horizontalrunnerautoscaler_status_desired_replicas",
			Help: "desiredReplicas of HorizontalRunnerAutoscaler",
		},
		[]string{hraName, hraNamespace},
	)
)

func SetHorizontalRunnerAutoscalerSpec(o metav1.ObjectMeta, spec v1alpha1.HorizontalRunnerAutoscalerSpec) {
	labels := prometheus.Labels{
		hraName:      o.Name,
		hraNamespace: o.Namespace,
	}
	if spec.MaxReplicas != nil {
		horizontalRunnerAutoscalerMaxReplicas.With(labels).Set(float64(*spec.MaxReplicas))
	}
	if spec.MinReplicas != nil {
		horizontalRunnerAutoscalerMinReplicas.With(labels).Set(float64(*spec.MinReplicas))
	}
}

func SetHorizontalRunnerAutoscalerStatus(o metav1.ObjectMeta, status v1alpha1.HorizontalRunnerAutoscalerStatus) {
	labels := prometheus.Labels{
		hraName:      o.Name,
		hraNamespace: o.Namespace,
	}
	if status.DesiredReplicas != nil {
		horizontalRunnerAutoscalerDesiredReplicas.With(labels).Set(float64(*status.DesiredReplicas))
	}
}
controllers/metrics/metrics.go (new file, 14 lines)
@@ -0,0 +1,14 @@
// Package metrics provides the metrics of custom resources such as HRA.
//
// This depends on the metrics exporter of kubebuilder.
// See https://book.kubebuilder.io/reference/metrics.html for details.
package metrics

import (
	"sigs.k8s.io/controller-runtime/pkg/metrics"
)

func init() {
	metrics.Registry.MustRegister(runnerDeploymentMetrics...)
	metrics.Registry.MustRegister(horizontalRunnerAutoscalerMetrics...)
}
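Aside: the init() above registers the gauge vectors with controller-runtime's global registry, so they are exposed on the manager's /metrics endpoint, and the reconcilers only need to call the setters. A self-contained sketch of the same GaugeVec label pattern; a private registry is used here purely to keep the example standalone:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

var replicas = prometheus.NewGaugeVec(
	prometheus.GaugeOpts{
		Name: "runnerdeployment_spec_replicas",
		Help: "replicas of RunnerDeployment",
	},
	[]string{"runnerdeployment", "namespace"},
)

func main() {
	// The diff's init() registers with controller-runtime's metrics.Registry
	// instead, which is what serves /metrics.
	reg := prometheus.NewRegistry()
	reg.MustRegister(replicas)

	// What SetRunnerDeployment effectively does for one object:
	replicas.With(prometheus.Labels{
		"runnerdeployment": "example-runnerdeploy",
		"namespace":        "default",
	}).Set(3)

	mfs, _ := reg.Gather()
	fmt.Println(mfs[0].GetName()) // runnerdeployment_spec_replicas
}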
controllers/metrics/runnerdeployment.go (new file, 37 lines)
@@ -0,0 +1,37 @@
package metrics

import (
	"github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
	"github.com/prometheus/client_golang/prometheus"
)

const (
	rdName      = "runnerdeployment"
	rdNamespace = "namespace"
)

var (
	runnerDeploymentMetrics = []prometheus.Collector{
		runnerDeploymentReplicas,
	}
)

var (
	runnerDeploymentReplicas = prometheus.NewGaugeVec(
		prometheus.GaugeOpts{
			Name: "runnerdeployment_spec_replicas",
			Help: "replicas of RunnerDeployment",
		},
		[]string{rdName, rdNamespace},
	)
)

func SetRunnerDeployment(rd v1alpha1.RunnerDeployment) {
	labels := prometheus.Labels{
		rdName:      rd.Name,
		rdNamespace: rd.Namespace,
	}
	if rd.Spec.Replicas != nil {
		runnerDeploymentReplicas.With(labels).Set(float64(*rd.Spec.Replicas))
	}
}

controllers/metrics/runnerset.go (new file, 37 lines)
@@ -0,0 +1,37 @@
package metrics

import (
	"github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
	"github.com/prometheus/client_golang/prometheus"
)

const (
	rsName      = "runnerset"
	rsNamespace = "namespace"
)

var (
	runnerSetMetrics = []prometheus.Collector{
		runnerSetReplicas,
	}
)

var (
	runnerSetReplicas = prometheus.NewGaugeVec(
		prometheus.GaugeOpts{
			Name: "runnerset_spec_replicas",
			Help: "replicas of RunnerSet",
		},
		[]string{rsName, rsNamespace},
	)
)

func SetRunnerSet(rd v1alpha1.RunnerSet) {
	labels := prometheus.Labels{
		rsName:      rd.Name,
		rsNamespace: rd.Namespace,
	}
	if rd.Spec.Replicas != nil {
		runnerSetReplicas.With(labels).Set(float64(*rd.Spec.Replicas))
	}
}
controllers/pod_runner_token_injector.go (new file, 132 lines)
@@ -0,0 +1,132 @@
package controllers

import (
	"context"
	"encoding/json"
	"net/http"
	"time"

	"github.com/actions-runner-controller/actions-runner-controller/github"
	"github.com/go-logr/logr"
	"gomodules.xyz/jsonpatch/v2"
	admissionv1 "k8s.io/api/admission/v1"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/tools/record"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)

const (
	AnnotationKeyTokenExpirationDate = "actions-runner-controller/token-expires-at"
)

// +kubebuilder:webhook:path=/mutate-runner-set-pod,mutating=true,failurePolicy=ignore,groups="",resources=pods,verbs=create,versions=v1,name=mutate-runner-pod.webhook.actions.summerwind.dev,sideEffects=None,admissionReviewVersions=v1beta1

type PodRunnerTokenInjector struct {
	client.Client

	Name         string
	Log          logr.Logger
	Recorder     record.EventRecorder
	GitHubClient *github.Client
	decoder      *admission.Decoder
}

func (t *PodRunnerTokenInjector) Handle(ctx context.Context, req admission.Request) admission.Response {
	var pod corev1.Pod
	err := t.decoder.Decode(req, &pod)
	if err != nil {
		t.Log.Error(err, "Failed to decode request object")
		return admission.Errored(http.StatusBadRequest, err)
	}

	if pod.Annotations == nil {
		pod.Annotations = map[string]string{}
	}

	var runnerContainer *corev1.Container

	for i := range pod.Spec.Containers {
		c := pod.Spec.Containers[i]

		if c.Name == "runner" {
			runnerContainer = &c
		}
	}

	if runnerContainer == nil {
		return newEmptyResponse()
	}

	enterprise, okEnterprise := getEnv(runnerContainer, "RUNNER_ENTERPRISE")
	repo, okRepo := getEnv(runnerContainer, "RUNNER_REPO")
	org, okOrg := getEnv(runnerContainer, "RUNNER_ORG")
	if !okRepo || !okOrg || !okEnterprise {
		return newEmptyResponse()
	}

	rt, err := t.GitHubClient.GetRegistrationToken(context.Background(), enterprise, org, repo, pod.Name)
	if err != nil {
		t.Log.Error(err, "Failed to get new registration token")
		return admission.Errored(http.StatusInternalServerError, err)
	}

	ts := rt.GetExpiresAt().Format(time.RFC3339)

	updated := mutatePod(&pod, *rt.Token)

	updated.Annotations[AnnotationKeyTokenExpirationDate] = ts

	if pod.Spec.RestartPolicy != corev1.RestartPolicyOnFailure {
		updated.Spec.RestartPolicy = corev1.RestartPolicyOnFailure
	}

	buf, err := json.Marshal(updated)
	if err != nil {
		t.Log.Error(err, "Failed to encode new object")
		return admission.Errored(http.StatusInternalServerError, err)
	}

	res := admission.PatchResponseFromRaw(req.Object.Raw, buf)
	return res
}

func getEnv(container *corev1.Container, key string) (string, bool) {
	for _, env := range container.Env {
		if env.Name == key {
			return env.Value, true
		}
	}

	return "", false
}

func (t *PodRunnerTokenInjector) InjectDecoder(d *admission.Decoder) error {
	t.decoder = d
	return nil
}

func newEmptyResponse() admission.Response {
	pt := admissionv1.PatchTypeJSONPatch
	return admission.Response{
		Patches: []jsonpatch.Operation{},
		AdmissionResponse: admissionv1.AdmissionResponse{
			Allowed:   true,
			PatchType: &pt,
		},
	}
}

func (r *PodRunnerTokenInjector) SetupWithManager(mgr ctrl.Manager) error {
	name := "pod-runner-token-injector"
	if r.Name != "" {
		name = r.Name
	}

	r.Recorder = mgr.GetEventRecorderFor(name)

	mgr.GetWebhookServer().Register("/mutate-runner-set-pod", &admission.Webhook{Handler: r})

	return nil
}
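Aside: admission.PatchResponseFromRaw, used at the end of Handle above, responds with a JSON patch computed from the difference between the incoming raw pod and the mutated copy. A self-contained sketch of that diffing step using the same jsonpatch module this file imports (the example documents are made up for illustration):

package main

import (
	"fmt"

	"gomodules.xyz/jsonpatch/v2"
)

func main() {
	// What PatchResponseFromRaw boils down to: compute a JSON patch between
	// the original object and the mutated object.
	original := []byte(`{"metadata":{"annotations":{}},"spec":{"restartPolicy":"Always"}}`)
	mutated := []byte(`{"metadata":{"annotations":{"actions-runner-controller/token-expires-at":"2021-01-01T00:00:00Z"}},"spec":{"restartPolicy":"OnFailure"}}`)

	ops, err := jsonpatch.CreatePatch(original, mutated)
	if err != nil {
		panic(err)
	}
	for _, op := range ops {
		fmt.Printf("%s %s\n", op.Operation, op.Path)
	}
}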
(File diff suppressed because it is too large.)

controllers/runner_pod_controller.go (new file, 431 lines)
@@ -0,0 +1,431 @@
|
||||
/*
|
||||
Copyright 2020 The actions-runner-controller authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package controllers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
gogithub "github.com/google/go-github/v33/github"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
|
||||
"github.com/go-logr/logr"
|
||||
kerrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/client-go/tools/record"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
|
||||
"github.com/actions-runner-controller/actions-runner-controller/github"
|
||||
)
|
||||
|
||||
// RunnerPodReconciler reconciles a Runner object
|
||||
type RunnerPodReconciler struct {
|
||||
client.Client
|
||||
Log logr.Logger
|
||||
Recorder record.EventRecorder
|
||||
Scheme *runtime.Scheme
|
||||
GitHubClient *github.Client
|
||||
Name string
|
||||
RegistrationRecheckInterval time.Duration
|
||||
RegistrationRecheckJitter time.Duration
|
||||
}
|
||||
|
||||
const (
|
||||
// This names requires at leaset one slash to work.
|
||||
// See https://github.com/google/knative-gcp/issues/378
|
||||
runnerPodFinalizerName = "actions.summerwind.dev/runner-pod"
|
||||
|
||||
AnnotationKeyLastRegistrationCheckTime = "actions-runner-controller/last-registration-check-time"
|
||||
)
|
||||
|
||||
// +kubebuilder:rbac:groups=core,resources=pods,verbs=get;list;watch;update;patch;delete
|
||||
// +kubebuilder:rbac:groups=core,resources=events,verbs=create;patch
|
||||
|
||||
func (r *RunnerPodReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
|
||||
log := r.Log.WithValues("runnerpod", req.NamespacedName)
|
||||
|
||||
var runnerPod corev1.Pod
|
||||
if err := r.Get(ctx, req.NamespacedName, &runnerPod); err != nil {
|
||||
return ctrl.Result{}, client.IgnoreNotFound(err)
|
||||
}
|
||||
|
||||
_, isRunnerPod := runnerPod.Labels[LabelKeyRunnerSetName]
|
||||
if !isRunnerPod {
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
var enterprise, org, repo string
|
||||
|
||||
envvars := runnerPod.Spec.Containers[0].Env
|
||||
for _, e := range envvars {
|
||||
switch e.Name {
|
||||
case EnvVarEnterprise:
|
||||
enterprise = e.Value
|
||||
case EnvVarOrg:
|
||||
org = e.Value
|
||||
case EnvVarRepo:
|
||||
repo = e.Value
|
||||
}
|
||||
}

    if runnerPod.ObjectMeta.DeletionTimestamp.IsZero() {
        finalizers, added := addFinalizer(runnerPod.ObjectMeta.Finalizers, runnerPodFinalizerName)

        if added {
            newRunner := runnerPod.DeepCopy()
            newRunner.ObjectMeta.Finalizers = finalizers

            if err := r.Patch(ctx, newRunner, client.MergeFrom(&runnerPod)); err != nil {
                log.Error(err, "Failed to update runner")
                return ctrl.Result{}, err
            }

            return ctrl.Result{}, nil
        }
    } else {
        finalizers, removed := removeFinalizer(runnerPod.ObjectMeta.Finalizers, runnerPodFinalizerName)

        if removed {
            ok, err := r.unregisterRunner(ctx, enterprise, org, repo, runnerPod.Name)
            if err != nil {
                if errors.Is(err, &gogithub.RateLimitError{}) {
                    // We log the underlying error when we fail to call the GitHub API to list
                    // or unregister runners, or when the runner is still busy.
                    log.Error(
                        err,
                        fmt.Sprintf(
                            "Failed to unregister runner due to GitHub API rate limits. Delaying retry for %s to avoid excessive GitHub API calls",
                            retryDelayOnGitHubAPIRateLimitError,
                        ),
                    )

                    return ctrl.Result{RequeueAfter: retryDelayOnGitHubAPIRateLimitError}, err
                }

                return ctrl.Result{}, err
            }

            if !ok {
                log.V(1).Info("Runner no longer exists on GitHub")
            }

            newRunner := runnerPod.DeepCopy()
            newRunner.ObjectMeta.Finalizers = finalizers

            if err := r.Patch(ctx, newRunner, client.MergeFrom(&runnerPod)); err != nil {
                log.Error(err, "Failed to update runner for finalizer removal")
                return ctrl.Result{}, err
            }

            log.Info("Removed runner from GitHub", "repository", repo, "organization", org)
        }

        deletionTimeout := 1 * time.Minute
        currentTime := time.Now()
        deletionDidTimeout := currentTime.Sub(runnerPod.DeletionTimestamp.Add(deletionTimeout)) > 0
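
        // Worked example: with a DeletionTimestamp of 12:00:00 and the 1-minute timeout above,
        // deletionDidTimeout becomes true on any reconciliation strictly after 12:01:00.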

        if deletionDidTimeout {
            log.Info(
                fmt.Sprintf("Failed to delete pod within %s. ", deletionTimeout)+
                    "This is typically the case when a Kubernetes node became unreachable "+
                    "and the kube controller started evicting nodes. Forcefully deleting the pod to not get stuck.",
                "podDeletionTimestamp", runnerPod.DeletionTimestamp,
                "currentTime", currentTime,
                "configuredDeletionTimeout", deletionTimeout,
            )

            var force int64 = 0
            // forcefully delete the runner as we would otherwise get stuck if the node stays unreachable
            if err := r.Delete(ctx, &runnerPod, &client.DeleteOptions{GracePeriodSeconds: &force}); err != nil {
                // ignore NotFound: the pod has probably been deleted already
                if !kerrors.IsNotFound(err) {
                    log.Error(err, "Failed to forcefully delete pod resource ...")
                    return ctrl.Result{}, err
                }
                // forceful deletion finally succeeded
                return ctrl.Result{Requeue: true}, nil
            }

            r.Recorder.Event(&runnerPod, corev1.EventTypeNormal, "PodDeleted", fmt.Sprintf("Forcefully deleted pod '%s'", runnerPod.Name))
            log.Info("Forcefully deleted runner pod", "repository", repo)
            // give the kube manager a little time to forcefully delete the stuck pod
            return ctrl.Result{RequeueAfter: 3 * time.Second}, nil
        }

        return ctrl.Result{}, nil
    }

    // If the pod has ended up succeeded we need to restart it.
    // Happens e.g. when dind is in the runner and the run completes.
    stopped := runnerPod.Status.Phase == corev1.PodSucceeded

    if !stopped {
        if runnerPod.Status.Phase == corev1.PodRunning {
            for _, status := range runnerPod.Status.ContainerStatuses {
                if status.Name != containerName {
                    continue
                }

                if status.State.Terminated != nil && status.State.Terminated.ExitCode == 0 {
                    stopped = true
                }
            }
        }
    }

    restart := stopped

    var registrationRecheckDelay time.Duration

    // All checks done below only decide whether a restart is needed.
    // If a restart was already decided above, there is no need for the checks,
    // saving API calls and scary log messages.
    if !restart {
        registrationCheckInterval := time.Minute
        if r.RegistrationRecheckInterval > 0 {
            registrationCheckInterval = r.RegistrationRecheckInterval
        }

        lastCheckTimeStr := runnerPod.Annotations[AnnotationKeyLastRegistrationCheckTime]

        var lastCheckTime *time.Time

        if lastCheckTimeStr != "" {
            t, err := time.Parse(time.RFC3339, lastCheckTimeStr)
            if err != nil {
                log.Error(err, "Failed to parse last registration check time", "lastRegistrationCheckTime", lastCheckTimeStr)
                return ctrl.Result{}, nil
            }

            lastCheckTime = &t
        }

        // We want to call the ListRunners GitHub Actions API only once per runner per minute.
        // This if block, in conjunction with:
        //   return ctrl.Result{RequeueAfter: registrationRecheckDelay}, nil
        // achieves that.
        if lastCheckTime != nil {
            nextCheckTime := lastCheckTime.Add(registrationCheckInterval)
            now := time.Now()

            // A requeue scheduled by RequeueAfter can happen a bit earlier (like dozens of milliseconds),
            // so to avoid excessive, ineffective retries, we heuristically ignore the remaining delay
            // in case it is shorter than 1s.
            requeueAfter := nextCheckTime.Sub(now) - time.Second
            if requeueAfter > 0 {
                log.Info(
                    fmt.Sprintf("Skipped registration check because it's deferred until %s. Retrying in %s at latest", nextCheckTime, requeueAfter),
                    "lastRegistrationCheckTime", lastCheckTime,
                    "registrationCheckInterval", registrationCheckInterval,
                )

                // Without RequeueAfter, the controller may not retry as scheduled. Instead, it would
                // have to wait until the next sync period passes, which can be much later than
                // nextCheckTime.
                //
                // We need to requeue on this reconciliation even though we have already scheduled the
                // initial requeue previously with `return ctrl.Result{RequeueAfter: registrationRecheckDelay}, nil`.
                // Apparently, the workqueue used by controller-runtime deduplicates and resets the delay
                // on other requeues, so the initial scheduled requeue may have been reset due to a
                // requeue on a spec/status change.
                return ctrl.Result{RequeueAfter: requeueAfter}, nil
            }
        }

        notFound := false
        offline := false

        _, err := r.GitHubClient.IsRunnerBusy(ctx, enterprise, org, repo, runnerPod.Name)

        currentTime := time.Now()

        if err != nil {
            var notFoundException *github.RunnerNotFound
            var offlineException *github.RunnerOffline
            if errors.As(err, &notFoundException) {
                notFound = true
            } else if errors.As(err, &offlineException) {
                offline = true
            } else {
                var e *gogithub.RateLimitError
                if errors.As(err, &e) {
                    // We log the underlying error when we fail to call the GitHub API to list
                    // or unregister runners, or when the runner is still busy.
                    log.Error(
                        err,
                        fmt.Sprintf(
                            "Failed to check if runner is busy due to GitHub API rate limit. Retrying in %s to avoid excessive GitHub API calls",
                            retryDelayOnGitHubAPIRateLimitError,
                        ),
                    )

                    return ctrl.Result{RequeueAfter: retryDelayOnGitHubAPIRateLimitError}, err
                }

                return ctrl.Result{}, err
            }
        }

        registrationTimeout := 10 * time.Minute
        durationAfterRegistrationTimeout := currentTime.Sub(runnerPod.CreationTimestamp.Add(registrationTimeout))
        registrationDidTimeout := durationAfterRegistrationTimeout > 0

        if notFound {
            if registrationDidTimeout {
                log.Info(
                    "Runner failed to register itself to GitHub in a timely manner. "+
                        "Recreating the pod to see if it resolves the issue. "+
                        "CAUTION: If you see this a lot, you should investigate the root cause. "+
                        "See https://github.com/actions-runner-controller/actions-runner-controller/issues/288",
                    "podCreationTimestamp", runnerPod.CreationTimestamp,
                    "currentTime", currentTime,
                    "configuredRegistrationTimeout", registrationTimeout,
                )

                restart = true
            } else {
                log.V(1).Info(
                    "Runner pod exists but we failed to check if the runner is busy. Apparently it still needs more time.",
                    "runnerName", runnerPod.Name,
                )
            }
        } else if offline {
            if registrationDidTimeout {
                log.Info(
                    "Already existing GitHub runner still appears offline. "+
                        "Recreating the pod to see if it resolves the issue. "+
                        "CAUTION: If you see this a lot, you should investigate the root cause. ",
                    "podCreationTimestamp", runnerPod.CreationTimestamp,
                    "currentTime", currentTime,
                    "configuredRegistrationTimeout", registrationTimeout,
                )

                restart = true
            } else {
                log.V(1).Info(
                    "Runner pod exists but the GitHub runner appears to be still offline. Waiting for the runner to get online ...",
                    "runnerName", runnerPod.Name,
                )
            }
        }

        if (notFound || offline) && !registrationDidTimeout {
            registrationRecheckJitter := 10 * time.Second
            if r.RegistrationRecheckJitter > 0 {
                registrationRecheckJitter = r.RegistrationRecheckJitter
            }

            registrationRecheckDelay = registrationCheckInterval + wait.Jitter(registrationRecheckJitter, 0.1)
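
            // wait.Jitter(d, maxFactor) from k8s.io/apimachinery/pkg/util/wait returns d plus a
            // uniformly random extra of up to maxFactor*d. With the defaults above that is
            // 1m + (10s .. 11s), spreading the rechecks of many pods so they don't all hit
            // the GitHub API at the same instant.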
        }
    }

    // Don't do anything if there's no need to restart the runner.
    if !restart {
        // This guard enables us to update runner.Status.Phase to `Running` only after
        // the runner is registered to GitHub.
        if registrationRecheckDelay > 0 {
            log.V(1).Info(fmt.Sprintf("Rechecking the runner registration in %s", registrationRecheckDelay))

            updated := runnerPod.DeepCopy()
            t := time.Now().Format(time.RFC3339)
            updated.Annotations[AnnotationKeyLastRegistrationCheckTime] = t

            if err := r.Patch(ctx, updated, client.MergeFrom(&runnerPod)); err != nil {
                log.Error(err, "Failed to update runner pod annotation for LastRegistrationCheckTime")
                return ctrl.Result{}, err
            }

            return ctrl.Result{RequeueAfter: registrationRecheckDelay}, nil
        }

        // Seeing this message, you can expect the runner to become `Running` soon.
        log.Info(
            "Runner appears to have registered and be running.",
            "podCreationTimestamp", runnerPod.CreationTimestamp,
        )

        return ctrl.Result{}, nil
    }

    // Delete the current pod if recreation is needed.
    if err := r.Delete(ctx, &runnerPod); err != nil {
        log.Error(err, "Failed to delete pod resource")
        return ctrl.Result{}, err
    }

    r.Recorder.Event(&runnerPod, corev1.EventTypeNormal, "PodDeleted", fmt.Sprintf("Deleted pod '%s'", runnerPod.Name))
    log.Info("Deleted runner pod", "name", runnerPod.Name)

    return ctrl.Result{}, nil
}
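
// The annotation dance above implements a simple per-pod rate limit on registration
// checks. A self-contained sketch of the same pattern (the function name is illustrative,
// not part of the controller):
func shouldRecheckRegistration(lastCheck time.Time, interval time.Duration, now time.Time) (ok bool, retryIn time.Duration) {
    // Tolerate requeues firing slightly early, mirroring the 1s heuristic above.
    remaining := lastCheck.Add(interval).Sub(now) - time.Second
    if remaining > 0 {
        return false, remaining // too soon; requeue after the remaining delay
    }
    return true, 0 // enough time has passed; hit the GitHub API again
}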

func (r *RunnerPodReconciler) unregisterRunner(ctx context.Context, enterprise, org, repo, name string) (bool, error) {
    runners, err := r.GitHubClient.ListRunners(ctx, enterprise, org, repo)
    if err != nil {
        return false, err
    }

    var busy bool

    id := int64(0)
    for _, runner := range runners {
        if runner.GetName() == name {
            // Sometimes a runner can get stuck "busy" even though it is already "offline".
            // Thus, dropping the condition on status could block the runner pod from being terminated forever.
            busy = runner.GetBusy()
            if runner.GetStatus() != "offline" && busy {
                r.Log.Info("This runner will delay the runner pod deletion and the runner deregistration until it becomes either offline or non-busy", "name", runner.GetName(), "status", runner.GetStatus(), "busy", runner.GetBusy())
                return false, fmt.Errorf("runner is busy")
            }
            id = runner.GetID()
            break
        }
    }

    if id == int64(0) {
        return false, nil
    }

    // Sometimes a runner can get stuck "busy" even though it is already "offline".
    // Trying to remove the offline but busy runner can result in errors like the following:
    //   failed to remove runner: DELETE https://api.github.com/repos/actions-runner-controller/mumoshu-actions-test/actions/runners/47: 422 Bad request - Runner \"example-runnerset-0\" is still running a job\" []
    if !busy {
        if err := r.GitHubClient.RemoveRunner(ctx, enterprise, org, repo, id); err != nil {
            return false, err
        }
    }

    return true, nil
}
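
// Contract used by Reconcile above: (false, nil) means the runner no longer exists on
// GitHub; (false, err) means listing failed or the runner is online and busy, so deletion
// must be retried later; (true, nil) means the runner was removed, or was offline-but-busy
// and removal was deliberately skipped to avoid the 422 described above.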

func (r *RunnerPodReconciler) SetupWithManager(mgr ctrl.Manager) error {
    name := "runnerpod-controller"
    if r.Name != "" {
        name = r.Name
    }

    r.Recorder = mgr.GetEventRecorderFor(name)

    return ctrl.NewControllerManagedBy(mgr).
        For(&corev1.Pod{}).
        Named(name).
        Complete(r)
}
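
// A minimal sketch of wiring this reconciler up from a manager entrypoint. The field
// names match the struct above; the construction of mgr and ghClient is assumed:
//
//	if err := (&controllers.RunnerPodReconciler{
//		Client:       mgr.GetClient(),
//		Log:          ctrl.Log.WithName("controllers").WithName("runnerpod"),
//		Scheme:       mgr.GetScheme(),
//		GitHubClient: ghClient,
//	}).SetupWithManager(mgr); err != nil {
//		setupLog.Error(err, "unable to create controller", "controller", "RunnerPod")
//		os.Exit(1)
//	}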

@@ -37,11 +37,13 @@ import (
    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

    "github.com/summerwind/actions-runner-controller/api/v1alpha1"
    "github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
    "github.com/actions-runner-controller/actions-runner-controller/controllers/metrics"
)

const (
    LabelKeyRunnerTemplateHash = "runner-template-hash"
    LabelKeyRunnerTemplateHash   = "runner-template-hash"
    LabelKeyRunnerDeploymentName = "runner-deployment-name"

    runnerSetOwnerKey = ".metadata.controller"
)

@@ -63,8 +65,7 @@ type RunnerDeploymentReconciler struct {
// +kubebuilder:rbac:groups=actions.summerwind.dev,resources=runnerreplicasets/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=core,resources=events,verbs=create;patch

func (r *RunnerDeploymentReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
    ctx := context.Background()
func (r *RunnerDeploymentReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
    log := r.Log.WithValues("runnerdeployment", req.NamespacedName)

    var rd v1alpha1.RunnerDeployment
@@ -76,6 +77,8 @@ func (r *RunnerDeploymentReconciler) Reconcile(req ctrl.Request) (ctrl.Result, e
        return ctrl.Result{}, nil
    }

    metrics.SetRunnerDeployment(rd)

    var myRunnerReplicaSetList v1alpha1.RunnerReplicaSetList
    if err := r.List(ctx, &myRunnerReplicaSetList, client.InNamespace(req.Namespace), client.MatchingFields{runnerSetOwnerKey: req.Name}); err != nil {
        return ctrl.Result{}, err
@@ -151,7 +154,7 @@ func (r *RunnerDeploymentReconciler) Reconcile(req ctrl.Request) (ctrl.Result, e
    // A selector update change doesn't trigger replicaset replacement,
    // but we still need to update the existing replicaset with it.
    // Otherwise selector-based runner queries will never work on replicasets created before the controller v0.17.0.
    // See https://github.com/summerwind/actions-runner-controller/pull/355#discussion_r585379259
    // See https://github.com/actions-runner-controller/actions-runner-controller/pull/355#discussion_r585379259
    if err := r.Client.Update(ctx, updateSet); err != nil {
        log.Error(err, "Failed to update runnerreplicaset resource")

@@ -184,18 +187,35 @@ func (r *RunnerDeploymentReconciler) Reconcile(req ctrl.Request) (ctrl.Result, e
        return ctrl.Result{}, err
    }

    // Do we have old runner replica sets that should eventually be deleted?
    if len(oldSets) > 0 {
        readyReplicas := newestSet.Status.ReadyReplicas
        var readyReplicas int
        if newestSet.Status.ReadyReplicas != nil {
            readyReplicas = *newestSet.Status.ReadyReplicas
        }

        if readyReplicas < currentDesiredReplicas {
            log.WithValues("runnerreplicaset", types.NamespacedName{
        oldSetsCount := len(oldSets)

        logWithDebugInfo := log.WithValues(
            "newest_runnerreplicaset", types.NamespacedName{
                Namespace: newestSet.Namespace,
                Name:      newestSet.Name,
            }).
            Info("Waiting for the newest runner replica set to be 100% available")
            },
            "newest_runnerreplicaset_replicas_ready", readyReplicas,
            "newest_runnerreplicaset_replicas_desired", currentDesiredReplicas,
            "old_runnerreplicasets_count", oldSetsCount,
        )

        return ctrl.Result{RequeueAfter: 10 * time.Second}, nil
        if readyReplicas < currentDesiredReplicas {
            logWithDebugInfo.
                Info("Waiting for the newest runnerreplicaset to be 100% available")

            return ctrl.Result{}, nil
        }

        if oldSetsCount > 0 {
            logWithDebugInfo.
                Info("The newest runnerreplicaset is 100% available. Deleting old runnerreplicasets")
        }

        for i := range oldSets {
@@ -213,14 +233,49 @@ func (r *RunnerDeploymentReconciler) Reconcile(req ctrl.Request) (ctrl.Result, e
        }
    }

    if rd.Spec.Replicas == nil && desiredRS.Spec.Replicas != nil {
    var replicaSets []v1alpha1.RunnerReplicaSet

    replicaSets = append(replicaSets, *newestSet)
    replicaSets = append(replicaSets, oldSets...)

    var totalCurrentReplicas, totalStatusAvailableReplicas, updatedReplicas int

    for _, rs := range replicaSets {
        var current, available int

        if rs.Status.Replicas != nil {
            current = *rs.Status.Replicas
        }

        if rs.Status.AvailableReplicas != nil {
            available = *rs.Status.AvailableReplicas
        }

        totalCurrentReplicas += current
        totalStatusAvailableReplicas += available
    }

    if newestSet.Status.Replicas != nil {
        updatedReplicas = *newestSet.Status.Replicas
    }

    var status v1alpha1.RunnerDeploymentStatus

    status.AvailableReplicas = &totalStatusAvailableReplicas
    status.ReadyReplicas = &totalStatusAvailableReplicas
    status.DesiredReplicas = &newDesiredReplicas
    status.Replicas = &totalCurrentReplicas
    status.UpdatedReplicas = &updatedReplicas
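
    // Worked example: if newestSet reports Replicas=2 / AvailableReplicas=1 and a single old
    // set reports Replicas=1 / AvailableReplicas=1, then status.Replicas=3,
    // status.AvailableReplicas=2 (ReadyReplicas mirrors it), and status.UpdatedReplicas=2,
    // since UpdatedReplicas counts only the newest set.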

    if !reflect.DeepEqual(rd.Status, status) {
        updated := rd.DeepCopy()
        updated.Status.Replicas = desiredRS.Spec.Replicas
        updated.Status = status

        if err := r.Status().Update(ctx, updated); err != nil {
            log.Error(err, "Failed to update runnerdeployment status")

            return ctrl.Result{}, err
        if err := r.Status().Patch(ctx, updated, client.MergeFrom(&rd)); err != nil {
            log.Info("Failed to patch runnerdeployment status. Retrying immediately", "error", err.Error())
            return ctrl.Result{
                Requeue: true,
            }, nil
        }
    }

@@ -326,23 +381,32 @@ func (r *RunnerDeploymentReconciler) newRunnerReplicaSet(rd v1alpha1.RunnerDeplo
    return newRunnerReplicaSet(&rd, r.CommonRunnerLabels, r.Scheme)
}

func getSelector(rd *v1alpha1.RunnerDeployment) *metav1.LabelSelector {
    selector := rd.Spec.Selector
    if selector == nil {
        selector = &metav1.LabelSelector{MatchLabels: map[string]string{LabelKeyRunnerDeploymentName: rd.Name}}
    }

    return selector
}
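
// Sketch of how the default selector above can drive a runner query; this snippet is
// illustrative, not part of the diff (it assumes a controller-runtime client.Client c):
//
//	sel, err := metav1.LabelSelectorAsSelector(getSelector(rd))
//	if err != nil {
//		return err
//	}
//	var list v1alpha1.RunnerReplicaSetList
//	err = c.List(ctx, &list, client.InNamespace(rd.Namespace),
//		client.MatchingLabelsSelector{Selector: sel})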

func newRunnerReplicaSet(rd *v1alpha1.RunnerDeployment, commonRunnerLabels []string, scheme *runtime.Scheme) (*v1alpha1.RunnerReplicaSet, error) {
    newRSTemplate := *rd.Spec.Template.DeepCopy()

    templateHash := ComputeHash(&newRSTemplate)
    // Add template hash label to selector.
    labels := CloneAndAddLabel(rd.Spec.Template.Labels, LabelKeyRunnerTemplateHash, templateHash)

    for _, l := range commonRunnerLabels {
        newRSTemplate.Spec.Labels = append(newRSTemplate.Spec.Labels, l)
    }

    newRSTemplate.Labels = labels
    templateHash := ComputeHash(&newRSTemplate)

    // Add template hash label to selector.
    newRSTemplate.ObjectMeta.Labels = CloneAndAddLabel(newRSTemplate.ObjectMeta.Labels, LabelKeyRunnerTemplateHash, templateHash)

    // This label selector is used by default when rd.Spec.Selector is empty.
    newRSTemplate.ObjectMeta.Labels = CloneAndAddLabel(newRSTemplate.ObjectMeta.Labels, LabelKeyRunnerDeploymentName, rd.Name)

    selector := getSelector(rd)

    selector := rd.Spec.Selector
    if selector == nil {
        selector = &metav1.LabelSelector{MatchLabels: labels}
    }
    newRSSelector := CloneSelectorAndAddLabel(selector, LabelKeyRunnerTemplateHash, templateHash)
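
    // Note the ordering change above: the hash is now computed only after commonRunnerLabels
    // have been merged into the template, so changing the controller-wide labels also changes
    // the template hash and rolls out a replacement replica set.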

    rs := v1alpha1.RunnerReplicaSet{
@@ -350,7 +414,7 @@ func newRunnerReplicaSet(rd *v1alpha1.RunnerDeployment, commonRunnerLabels []str
        ObjectMeta: metav1.ObjectMeta{
            GenerateName: rd.ObjectMeta.Name + "-",
            Namespace:    rd.ObjectMeta.Namespace,
            Labels:       labels,
            Labels:       newRSTemplate.ObjectMeta.Labels,
        },
        Spec: v1alpha1.RunnerReplicaSetSpec{
            Replicas: rd.Spec.Replicas,
@@ -374,7 +438,7 @@ func (r *RunnerDeploymentReconciler) SetupWithManager(mgr ctrl.Manager) error {

    r.Recorder = mgr.GetEventRecorderFor(name)

    if err := mgr.GetFieldIndexer().IndexField(&v1alpha1.RunnerReplicaSet{}, runnerSetOwnerKey, func(rawObj runtime.Object) []string {
    if err := mgr.GetFieldIndexer().IndexField(context.TODO(), &v1alpha1.RunnerReplicaSet{}, runnerSetOwnerKey, func(rawObj client.Object) []string {
        runnerSet := rawObj.(*v1alpha1.RunnerReplicaSet)
        owner := metav1.GetControllerOf(runnerSet)
        if owner == nil {

@@ -20,7 +20,7 @@ import (
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "sigs.k8s.io/controller-runtime/pkg/client"

    actionsv1alpha1 "github.com/summerwind/actions-runner-controller/api/v1alpha1"
    actionsv1alpha1 "github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
)

func TestNewRunnerReplicaSet(t *testing.T) {
@@ -50,7 +50,9 @@ func TestNewRunnerReplicaSet(t *testing.T) {
            },
        },
        Spec: actionsv1alpha1.RunnerSpec{
            Labels: []string{"project1"},
            RunnerConfig: actionsv1alpha1.RunnerConfig{
                Labels: []string{"project1"},
            },
        },
    },
},
@@ -126,12 +128,13 @@ func TestNewRunnerReplicaSet(t *testing.T) {
// * starting the 'RunnerDeploymentReconciler'
// * stopping the 'RunnerDeploymentReconciler' after the test ends
// Call this function at the start of each of your tests.
func SetupDeploymentTest(ctx context.Context) *corev1.Namespace {
    var stopCh chan struct{}
func SetupDeploymentTest(ctx2 context.Context) *corev1.Namespace {
    var ctx context.Context
    var cancel func()
    ns := &corev1.Namespace{}

    BeforeEach(func() {
        stopCh = make(chan struct{})
        ctx, cancel = context.WithCancel(ctx2)
        *ns = corev1.Namespace{
            ObjectMeta: metav1.ObjectMeta{Name: "testns-" + randStringRunes(5)},
        }
@@ -139,7 +142,9 @@ func SetupDeploymentTest(ctx context.Context) *corev1.Namespace {
        err := k8sClient.Create(ctx, ns)
        Expect(err).NotTo(HaveOccurred(), "failed to create test namespace")

        mgr, err := ctrl.NewManager(cfg, ctrl.Options{})
        mgr, err := ctrl.NewManager(cfg, ctrl.Options{
            Namespace: ns.Name,
        })
        Expect(err).NotTo(HaveOccurred(), "failed to create manager")

        controller := &RunnerDeploymentReconciler{
@@ -155,13 +160,13 @@ func SetupDeploymentTest(ctx context.Context) *corev1.Namespace {
        go func() {
            defer GinkgoRecover()

            err := mgr.Start(stopCh)
            err := mgr.Start(ctx)
            Expect(err).NotTo(HaveOccurred(), "failed to start manager")
        }()
    })

    AfterEach(func() {
        close(stopCh)
        defer cancel()

        err := k8sClient.Delete(ctx, ns)
        Expect(err).NotTo(HaveOccurred(), "failed to delete test namespace")
@@ -199,10 +204,14 @@ var _ = Context("Inside of a new namespace", func() {
            },
        },
        Spec: actionsv1alpha1.RunnerSpec{
            Repository: "foo/bar",
            Image:      "bar",
            Env: []corev1.EnvVar{
                {Name: "FOO", Value: "FOOVALUE"},
            RunnerConfig: actionsv1alpha1.RunnerConfig{
                Repository: "test/valid",
                Image:      "bar",
            },
            RunnerPodSpec: actionsv1alpha1.RunnerPodSpec{
                Env: []corev1.EnvVar{
                    {Name: "FOO", Value: "FOOVALUE"},
                },
            },
        },
    },
@@ -295,10 +304,14 @@ var _ = Context("Inside of a new namespace", func() {
        Replicas: intPtr(1),
        Template: actionsv1alpha1.RunnerTemplate{
            Spec: actionsv1alpha1.RunnerSpec{
                Repository: "foo/bar",
                Image:      "bar",
                Env: []corev1.EnvVar{
                    {Name: "FOO", Value: "FOOVALUE"},
                RunnerConfig: actionsv1alpha1.RunnerConfig{
                    Repository: "test/valid",
                    Image:      "bar",
                },
                RunnerPodSpec: actionsv1alpha1.RunnerPodSpec{
                    Env: []corev1.EnvVar{
                        {Name: "FOO", Value: "FOOVALUE"},
                    },
                },
            },
        },
@@ -391,10 +404,14 @@ var _ = Context("Inside of a new namespace", func() {
        Replicas: intPtr(1),
        Template: actionsv1alpha1.RunnerTemplate{
            Spec: actionsv1alpha1.RunnerSpec{
                Repository: "foo/bar",
                Image:      "bar",
                Env: []corev1.EnvVar{
                    {Name: "FOO", Value: "FOOVALUE"},
                RunnerConfig: actionsv1alpha1.RunnerConfig{
                    Repository: "test/valid",
                    Image:      "bar",
                },
                RunnerPodSpec: actionsv1alpha1.RunnerPodSpec{
                    Env: []corev1.EnvVar{
                        {Name: "FOO", Value: "FOOVALUE"},
                    },
                },
            },
        },

@@ -20,6 +20,7 @@ import (
    "context"
    "errors"
    "fmt"
    "reflect"
    "time"

    gogithub "github.com/google/go-github/v33/github"
@@ -34,8 +35,8 @@ import (
    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

    "github.com/summerwind/actions-runner-controller/api/v1alpha1"
    "github.com/summerwind/actions-runner-controller/github"
    "github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
    "github.com/actions-runner-controller/actions-runner-controller/github"
)

// RunnerReplicaSetReconciler reconciles a Runner object
@@ -55,8 +56,7 @@ type RunnerReplicaSetReconciler struct {
// +kubebuilder:rbac:groups=actions.summerwind.dev,resources=runners/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=core,resources=events,verbs=create;patch

func (r *RunnerReplicaSetReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
    ctx := context.Background()
func (r *RunnerReplicaSetReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
    log := r.Log.WithValues("runnerreplicaset", req.NamespacedName)

    var rs v1alpha1.RunnerReplicaSet
@@ -88,20 +88,23 @@ func (r *RunnerReplicaSetReconciler) Reconcile(req ctrl.Request) (ctrl.Result, e
    var myRunners []v1alpha1.Runner

    var (
        available int
        current   int
        ready     int
        available int
    )

    for _, r := range allRunners.Items {
        // This guard is required so that a RunnerReplicaSet created by the controller v0.17.0
        // or earlier does not treat all the runners in the namespace as its children.
        if metav1.IsControlledBy(&r, &rs) {
        if metav1.IsControlledBy(&r, &rs) && !metav1.HasAnnotation(r.ObjectMeta, annotationKeyRegistrationOnly) {
            myRunners = append(myRunners, r)

            available += 1
            current += 1

            if r.Status.Phase == string(corev1.PodRunning) {
                ready += 1
                // available is currently the same as ready, as we don't yet have minReadySeconds for runners
                available += 1
            }
        }
    }
@@ -114,13 +117,79 @@ func (r *RunnerReplicaSetReconciler) Reconcile(req ctrl.Request) (ctrl.Result, e
        desired = 1
    }

    log.V(0).Info("debug", "desired", desired, "available", available)
    registrationOnlyRunnerNsName := req.NamespacedName
    registrationOnlyRunnerNsName.Name = registrationOnlyRunnerNameFor(rs.Name)
    registrationOnlyRunner := v1alpha1.Runner{}
    registrationOnlyRunnerExists := false
    if err := r.Get(
        ctx,
        registrationOnlyRunnerNsName,
        &registrationOnlyRunner,
    ); err != nil {
        if !kerrors.IsNotFound(err) {
            return ctrl.Result{}, err
        }
    } else {
        registrationOnlyRunnerExists = true
    }

    if available > desired {
        n := available - desired
    // On scale to zero, we must have a fully registered registration-only runner before we start deleting other runners, hence `desired == 0`.
    // On scale from zero, we must retain the registration-only runner until one or more other runners get registered, hence `registrationOnlyRunnerExists && current == 0`.
    // On RunnerReplicaSet creation, it always has 0 replicas and no registration-only runner.
    // In that case we don't need to bother creating a registration-only runner that would get deleted soon after we have 1 or more available replicas,
    // hence the condition is not `current == 0` alone, but `registrationOnlyRunnerExists && current == 0`.
    // See https://github.com/actions-runner-controller/actions-runner-controller/issues/516
    registrationOnlyRunnerNeeded := desired == 0 || (registrationOnlyRunnerExists && current == 0)
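
    // Truth table for the line above: scale-to-zero (desired == 0) always needs the
    // registration-only runner; scale-from-zero keeps it only while it already exists and
    // no ordinary runner is present yet (current == 0); and a freshly created replica set
    // with no registration-only runner never creates one just to delete it again.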

    if registrationOnlyRunnerNeeded {
        if registrationOnlyRunnerExists {
            if registrationOnlyRunner.Status.Phase == "" {
                log.Info("Still waiting for the registration-only runner to be registered")

                return ctrl.Result{}, nil
            }
        } else {
            // A registration-only runner does not exist and is needed, hence create it.

            runnerForScaleFromToZero, err := r.newRunner(rs)
            if err != nil {
                return ctrl.Result{}, fmt.Errorf("failed to create runner for scale from/to zero: %v", err)
            }

            runnerForScaleFromToZero.ObjectMeta.Name = registrationOnlyRunnerNsName.Name
            runnerForScaleFromToZero.ObjectMeta.GenerateName = ""
            runnerForScaleFromToZero.ObjectMeta.Labels = nil
            metav1.SetMetaDataAnnotation(&runnerForScaleFromToZero.ObjectMeta, annotationKeyRegistrationOnly, "true")

            if err := r.Client.Create(ctx, &runnerForScaleFromToZero); err != nil {
                log.Error(err, "Failed to create runner for scale from/to zero")

                return ctrl.Result{}, err
            }

            // We can continue deleting runner pods only after the
            // registration-only runner gets registered.
            return ctrl.Result{}, nil
        }
    } else {
        // A registration-only runner exists and is not needed, hence delete it.
        if registrationOnlyRunnerExists {
            if err := r.Client.Delete(ctx, &registrationOnlyRunner); err != nil {
                log.Error(err, "Retrying soon because we failed to delete registration-only runner")

                return ctrl.Result{Requeue: true}, nil
            }
        }
    }

    if current > desired {
        n := current - desired

        log.V(0).Info(fmt.Sprintf("Deleting %d runners", n), "desired", desired, "current", current, "ready", ready)

        // get runners that are currently offline/not busy/timed-out to register
        var deletionCandidates []v1alpha1.Runner

        // get runners that are currently not busy
        var notBusy []v1alpha1.Runner
        for _, runner := range allRunners.Items {
            busy, err := r.GitHubClient.IsRunnerBusy(ctx, runner.Spec.Enterprise, runner.Spec.Organization, runner.Spec.Repository, runner.Name)
            if err != nil {
@@ -162,40 +231,44 @@ func (r *RunnerReplicaSetReconciler) Reconcile(req ctrl.Request) (ctrl.Result, e
                    "Runner failed to register itself to GitHub in a timely manner. "+
                        "Marking the runner for scale down. "+
                        "CAUTION: If you see this a lot, you should investigate the root cause. "+
                        "See https://github.com/summerwind/actions-runner-controller/issues/288",
                        "See https://github.com/actions-runner-controller/actions-runner-controller/issues/288",
                    "runnerCreationTimestamp", runner.CreationTimestamp,
                    "currentTime", currentTime,
                    "configuredRegistrationTimeout", registrationTimeout,
                )

                notBusy = append(notBusy, runner)
                deletionCandidates = append(deletionCandidates, runner)
            }

            // offline runners should always be a great target for scale down
            if offline {
                notBusy = append(notBusy, runner)
                deletionCandidates = append(deletionCandidates, runner)
            }
        } else if !busy {
            notBusy = append(notBusy, runner)
            deletionCandidates = append(deletionCandidates, runner)
        }
    }

    if len(notBusy) < n {
        n = len(notBusy)
    if len(deletionCandidates) < n {
        n = len(deletionCandidates)
    }

    log.V(0).Info(fmt.Sprintf("Deleting %d runner(s)", n), "desired", desired, "current", current, "ready", ready)

    for i := 0; i < n; i++ {
        if err := r.Client.Delete(ctx, &notBusy[i]); client.IgnoreNotFound(err) != nil {
        if err := r.Client.Delete(ctx, &deletionCandidates[i]); client.IgnoreNotFound(err) != nil {
            log.Error(err, "Failed to delete runner resource")

            return ctrl.Result{}, err
        }

        r.Recorder.Event(&rs, corev1.EventTypeNormal, "RunnerDeleted", fmt.Sprintf("Deleted runner '%s'", notBusy[i].Name))
        log.Info("Deleted runner", "runnerreplicaset", rs.ObjectMeta.Name)
        r.Recorder.Event(&rs, corev1.EventTypeNormal, "RunnerDeleted", fmt.Sprintf("Deleted runner '%s'", deletionCandidates[i].Name))
        log.Info("Deleted runner")
    }
    } else if desired > available {
        n := desired - available
    } else if desired > current {
        n := desired - current

        log.V(0).Info(fmt.Sprintf("Creating %d runner(s)", n), "desired", desired, "current", current, "ready", ready)

        for i := 0; i < n; i++ {
            newRunner, err := r.newRunner(rs)
@@ -213,13 +286,18 @@ func (r *RunnerReplicaSetReconciler) Reconcile(req ctrl.Request) (ctrl.Result, e
        }
    }

    if rs.Status.AvailableReplicas != available || rs.Status.ReadyReplicas != ready {
        updated := rs.DeepCopy()
        updated.Status.AvailableReplicas = available
        updated.Status.ReadyReplicas = ready
    var status v1alpha1.RunnerReplicaSetStatus

        if err := r.Status().Update(ctx, updated); err != nil {
            log.Info("Failed to update status. Retrying immediately", "error", err.Error())
    status.Replicas = &current
    status.AvailableReplicas = &available
    status.ReadyReplicas = &ready

    if !reflect.DeepEqual(rs.Status, status) {
        updated := rs.DeepCopy()
        updated.Status = status

        if err := r.Status().Patch(ctx, updated, client.MergeFrom(&rs)); err != nil {
            log.Info("Failed to update runnerreplicaset status. Retrying immediately", "error", err.Error())
            return ctrl.Result{
                Requeue: true,
            }, nil
@@ -262,3 +340,7 @@ func (r *RunnerReplicaSetReconciler) SetupWithManager(mgr ctrl.Manager) error {
        Named(name).
        Complete(r)
}

func registrationOnlyRunnerNameFor(rsName string) string {
    return rsName + "-registration-only"
}
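
// So the registration-only runner for a RunnerReplicaSet named "example" is
// "example-registration-only". A sketch of how other code paths can recognize it
// (metav1.HasAnnotation is the real apimachinery helper; the function name is illustrative):
//
//	func isRegistrationOnly(r *v1alpha1.Runner) bool {
//		return metav1.HasAnnotation(r.ObjectMeta, annotationKeyRegistrationOnly)
//	}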

@@ -2,15 +2,14 @@ package controllers

import (
    "context"
    "fmt"
    "math/rand"
    "net/http/httptest"
    "time"

    "github.com/google/go-github/v33/github"
    corev1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/client-go/kubernetes/scheme"
    "k8s.io/utils/pointer"
    ctrl "sigs.k8s.io/controller-runtime"
    logf "sigs.k8s.io/controller-runtime/pkg/log"

@@ -19,8 +18,8 @@ import (
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "sigs.k8s.io/controller-runtime/pkg/client"

    actionsv1alpha1 "github.com/summerwind/actions-runner-controller/api/v1alpha1"
    "github.com/summerwind/actions-runner-controller/github/fake"
    actionsv1alpha1 "github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
    "github.com/actions-runner-controller/actions-runner-controller/github/fake"
)

var (
@@ -34,12 +33,13 @@ var (
// * starting the 'RunnerReconciler'
// * stopping the 'RunnerReplicaSetReconciler' after the test ends
// Call this function at the start of each of your tests.
func SetupTest(ctx context.Context) *corev1.Namespace {
    var stopCh chan struct{}
func SetupTest(ctx2 context.Context) *corev1.Namespace {
    var ctx context.Context
    var cancel func()
    ns := &corev1.Namespace{}

    BeforeEach(func() {
        stopCh = make(chan struct{})
        ctx, cancel = context.WithCancel(ctx2)
        *ns = corev1.Namespace{
            ObjectMeta: metav1.ObjectMeta{Name: "testns-" + randStringRunes(5)},
        }
@@ -47,7 +47,9 @@ func SetupTest(ctx context.Context) *corev1.Namespace {
        err := k8sClient.Create(ctx, ns)
        Expect(err).NotTo(HaveOccurred(), "failed to create test namespace")

        mgr, err := ctrl.NewManager(cfg, ctrl.Options{})
        mgr, err := ctrl.NewManager(cfg, ctrl.Options{
            Namespace: ns.Name,
        })
        Expect(err).NotTo(HaveOccurred(), "failed to create manager")

        runnersList = fake.NewRunnersList()
@@ -68,13 +70,13 @@ func SetupTest(ctx context.Context) *corev1.Namespace {
        go func() {
            defer GinkgoRecover()

            err := mgr.Start(stopCh)
            err := mgr.Start(ctx)
            Expect(err).NotTo(HaveOccurred(), "failed to start manager")
        }()
    })

    AfterEach(func() {
        close(stopCh)
        defer cancel()

        server.Close()
        err := k8sClient.Delete(ctx, ns)
@@ -127,10 +129,14 @@ var _ = Context("Inside of a new namespace", func() {
            },
        },
        Spec: actionsv1alpha1.RunnerSpec{
            Repository: "foo/bar",
            Image:      "bar",
            Env: []corev1.EnvVar{
                {Name: "FOO", Value: "FOOVALUE"},
            RunnerConfig: actionsv1alpha1.RunnerConfig{
                Repository: "test/valid",
                Image:      "bar",
            },
            RunnerPodSpec: actionsv1alpha1.RunnerPodSpec{
                Env: []corev1.EnvVar{
                    {Name: "FOO", Value: "FOOVALUE"},
                },
            },
        },
    },
@@ -167,15 +173,7 @@ var _ = Context("Inside of a new namespace", func() {
                return -1
            }

            for i, runner := range runners.Items {
                runnersList.Add(&github.Runner{
                    ID:     pointer.Int64Ptr(int64(i) + 1),
                    Name:   pointer.StringPtr(runner.Name),
                    OS:     pointer.StringPtr("linux"),
                    Status: pointer.StringPtr("online"),
                    Busy:   pointer.BoolPtr(false),
                })
            }
            runnersList.Sync(runners.Items)

            return len(runners.Items)
        },
@@ -224,15 +222,7 @@ var _ = Context("Inside of a new namespace", func() {
                logf.Log.Error(err, "list runners")
            }

            for i, runner := range runners.Items {
                runnersList.Add(&github.Runner{
                    ID:     pointer.Int64Ptr(int64(i) + 1),
                    Name:   pointer.StringPtr(runner.Name),
                    OS:     pointer.StringPtr("linux"),
                    Status: pointer.StringPtr("online"),
                    Busy:   pointer.BoolPtr(false),
                })
            }
            runnersList.Sync(runners.Items)

            return len(runners.Items)
        },
@@ -260,21 +250,35 @@ var _ = Context("Inside of a new namespace", func() {

        Eventually(
            func() int {
                err := k8sClient.List(ctx, &runners, client.InNamespace(ns.Name))
                if err != nil {
                selector, err := metav1.LabelSelectorAsSelector(&metav1.LabelSelector{
                    MatchLabels: map[string]string{
                        "foo": "bar",
                    },
                })
                Expect(err).ToNot(HaveOccurred())

                var regOnly actionsv1alpha1.Runner
                if err := k8sClient.Get(ctx, types.NamespacedName{Namespace: ns.Name, Name: registrationOnlyRunnerNameFor(name)}, &regOnly); err != nil {
                    logf.Log.Info(fmt.Sprintf("Failed getting registration-only runner in test: %v", err))
                    return -1
                } else {
                    updated := regOnly.DeepCopy()
                    updated.Status.Phase = "Completed"

                    if err := k8sClient.Status().Patch(ctx, updated, client.MergeFrom(&regOnly)); err != nil {
                        logf.Log.Info(fmt.Sprintf("Failed updating registration-only runner in test: %v", err))
                        return -1
                    }

                    runnersList.AddOffline([]actionsv1alpha1.Runner{*updated})
                }

                if err := k8sClient.List(ctx, &runners, client.InNamespace(ns.Name), client.MatchingLabelsSelector{Selector: selector}); err != nil {
                    logf.Log.Error(err, "list runners")
                    return -1
                }

                for i, runner := range runners.Items {
                    runnersList.Add(&github.Runner{
                        ID:     pointer.Int64Ptr(int64(i) + 1),
                        Name:   pointer.StringPtr(runner.Name),
                        OS:     pointer.StringPtr("linux"),
                        Status: pointer.StringPtr("online"),
                        Busy:   pointer.BoolPtr(false),
                    })
                }
                runnersList.Sync(runners.Items)

                return len(runners.Items)
            },