mirror of
https://github.com/actions/actions-runner-controller.git
synced 2025-12-10 11:41:27 +00:00
Compare commits
163 Commits
actions-ru
...
actions-ru
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
7008b0c257 | ||
|
|
d9df455781 | ||
|
|
7e42d3fa7c | ||
|
|
0593125d96 | ||
|
|
a815c37614 | ||
|
|
3539569fed | ||
|
|
fc131870aa | ||
|
|
382afa4450 | ||
|
|
5125dd7e77 | ||
|
|
2c711506ea | ||
|
|
dfa0f2eef4 | ||
|
|
180db37a9a | ||
|
|
424c33b11f | ||
|
|
34d9c6d4db | ||
|
|
167c5b4dc9 | ||
|
|
91c22ef964 | ||
|
|
5d292ee5ff | ||
|
|
5b4b65664c | ||
|
|
b6465c5d09 | ||
|
|
dc9f9b0bfb | ||
|
|
02e05bdafb | ||
|
|
a9421edd46 | ||
|
|
fb66b28569 | ||
|
|
fabead8c8e | ||
|
|
d528d18211 | ||
|
|
7e593a80ff | ||
|
|
27bdc780a3 | ||
|
|
3948406374 | ||
|
|
743e6d6202 | ||
|
|
29260549fa | ||
|
|
f17edd500b | ||
|
|
14564c7b8e | ||
|
|
7f2795b5d6 | ||
|
|
b27b6ea2a8 | ||
|
|
f858e2e432 | ||
|
|
6f130c2db5 | ||
|
|
dcea0f7f79 | ||
|
|
f19e7ea8a8 | ||
|
|
9437e164b4 | ||
|
|
82d1be7791 | ||
|
|
dbab1a5e92 | ||
|
|
e5a9d50cb6 | ||
|
|
67031acdc4 | ||
|
|
b1bfa8787f | ||
|
|
c78116b0f9 | ||
|
|
4ec57d3e39 | ||
|
|
79543add3f | ||
|
|
7722730dc0 | ||
|
|
044f4ad4ea | ||
|
|
20394be04d | ||
|
|
7a305d2892 | ||
|
|
927d6f03ce | ||
|
|
127a9aa7c4 | ||
|
|
2703fa75d6 | ||
|
|
605ec158f4 | ||
|
|
3b45d1b334 | ||
|
|
acb906164b | ||
|
|
98da4c2adb | ||
|
|
9e1c28fcff | ||
|
|
774db3fef4 | ||
|
|
8b90b0f0e3 | ||
|
|
a277489003 | ||
|
|
1084a37174 | ||
|
|
9e4dbf497c | ||
|
|
af0ca03752 | ||
|
|
37d9599dca | ||
|
|
08a676cfd4 | ||
|
|
f2e2060ff8 | ||
|
|
dc5f90025c | ||
|
|
8566a4f453 | ||
|
|
3366dc9a63 | ||
|
|
fa94799ec8 | ||
|
|
c424d1afee | ||
|
|
99f83a9bf0 | ||
|
|
aa7d4c5ecc | ||
|
|
552ee28072 | ||
|
|
fa77facacd | ||
|
|
5b28f3d964 | ||
|
|
c36748b8bc | ||
|
|
f16f5b0aa4 | ||
|
|
c889b92f45 | ||
|
|
46be20976a | ||
|
|
8c42f99d0b | ||
|
|
a93fd21f21 | ||
|
|
7523ea44f1 | ||
|
|
30ab0c0b71 | ||
|
|
a72f190ef6 | ||
|
|
cb60c1ec3b | ||
|
|
e108e04dda | ||
|
|
2e083bca28 | ||
|
|
198b13324d | ||
|
|
605dae3995 | ||
|
|
d2b0920454 | ||
|
|
2cbeca0e7c | ||
|
|
859e04a680 | ||
|
|
c0821d4ede | ||
|
|
c3a6e45920 | ||
|
|
818dfd6515 | ||
|
|
726b39aedd | ||
|
|
7638c21e92 | ||
|
|
c09d6075c6 | ||
|
|
39d37a7d28 | ||
|
|
de0315380d | ||
|
|
906ddacbc6 | ||
|
|
c388446668 | ||
|
|
d56971ca7c | ||
|
|
cb14d7530b | ||
|
|
fbb24c8c0a | ||
|
|
0b88b246d3 | ||
|
|
a4631f345b | ||
|
|
7be31ce3e5 | ||
|
|
57a7b8076f | ||
|
|
5309b1c02c | ||
|
|
ae09e6ebb7 | ||
|
|
3cd124dce3 | ||
|
|
25f5817a5e | ||
|
|
0510f19607 | ||
|
|
9d961c58ff | ||
|
|
ab25907050 | ||
|
|
6cbba80df1 | ||
|
|
082245c5db | ||
|
|
a82e020daa | ||
|
|
c8c2d44a5c | ||
|
|
4e7b8b57c0 | ||
|
|
e7020c7c0f | ||
|
|
cb54864387 | ||
|
|
0e0f385f72 | ||
|
|
b3cae25741 | ||
|
|
469b117a09 | ||
|
|
5f59734078 | ||
|
|
e00b3b9714 | ||
|
|
588872a316 | ||
|
|
a0feee257f | ||
|
|
a18ac330bb | ||
|
|
0901456320 | ||
|
|
dbd7b486d2 | ||
|
|
7e766282aa | ||
|
|
ba175148c8 | ||
|
|
358146ee54 | ||
|
|
e9dd16b023 | ||
|
|
1ba4098648 | ||
|
|
05fb8569b3 | ||
|
|
db45a375d0 | ||
|
|
81dd47a893 | ||
|
|
6b77a2a5a8 | ||
|
|
dc4cf3f57b | ||
|
|
d810b579a5 | ||
|
|
47c8de9dc3 | ||
|
|
74a53bde5e | ||
|
|
aad2615487 | ||
|
|
03d9b6a09f | ||
|
|
5d280cc8c8 | ||
|
|
133c4fb21e | ||
|
|
3b2d2c052e | ||
|
|
37c2a62fa8 | ||
|
|
2eeb56d1c8 | ||
|
|
a612b38f9b | ||
|
|
1c67ea65d9 | ||
|
|
c26fb5ad5f | ||
|
|
325c2cc385 | ||
|
|
2e551c9d0a | ||
|
|
7b44454d01 | ||
|
|
f2680b2f2d |
13
.dockerignore
Normal file
13
.dockerignore
Normal file
@@ -0,0 +1,13 @@
|
|||||||
|
Makefile
|
||||||
|
acceptance
|
||||||
|
runner
|
||||||
|
hack
|
||||||
|
test-assets
|
||||||
|
config
|
||||||
|
charts
|
||||||
|
.github
|
||||||
|
.envrc
|
||||||
|
.env
|
||||||
|
*.md
|
||||||
|
*.txt
|
||||||
|
*.sh
|
||||||
36
.github/ISSUE_TEMPLATE/bug_report.md
vendored
Normal file
36
.github/ISSUE_TEMPLATE/bug_report.md
vendored
Normal file
@@ -0,0 +1,36 @@
|
|||||||
|
---
|
||||||
|
name: Bug report
|
||||||
|
about: Create a report to help us improve
|
||||||
|
title: ''
|
||||||
|
assignees: ''
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
**Describe the bug**
|
||||||
|
A clear and concise description of what the bug is.
|
||||||
|
|
||||||
|
**Checks**
|
||||||
|
|
||||||
|
- [ ] My actions-runner-controller version (v0.x.y) does support the feature
|
||||||
|
- [ ] I'm using an unreleased version of the controller I built from HEAD of the default branch
|
||||||
|
|
||||||
|
**To Reproduce**
|
||||||
|
Steps to reproduce the behavior:
|
||||||
|
1. Go to '...'
|
||||||
|
2. Click on '....'
|
||||||
|
3. Scroll down to '....'
|
||||||
|
4. See error
|
||||||
|
|
||||||
|
**Expected behavior**
|
||||||
|
A clear and concise description of what you expected to happen.
|
||||||
|
|
||||||
|
**Screenshots**
|
||||||
|
If applicable, add screenshots to help explain your problem.
|
||||||
|
|
||||||
|
**Environment (please complete the following information):**
|
||||||
|
- Controller Version [e.g. 0.18.2]
|
||||||
|
- Deployment Method [e.g. Helm and Kustomize ]
|
||||||
|
- Helm Chart Version [e.g. 0.11.0, if applicable]
|
||||||
|
|
||||||
|
**Additional context**
|
||||||
|
Add any other context about the problem here.
|
||||||
19
.github/ISSUE_TEMPLATE/feature_request.md
vendored
Normal file
19
.github/ISSUE_TEMPLATE/feature_request.md
vendored
Normal file
@@ -0,0 +1,19 @@
|
|||||||
|
---
|
||||||
|
name: Feature request
|
||||||
|
about: Suggest an idea for this project
|
||||||
|
title: ''
|
||||||
|
assignees: ''
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
**Is your feature request related to a problem? Please describe.**
|
||||||
|
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
|
||||||
|
|
||||||
|
**Describe the solution you'd like**
|
||||||
|
A clear and concise description of what you want to happen.
|
||||||
|
|
||||||
|
**Describe alternatives you've considered**
|
||||||
|
A clear and concise description of any alternative solutions or features you've considered.
|
||||||
|
|
||||||
|
**Additional context**
|
||||||
|
Add any other context or screenshots about the feature request here.
|
||||||
34
.github/RELEASE_NOTE_TEMPLATE.md
vendored
Normal file
34
.github/RELEASE_NOTE_TEMPLATE.md
vendored
Normal file
@@ -0,0 +1,34 @@
|
|||||||
|
# Release Note Template
|
||||||
|
|
||||||
|
This is the template of actions-runner-controller's release notes.
|
||||||
|
|
||||||
|
Whenever a new release is made, I start by manually copy-pasting this template onto the GitHub UI for creating the release.
|
||||||
|
|
||||||
|
I then walk-through all the changes, take sometime to think abount best one-sentence explanations to tell the users about changes, write it all,
|
||||||
|
and click the publish button.
|
||||||
|
|
||||||
|
If you think you can improve future release notes in any way, please do submit a pull request to change the template below.
|
||||||
|
|
||||||
|
Note that even though it looks like a Go template, I don't use any templating to generate the changelog.
|
||||||
|
It's just that I'm used to reading and intepreting Go template by myself, not a computer program :)
|
||||||
|
|
||||||
|
**Title**:
|
||||||
|
|
||||||
|
```
|
||||||
|
v{{ .Version }}: {{ .TitlesOfImportantChanges }}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Body**:
|
||||||
|
|
||||||
|
```
|
||||||
|
**CAUTION:** If you're using the Helm chart, beware to review changes to CRDs and do manually upgrade CRDs! Helm installs CRDs only on installing a chart. It doesn't automatically upgrade CRDs. Otherwise you end up with troubles like #427, #467, and #468. Please refer to the [UPGRADING](charts/actions-runner-controller/docs/UPGRADING.md) docs for the latest process.
|
||||||
|
|
||||||
|
This release includes the following changes from contributors. Thank you!
|
||||||
|
|
||||||
|
- @{{ .GitHubUser }} fixed {{ .Feature }} to not break when ... (#{{ .PullRequestNumber }})
|
||||||
|
- @{{ .GitHubUser }} enhanced {{ .Feature }} to ... (#{{ .PullRequestNumber }})
|
||||||
|
- @{{ .GitHubUser }} added {{ .Feature }} for ... (#{{ .PullRequestNumber }})
|
||||||
|
- @{{ .GitHubUser }} fixed {{ .Topic }} in the documentation so that ... (#{{ .PullRequestNumber }})
|
||||||
|
- @{{ .GitHubUser }} added {{ .Topic }} to the documentation (#{{ .PullRequestNumber }})
|
||||||
|
- @{{ .GitHubUser }} improved the documentation about {{ .Topic }} to also cover ... (#{{ .PullRequestNumber }})
|
||||||
|
```
|
||||||
25
.github/lock.yml
vendored
Normal file
25
.github/lock.yml
vendored
Normal file
@@ -0,0 +1,25 @@
|
|||||||
|
# Configuration for Lock Threads
|
||||||
|
# Repo: https://github.com/dessant/lock-threads-app
|
||||||
|
# App: https://github.com/apps/lock
|
||||||
|
|
||||||
|
# Number of days of inactivity before a closed issue or pull request is locked
|
||||||
|
daysUntilLock: 7
|
||||||
|
|
||||||
|
# Skip issues and pull requests created before a given timestamp. Timestamp must
|
||||||
|
# follow ISO 8601 (`YYYY-MM-DD`). Set to `false` to disable
|
||||||
|
skipCreatedBefore: false
|
||||||
|
|
||||||
|
# Issues and pull requests with these labels will be ignored. Set to `[]` to disable
|
||||||
|
exemptLabels: []
|
||||||
|
|
||||||
|
# Label to add before locking, such as `outdated`. Set to `false` to disable
|
||||||
|
lockLabel: false
|
||||||
|
|
||||||
|
# Comment to post before locking. Set to `false` to disable
|
||||||
|
lockComment: >
|
||||||
|
This thread has been automatically locked since there has not been
|
||||||
|
any recent activity after it was closed. Please open a new issue for
|
||||||
|
related bugs.
|
||||||
|
|
||||||
|
# Assign `resolved` as the reason for locking. Set to `false` to disable
|
||||||
|
setLockReason: true
|
||||||
20
.github/renovate.json5
vendored
Normal file
20
.github/renovate.json5
vendored
Normal file
@@ -0,0 +1,20 @@
|
|||||||
|
{
|
||||||
|
"extends": ["config:base"],
|
||||||
|
"packageRules": [
|
||||||
|
{
|
||||||
|
// automatically merge an update of runner
|
||||||
|
"matchPackageNames": ["actions/runner"],
|
||||||
|
"extractVersion": "^v(?<version>.*)$",
|
||||||
|
"automerge": true
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"regexManagers": [
|
||||||
|
{
|
||||||
|
// use https://github.com/actions/runner/releases
|
||||||
|
"fileMatch": [".github/workflows/build-and-release-runners.yml"],
|
||||||
|
"matchStrings": ["RUNNER_VERSION: +(?<currentValue>.*?)\\n"],
|
||||||
|
"depNameTemplate": "actions/runner",
|
||||||
|
"datasourceTemplate": "github-releases"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
66
.github/stale.yml
vendored
Normal file
66
.github/stale.yml
vendored
Normal file
@@ -0,0 +1,66 @@
|
|||||||
|
# Configuration for probot-stale - https://github.com/probot/stale
|
||||||
|
|
||||||
|
# Number of days of inactivity before an Issue or Pull Request becomes stale
|
||||||
|
daysUntilStale: 30
|
||||||
|
|
||||||
|
# Number of days of inactivity before an Issue or Pull Request with the stale label is closed.
|
||||||
|
# Set to false to disable. If disabled, issues still need to be closed manually, but will remain marked as stale.
|
||||||
|
daysUntilClose: 14
|
||||||
|
|
||||||
|
# Only issues or pull requests with all of these labels are check if stale. Defaults to `[]` (disabled)
|
||||||
|
onlyLabels: []
|
||||||
|
|
||||||
|
# Issues or Pull Requests with these labels will never be considered stale. Set to `[]` to disable
|
||||||
|
exemptLabels:
|
||||||
|
- pinned
|
||||||
|
- security
|
||||||
|
- enhancement
|
||||||
|
- refactor
|
||||||
|
- documentation
|
||||||
|
- chore
|
||||||
|
- needs-investigation
|
||||||
|
- bug
|
||||||
|
|
||||||
|
# Set to true to ignore issues in a project (defaults to false)
|
||||||
|
exemptProjects: false
|
||||||
|
|
||||||
|
# Set to true to ignore issues in a milestone (defaults to false)
|
||||||
|
exemptMilestones: false
|
||||||
|
|
||||||
|
# Set to true to ignore issues with an assignee (defaults to false)
|
||||||
|
exemptAssignees: false
|
||||||
|
|
||||||
|
# Label to use when marking as stale
|
||||||
|
staleLabel: stale
|
||||||
|
|
||||||
|
# Comment to post when marking as stale. Set to `false` to disable
|
||||||
|
markComment: >
|
||||||
|
This issue has been automatically marked as stale because it has not had
|
||||||
|
recent activity. It will be closed if no further activity occurs. Thank you
|
||||||
|
for your contributions.
|
||||||
|
|
||||||
|
# Comment to post when removing the stale label.
|
||||||
|
# unmarkComment: >
|
||||||
|
# Your comment here.
|
||||||
|
|
||||||
|
# Comment to post when closing a stale Issue or Pull Request.
|
||||||
|
# closeComment: >
|
||||||
|
# Your comment here.
|
||||||
|
|
||||||
|
# Limit the number of actions per hour, from 1-30. Default is 30
|
||||||
|
limitPerRun: 30
|
||||||
|
|
||||||
|
# Limit to only `issues` or `pulls`
|
||||||
|
# only: issues
|
||||||
|
|
||||||
|
# Optionally, specify configuration settings that are specific to just 'issues' or 'pulls':
|
||||||
|
# pulls:
|
||||||
|
# daysUntilStale: 30
|
||||||
|
# markComment: >
|
||||||
|
# This pull request has been automatically marked as stale because it has not had
|
||||||
|
# recent activity. It will be closed if no further activity occurs. Thank you
|
||||||
|
# for your contributions.
|
||||||
|
|
||||||
|
# issues:
|
||||||
|
# exemptLabels:
|
||||||
|
# - confirmed
|
||||||
72
.github/workflows/build-and-release-runners.yml
vendored
72
.github/workflows/build-and-release-runners.yml
vendored
@@ -13,25 +13,33 @@ on:
|
|||||||
paths:
|
paths:
|
||||||
- runner/patched/*
|
- runner/patched/*
|
||||||
- runner/Dockerfile
|
- runner/Dockerfile
|
||||||
- runner/dindrunner.Dockerfile
|
- runner/Dockerfile.ubuntu.1804
|
||||||
|
- runner/Dockerfile.dindrunner
|
||||||
- runner/entrypoint.sh
|
- runner/entrypoint.sh
|
||||||
- .github/workflows/build-and-release-runners.yml
|
- .github/workflows/build-and-release-runners.yml
|
||||||
|
|
||||||
|
env:
|
||||||
|
RUNNER_VERSION: 2.281.0
|
||||||
|
DOCKER_VERSION: 20.10.8
|
||||||
|
DOCKERHUB_USERNAME: summerwind
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
build:
|
build:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
name: Build ${{ matrix.name }}
|
name: Build ${{ matrix.name }}-ubuntu-${{ matrix.os-version }}
|
||||||
strategy:
|
strategy:
|
||||||
matrix:
|
matrix:
|
||||||
include:
|
include:
|
||||||
- name: actions-runner
|
- name: actions-runner
|
||||||
|
os-version: 20.04
|
||||||
dockerfile: Dockerfile
|
dockerfile: Dockerfile
|
||||||
|
- name: actions-runner
|
||||||
|
os-version: 18.04
|
||||||
|
dockerfile: Dockerfile.ubuntu.1804
|
||||||
- name: actions-runner-dind
|
- name: actions-runner-dind
|
||||||
dockerfile: dindrunner.Dockerfile
|
os-version: 20.04
|
||||||
env:
|
dockerfile: Dockerfile.dindrunner
|
||||||
RUNNER_VERSION: 2.277.1
|
|
||||||
DOCKER_VERSION: 19.03.12
|
|
||||||
DOCKERHUB_USERNAME: ${{ github.repository_owner }}
|
|
||||||
steps:
|
steps:
|
||||||
- name: Set outputs
|
- name: Set outputs
|
||||||
id: vars
|
id: vars
|
||||||
@@ -52,10 +60,10 @@ jobs:
|
|||||||
uses: docker/login-action@v1
|
uses: docker/login-action@v1
|
||||||
if: ${{ github.event_name == 'push' || github.event_name == 'release' }}
|
if: ${{ github.event_name == 'push' || github.event_name == 'release' }}
|
||||||
with:
|
with:
|
||||||
username: ${{ github.repository_owner }}
|
username: ${{ secrets.DOCKER_USER }}
|
||||||
password: ${{ secrets.DOCKER_ACCESS_TOKEN }}
|
password: ${{ secrets.DOCKER_ACCESS_TOKEN }}
|
||||||
|
|
||||||
- name: Build and Push
|
- name: Build and Push Versioned Tags
|
||||||
uses: docker/build-push-action@v2
|
uses: docker/build-push-action@v2
|
||||||
with:
|
with:
|
||||||
context: ./runner
|
context: ./runner
|
||||||
@@ -66,6 +74,48 @@ jobs:
|
|||||||
RUNNER_VERSION=${{ env.RUNNER_VERSION }}
|
RUNNER_VERSION=${{ env.RUNNER_VERSION }}
|
||||||
DOCKER_VERSION=${{ env.DOCKER_VERSION }}
|
DOCKER_VERSION=${{ env.DOCKER_VERSION }}
|
||||||
tags: |
|
tags: |
|
||||||
${{ env.DOCKERHUB_USERNAME }}/${{ matrix.name }}:v${{ env.RUNNER_VERSION }}
|
${{ env.DOCKERHUB_USERNAME }}/${{ matrix.name }}:v${{ env.RUNNER_VERSION }}-ubuntu-${{ matrix.os-version }}
|
||||||
${{ env.DOCKERHUB_USERNAME }}/${{ matrix.name }}:v${{ env.RUNNER_VERSION }}-${{ steps.vars.outputs.sha_short }}
|
${{ env.DOCKERHUB_USERNAME }}/${{ matrix.name }}:v${{ env.RUNNER_VERSION }}-ubuntu-${{ matrix.os-version }}-${{ steps.vars.outputs.sha_short }}
|
||||||
|
|
||||||
|
latest-tags:
|
||||||
|
if: ${{ github.event_name == 'push' || github.event_name == 'release' }}
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
name: Build ${{ matrix.name }}-latest
|
||||||
|
strategy:
|
||||||
|
matrix:
|
||||||
|
include:
|
||||||
|
- name: actions-runner
|
||||||
|
dockerfile: Dockerfile
|
||||||
|
- name: actions-runner-dind
|
||||||
|
dockerfile: Dockerfile.dindrunner
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- name: Checkout
|
||||||
|
uses: actions/checkout@v2
|
||||||
|
|
||||||
|
- name: Set up QEMU
|
||||||
|
uses: docker/setup-qemu-action@v1
|
||||||
|
|
||||||
|
- name: Set up Docker Buildx
|
||||||
|
uses: docker/setup-buildx-action@v1
|
||||||
|
with:
|
||||||
|
version: latest
|
||||||
|
|
||||||
|
- name: Login to DockerHub
|
||||||
|
uses: docker/login-action@v1
|
||||||
|
with:
|
||||||
|
username: ${{ secrets.DOCKER_USER }}
|
||||||
|
password: ${{ secrets.DOCKER_ACCESS_TOKEN }}
|
||||||
|
|
||||||
|
- name: Build and Push Latest Tag
|
||||||
|
uses: docker/build-push-action@v2
|
||||||
|
with:
|
||||||
|
context: ./runner
|
||||||
|
file: ./runner/${{ matrix.dockerfile }}
|
||||||
|
platforms: linux/amd64,linux/arm64
|
||||||
|
push: true
|
||||||
|
build-args: |
|
||||||
|
RUNNER_VERSION=${{ env.RUNNER_VERSION }}
|
||||||
|
DOCKER_VERSION=${{ env.DOCKER_VERSION }}
|
||||||
|
tags: |
|
||||||
${{ env.DOCKERHUB_USERNAME }}/${{ matrix.name }}:latest
|
${{ env.DOCKERHUB_USERNAME }}/${{ matrix.name }}:latest
|
||||||
|
|||||||
8
.github/workflows/on-push-lint-charts.yml
vendored
8
.github/workflows/on-push-lint-charts.yml
vendored
@@ -4,9 +4,11 @@ on:
|
|||||||
push:
|
push:
|
||||||
paths:
|
paths:
|
||||||
- 'charts/**'
|
- 'charts/**'
|
||||||
|
- '!charts/actions-runner-controller/docs/**'
|
||||||
|
- '!charts/actions-runner-controller/*.md'
|
||||||
- '.github/**'
|
- '.github/**'
|
||||||
|
- '!.github/*.md'
|
||||||
workflow_dispatch:
|
workflow_dispatch:
|
||||||
|
|
||||||
env:
|
env:
|
||||||
KUBE_SCORE_VERSION: 1.10.0
|
KUBE_SCORE_VERSION: 1.10.0
|
||||||
HELM_VERSION: v3.4.1
|
HELM_VERSION: v3.4.1
|
||||||
@@ -47,7 +49,7 @@ jobs:
|
|||||||
python-version: 3.7
|
python-version: 3.7
|
||||||
|
|
||||||
- name: Set up chart-testing
|
- name: Set up chart-testing
|
||||||
uses: helm/chart-testing-action@v2.0.1
|
uses: helm/chart-testing-action@v2.1.0
|
||||||
|
|
||||||
- name: Run chart-testing (list-changed)
|
- name: Run chart-testing (list-changed)
|
||||||
id: list-changed
|
id: list-changed
|
||||||
@@ -61,7 +63,7 @@ jobs:
|
|||||||
run: ct lint --config charts/.ci/ct-config.yaml
|
run: ct lint --config charts/.ci/ct-config.yaml
|
||||||
|
|
||||||
- name: Create kind cluster
|
- name: Create kind cluster
|
||||||
uses: helm/kind-action@v1.0.0
|
uses: helm/kind-action@v1.2.0
|
||||||
if: steps.list-changed.outputs.changed == 'true'
|
if: steps.list-changed.outputs.changed == 'true'
|
||||||
|
|
||||||
# We need cert-manager already installed in the cluster because we assume the CRDs exist
|
# We need cert-manager already installed in the cluster because we assume the CRDs exist
|
||||||
|
|||||||
@@ -7,7 +7,9 @@ on:
|
|||||||
- main # assume that the branch name may change in future
|
- main # assume that the branch name may change in future
|
||||||
paths:
|
paths:
|
||||||
- 'charts/**'
|
- 'charts/**'
|
||||||
|
- '!charts/actions-runner-controller/docs/**'
|
||||||
- '.github/**'
|
- '.github/**'
|
||||||
|
- '!**.md'
|
||||||
workflow_dispatch:
|
workflow_dispatch:
|
||||||
|
|
||||||
env:
|
env:
|
||||||
@@ -50,7 +52,7 @@ jobs:
|
|||||||
python-version: 3.7
|
python-version: 3.7
|
||||||
|
|
||||||
- name: Set up chart-testing
|
- name: Set up chart-testing
|
||||||
uses: helm/chart-testing-action@v2.0.1
|
uses: helm/chart-testing-action@v2.1.0
|
||||||
|
|
||||||
- name: Run chart-testing (list-changed)
|
- name: Run chart-testing (list-changed)
|
||||||
id: list-changed
|
id: list-changed
|
||||||
@@ -64,7 +66,7 @@ jobs:
|
|||||||
run: ct lint --config charts/.ci/ct-config.yaml
|
run: ct lint --config charts/.ci/ct-config.yaml
|
||||||
|
|
||||||
- name: Create kind cluster
|
- name: Create kind cluster
|
||||||
uses: helm/kind-action@v1.0.0
|
uses: helm/kind-action@v1.2.0
|
||||||
if: steps.list-changed.outputs.changed == 'true'
|
if: steps.list-changed.outputs.changed == 'true'
|
||||||
|
|
||||||
# We need cert-manager already installed in the cluster because we assume the CRDs exist
|
# We need cert-manager already installed in the cluster because we assume the CRDs exist
|
||||||
@@ -95,7 +97,7 @@ jobs:
|
|||||||
git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
|
git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
|
||||||
|
|
||||||
- name: Run chart-releaser
|
- name: Run chart-releaser
|
||||||
uses: helm/chart-releaser-action@v1.1.0
|
uses: helm/chart-releaser-action@v1.2.1
|
||||||
env:
|
env:
|
||||||
CR_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
|
CR_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
|
||||||
|
|
||||||
|
|||||||
4
.github/workflows/release.yml
vendored
4
.github/workflows/release.yml
vendored
@@ -7,7 +7,7 @@ jobs:
|
|||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
name: Release
|
name: Release
|
||||||
env:
|
env:
|
||||||
DOCKERHUB_USERNAME: ${{ github.repository_owner }}
|
DOCKERHUB_USERNAME: ${{ secrets.DOCKER_USER }}
|
||||||
steps:
|
steps:
|
||||||
- name: Set outputs
|
- name: Set outputs
|
||||||
id: vars
|
id: vars
|
||||||
@@ -47,7 +47,7 @@ jobs:
|
|||||||
- name: Login to DockerHub
|
- name: Login to DockerHub
|
||||||
uses: docker/login-action@v1
|
uses: docker/login-action@v1
|
||||||
with:
|
with:
|
||||||
username: ${{ github.repository_owner }}
|
username: ${{ secrets.DOCKER_USER }}
|
||||||
password: ${{ secrets.DOCKER_ACCESS_TOKEN }}
|
password: ${{ secrets.DOCKER_ACCESS_TOKEN }}
|
||||||
|
|
||||||
- name: Build and Push
|
- name: Build and Push
|
||||||
|
|||||||
12
.github/workflows/test.yaml
vendored
12
.github/workflows/test.yaml
vendored
@@ -7,6 +7,8 @@ on:
|
|||||||
paths-ignore:
|
paths-ignore:
|
||||||
- 'runner/**'
|
- 'runner/**'
|
||||||
- .github/workflows/build-and-release-runners.yml
|
- .github/workflows/build-and-release-runners.yml
|
||||||
|
- '*.md'
|
||||||
|
- '.gitignore'
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
test:
|
test:
|
||||||
@@ -15,11 +17,15 @@ jobs:
|
|||||||
steps:
|
steps:
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
uses: actions/checkout@v2
|
uses: actions/checkout@v2
|
||||||
|
- uses: actions/setup-go@v2
|
||||||
|
with:
|
||||||
|
go-version: '^1.16.5'
|
||||||
|
- run: go version
|
||||||
- name: Install kubebuilder
|
- name: Install kubebuilder
|
||||||
run: |
|
run: |
|
||||||
curl -L -O https://github.com/kubernetes-sigs/kubebuilder/releases/download/v2.2.0/kubebuilder_2.2.0_linux_amd64.tar.gz
|
curl -L -O https://github.com/kubernetes-sigs/kubebuilder/releases/download/v2.3.2/kubebuilder_2.3.2_linux_amd64.tar.gz
|
||||||
tar zxvf kubebuilder_2.2.0_linux_amd64.tar.gz
|
tar zxvf kubebuilder_2.3.2_linux_amd64.tar.gz
|
||||||
sudo mv kubebuilder_2.2.0_linux_amd64 /usr/local/kubebuilder
|
sudo mv kubebuilder_2.3.2_linux_amd64 /usr/local/kubebuilder
|
||||||
- name: Run tests
|
- name: Run tests
|
||||||
run: make test
|
run: make test
|
||||||
- name: Verify manifests are up-to-date
|
- name: Verify manifests are up-to-date
|
||||||
|
|||||||
6
.github/workflows/wip.yml
vendored
6
.github/workflows/wip.yml
vendored
@@ -4,13 +4,15 @@ on:
|
|||||||
- master
|
- master
|
||||||
paths-ignore:
|
paths-ignore:
|
||||||
- "runner/**"
|
- "runner/**"
|
||||||
|
- "**.md"
|
||||||
|
- ".gitignore"
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
build:
|
build:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
name: release-latest
|
name: release-latest
|
||||||
env:
|
env:
|
||||||
DOCKERHUB_USERNAME: ${{ github.repository_owner }}
|
DOCKERHUB_USERNAME: ${{ secrets.DOCKER_USER }}
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
uses: actions/checkout@v2
|
uses: actions/checkout@v2
|
||||||
@@ -27,7 +29,7 @@ jobs:
|
|||||||
- name: Login to DockerHub
|
- name: Login to DockerHub
|
||||||
uses: docker/login-action@v1
|
uses: docker/login-action@v1
|
||||||
with:
|
with:
|
||||||
username: ${{ github.repository_owner }}
|
username: ${{ secrets.DOCKER_USER }}
|
||||||
password: ${{ secrets.DOCKER_ACCESS_TOKEN }}
|
password: ${{ secrets.DOCKER_ACCESS_TOKEN }}
|
||||||
|
|
||||||
# Considered unstable builds
|
# Considered unstable builds
|
||||||
|
|||||||
9
.gitignore
vendored
9
.gitignore
vendored
@@ -1,3 +1,4 @@
|
|||||||
|
# Deploy Assets
|
||||||
release
|
release
|
||||||
|
|
||||||
# Binaries for programs and plugins
|
# Binaries for programs and plugins
|
||||||
@@ -15,17 +16,21 @@ bin
|
|||||||
*.out
|
*.out
|
||||||
|
|
||||||
# Kubernetes Generated files - skip generated files, except for vendored files
|
# Kubernetes Generated files - skip generated files, except for vendored files
|
||||||
|
|
||||||
!vendor/**/zz_generated.*
|
!vendor/**/zz_generated.*
|
||||||
|
|
||||||
# editor and IDE paraphernalia
|
# editor and IDE paraphernalia
|
||||||
|
.vscode
|
||||||
.idea
|
.idea
|
||||||
*.swp
|
*.swp
|
||||||
*.swo
|
*.swo
|
||||||
*~
|
*~
|
||||||
|
|
||||||
.envrc
|
.envrc
|
||||||
|
.env
|
||||||
|
.test.env
|
||||||
*.pem
|
*.pem
|
||||||
|
|
||||||
# OS
|
# OS
|
||||||
.DS_STORE
|
.DS_STORE
|
||||||
|
|
||||||
|
/test-assets
|
||||||
|
|||||||
142
CONTRIBUTING.md
Normal file
142
CONTRIBUTING.md
Normal file
@@ -0,0 +1,142 @@
|
|||||||
|
## Contributing
|
||||||
|
|
||||||
|
### How to Contribute a Patch
|
||||||
|
|
||||||
|
Depending on what you are patching depends on how you should go about it. Below are some guides on how to test patches locally as well as develop the controller and runners.
|
||||||
|
|
||||||
|
When sumitting a PR for a change please provide evidence that your change works as we still need to work on improving the CI of the project. Some resources are provided for helping achieve this, see this guide for details.
|
||||||
|
|
||||||
|
#### Running an End to End Test
|
||||||
|
|
||||||
|
> **Notes for Ubuntu 20.04+ users**
|
||||||
|
>
|
||||||
|
> If you're using Ubuntu 20.04 or greater, you might have installed `docker` with `snap`.
|
||||||
|
>
|
||||||
|
> If you want to stick with `snap`-provided `docker`, do not forget to set `TMPDIR` to
|
||||||
|
> somewhere under `$HOME`.
|
||||||
|
> Otherwise `kind load docker-image` fail while running `docker save`.
|
||||||
|
> See https://kind.sigs.k8s.io/docs/user/known-issues/#docker-installed-with-snap for more information.
|
||||||
|
|
||||||
|
To test your local changes against both PAT and App based authentication please run the `acceptance` make target with the authentication configuration details provided:
|
||||||
|
|
||||||
|
```shell
|
||||||
|
# This sets `VERSION` envvar to some appropriate value
|
||||||
|
. hack/make-env.sh
|
||||||
|
|
||||||
|
DOCKER_USER=*** \
|
||||||
|
GITHUB_TOKEN=*** \
|
||||||
|
APP_ID=*** \
|
||||||
|
PRIVATE_KEY_FILE_PATH=path/to/pem/file \
|
||||||
|
INSTALLATION_ID=*** \
|
||||||
|
make acceptance
|
||||||
|
```
|
||||||
|
|
||||||
|
**Rerunning a failed test**
|
||||||
|
|
||||||
|
When one of tests run by `make acceptance` failed, you'd probably like to rerun only the failed one.
|
||||||
|
|
||||||
|
It can be done by `make acceptance/run` and by setting the combination of `ACCEPTANCE_TEST_DEPLOYMENT_TOOL=helm|kubectl` and `ACCEPTANCE_TEST_SECRET_TYPE=token|app` values that failed (note, you just need to set the corresponding authentication configuration in this circumstance)
|
||||||
|
|
||||||
|
In the example below, we rerun the test for the combination `ACCEPTANCE_TEST_DEPLOYMENT_TOOL=helm ACCEPTANCE_TEST_SECRET_TYPE=token` only:
|
||||||
|
|
||||||
|
```shell
|
||||||
|
DOCKER_USER=*** \
|
||||||
|
GITHUB_TOKEN=*** \
|
||||||
|
ACCEPTANCE_TEST_DEPLOYMENT_TOOL=helm
|
||||||
|
ACCEPTANCE_TEST_SECRET_TYPE=token \
|
||||||
|
make acceptance/run
|
||||||
|
```
|
||||||
|
|
||||||
|
**Testing in a non-kind cluster**
|
||||||
|
|
||||||
|
If you prefer to test in a non-kind cluster, you can instead run:
|
||||||
|
|
||||||
|
```shell
|
||||||
|
KUBECONFIG=path/to/kubeconfig \
|
||||||
|
DOCKER_USER=*** \
|
||||||
|
GITHUB_TOKEN=*** \
|
||||||
|
APP_ID=*** \
|
||||||
|
PRIVATE_KEY_FILE_PATH=path/to/pem/file \
|
||||||
|
INSTALLATION_ID=*** \
|
||||||
|
ACCEPTANCE_TEST_SECRET_TYPE=token \
|
||||||
|
make docker-build acceptance/setup \
|
||||||
|
acceptance/deploy \
|
||||||
|
acceptance/tests
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Developing the Controller
|
||||||
|
|
||||||
|
Rerunning the whole acceptance test suite from scratch on every little change to the controller, the runner, and the chart would be counter-productive.
|
||||||
|
|
||||||
|
To make your development cycle faster, use the below command to update deploy and update all the three:
|
||||||
|
|
||||||
|
```shell
|
||||||
|
# Let assume we have all other envvars like DOCKER_USER, GITHUB_TOKEN already set,
|
||||||
|
# The below command will (re)build `actions-runner-controller:controller1` and `actions-runner:runner1`,
|
||||||
|
# load those into kind nodes, and then rerun kubectl or helm to install/upgrade the controller,
|
||||||
|
# and finally upgrade the runner deployment to use the new runner image.
|
||||||
|
#
|
||||||
|
# As helm 3 and kubectl is unable to recreate a pod when no tag change,
|
||||||
|
# you either need to bump VERSION and RUNNER_TAG on each run,
|
||||||
|
# or manually run `kubectl delete pod $POD` on respective pods for changes to actually take effect.
|
||||||
|
|
||||||
|
VERSION=controller1 \
|
||||||
|
RUNNER_TAG=runner1 \
|
||||||
|
make acceptance/pull acceptance/kind docker-build acceptance/load acceptance/deploy
|
||||||
|
```
|
||||||
|
|
||||||
|
If you've already deployed actions-runner-controller and only want to recreate pods to use the newer image, you can run:
|
||||||
|
|
||||||
|
```shell
|
||||||
|
NAME=$DOCKER_USER/actions-runner-controller \
|
||||||
|
make docker-build acceptance/load && \
|
||||||
|
kubectl -n actions-runner-system delete po $(kubectl -n actions-runner-system get po -ojsonpath={.items[*].metadata.name})
|
||||||
|
```
|
||||||
|
|
||||||
|
Similarly, if you'd like to recreate runner pods with the newer runner image,
|
||||||
|
|
||||||
|
```shell
|
||||||
|
NAME=$DOCKER_USER/actions-runner make \
|
||||||
|
-C runner docker-{build,push}-ubuntu && \
|
||||||
|
(kubectl get po -ojsonpath={.items[*].metadata.name} | xargs -n1 kubectl delete po)
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Developing the Runners
|
||||||
|
|
||||||
|
**Tests**
|
||||||
|
|
||||||
|
A set of example pipelines (./acceptance/pipelines) are provided in this repository which you can use to validate your runners are working as expected. When raising a PR please run the relevant suites to prove your change hasn't broken anything.
|
||||||
|
|
||||||
|
**Running Ginkgo Tests**
|
||||||
|
|
||||||
|
You can run the integration test suite that is written in Ginkgo with:
|
||||||
|
|
||||||
|
```shell
|
||||||
|
make test-with-deps
|
||||||
|
```
|
||||||
|
|
||||||
|
This will firstly install a few binaries required to setup the integration test environment and then runs `go test` to start the Ginkgo test.
|
||||||
|
|
||||||
|
If you don't want to use `make`, like when you're running tests from your IDE, install required binaries to `/usr/local/kubebuilder/bin`. That's the directory in which controller-runtime's `envtest` framework locates the binaries.
|
||||||
|
|
||||||
|
```shell
|
||||||
|
sudo mkdir -p /usr/local/kubebuilder/bin
|
||||||
|
make kube-apiserver etcd
|
||||||
|
sudo mv test-assets/{etcd,kube-apiserver} /usr/local/kubebuilder/bin/
|
||||||
|
go test -v -run TestAPIs github.com/actions-runner-controller/actions-runner-controller/controllers
|
||||||
|
```
|
||||||
|
|
||||||
|
To run Ginkgo tests selectively, set the pattern of target test names to `GINKGO_FOCUS`.
|
||||||
|
All the Ginkgo test that matches `GINKGO_FOCUS` will be run.
|
||||||
|
|
||||||
|
```shell
|
||||||
|
GINKGO_FOCUS='[It] should create a new Runner resource from the specified template, add a another Runner on replicas increased, and removes all the replicas when set to 0' \
|
||||||
|
go test -v -run TestAPIs github.com/actions-runner-controller/actions-runner-controller/controllers
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Helm Version Bumps
|
||||||
|
|
||||||
|
**Chart Version :** When bumping the chart version follow semantic versioning https://semver.org/<br />
|
||||||
|
**App Version :** When bumping the app version you will also need to bump the chart version too. Again, follow semantic versioning when bumping the chart.
|
||||||
|
|
||||||
|
To determine if you need to bump the MAJOR, MINOR or PATCH versions you will need to review the changes between the previous app version and the new app version and / or ask for a maintainer to advise.
|
||||||
@@ -1,5 +1,5 @@
|
|||||||
# Build the manager binary
|
# Build the manager binary
|
||||||
FROM golang:1.15 as builder
|
FROM golang:1.17 as builder
|
||||||
|
|
||||||
ARG TARGETPLATFORM
|
ARG TARGETPLATFORM
|
||||||
|
|
||||||
|
|||||||
129
Makefile
129
Makefile
@@ -1,11 +1,32 @@
|
|||||||
NAME ?= summerwind/actions-runner-controller
|
ifdef DOCKER_USER
|
||||||
|
NAME ?= ${DOCKER_USER}/actions-runner-controller
|
||||||
|
else
|
||||||
|
NAME ?= summerwind/actions-runner-controller
|
||||||
|
endif
|
||||||
|
DOCKER_USER ?= $(shell echo ${NAME} | cut -d / -f1)
|
||||||
VERSION ?= latest
|
VERSION ?= latest
|
||||||
|
RUNNER_NAME ?= ${DOCKER_USER}/actions-runner
|
||||||
|
RUNNER_TAG ?= ${VERSION}
|
||||||
|
TEST_REPO ?= ${DOCKER_USER}/actions-runner-controller
|
||||||
|
TEST_ORG ?=
|
||||||
|
TEST_ORG_REPO ?=
|
||||||
|
TEST_EPHEMERAL ?= false
|
||||||
|
SYNC_PERIOD ?= 5m
|
||||||
|
USE_RUNNERSET ?=
|
||||||
|
RUNNER_FEATURE_FLAG_EPHEMERAL ?=
|
||||||
|
KUBECONTEXT ?= kind-acceptance
|
||||||
|
CLUSTER ?= acceptance
|
||||||
|
CERT_MANAGER_VERSION ?= v1.1.1
|
||||||
|
|
||||||
# From https://github.com/VictoriaMetrics/operator/pull/44
|
# From https://github.com/VictoriaMetrics/operator/pull/44
|
||||||
YAML_DROP=$(YQ) delete --inplace
|
YAML_DROP=$(YQ) delete --inplace
|
||||||
YAML_DROP_PREFIX=spec.validation.openAPIV3Schema.properties.spec.properties
|
|
||||||
|
# If you encounter errors like the below, you are very likely to update this to follow e.g. CRD version change:
|
||||||
|
# CustomResourceDefinition.apiextensions.k8s.io "runners.actions.summerwind.dev" is invalid: spec.preserveUnknownFields: Invalid value: true: must be false in order to use defaults in the schema
|
||||||
|
YAML_DROP_PREFIX=spec.versions[0].schema.openAPIV3Schema.properties.spec.properties
|
||||||
|
|
||||||
# Produce CRDs that work back to Kubernetes 1.11 (no version conversion)
|
# Produce CRDs that work back to Kubernetes 1.11 (no version conversion)
|
||||||
CRD_OPTIONS ?= "crd:trivialVersions=true"
|
CRD_OPTIONS ?= "crd:trivialVersions=true,generateEmbeddedObjectMeta=true"
|
||||||
|
|
||||||
# Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set)
|
# Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set)
|
||||||
ifeq (,$(shell go env GOBIN))
|
ifeq (,$(shell go env GOBIN))
|
||||||
@@ -24,8 +45,8 @@ endif
|
|||||||
# if IMG_RESULT is unspecified, by default the image will be pushed to registry
|
# if IMG_RESULT is unspecified, by default the image will be pushed to registry
|
||||||
ifeq (${IMG_RESULT}, load)
|
ifeq (${IMG_RESULT}, load)
|
||||||
export PUSH_ARG="--load"
|
export PUSH_ARG="--load"
|
||||||
# if load is specified, image will be built only for the build machine architecture.
|
# if load is specified, image will be built only for the build machine architecture.
|
||||||
export PLATFORMS="local"
|
export PLATFORMS="local"
|
||||||
else ifeq (${IMG_RESULT}, cache)
|
else ifeq (${IMG_RESULT}, cache)
|
||||||
# if cache is specified, image will only be available in the build cache, it won't be pushed or loaded
|
# if cache is specified, image will only be available in the build cache, it won't be pushed or loaded
|
||||||
# therefore no PUSH_ARG will be specified
|
# therefore no PUSH_ARG will be specified
|
||||||
@@ -35,9 +56,11 @@ endif
|
|||||||
|
|
||||||
all: manager
|
all: manager
|
||||||
|
|
||||||
|
GO_TEST_ARGS ?= -short
|
||||||
|
|
||||||
# Run tests
|
# Run tests
|
||||||
test: generate fmt vet manifests
|
test: generate fmt vet manifests
|
||||||
go test ./... -coverprofile cover.out
|
go test $(GO_TEST_ARGS) ./... -coverprofile cover.out
|
||||||
|
|
||||||
test-with-deps: kube-apiserver etcd kubectl
|
test-with-deps: kube-apiserver etcd kubectl
|
||||||
# See https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/envtest#pkg-constants
|
# See https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/envtest#pkg-constants
|
||||||
@@ -105,12 +128,9 @@ generate: controller-gen
|
|||||||
$(CONTROLLER_GEN) object:headerFile=./hack/boilerplate.go.txt paths="./..."
|
$(CONTROLLER_GEN) object:headerFile=./hack/boilerplate.go.txt paths="./..."
|
||||||
|
|
||||||
# Build the docker image
|
# Build the docker image
|
||||||
docker-build: test
|
docker-build:
|
||||||
docker build . -t ${NAME}:${VERSION}
|
docker build . -t ${NAME}:${VERSION}
|
||||||
|
docker build runner -t ${RUNNER_NAME}:${RUNNER_TAG} --build-arg TARGETPLATFORM=$(shell arch)
|
||||||
# Push the docker image
|
|
||||||
docker-push:
|
|
||||||
docker push ${NAME}:${VERSION}
|
|
||||||
|
|
||||||
docker-buildx:
|
docker-buildx:
|
||||||
export DOCKER_CLI_EXPERIMENTAL=enabled
|
export DOCKER_CLI_EXPERIMENTAL=enabled
|
||||||
@@ -124,6 +144,11 @@ docker-buildx:
|
|||||||
-f Dockerfile \
|
-f Dockerfile \
|
||||||
. ${PUSH_ARG}
|
. ${PUSH_ARG}
|
||||||
|
|
||||||
|
# Push the docker image
|
||||||
|
docker-push:
|
||||||
|
docker push ${NAME}:${VERSION}
|
||||||
|
docker push ${RUNNER_NAME}:${RUNNER_TAG}
|
||||||
|
|
||||||
# Generate the release manifest file
|
# Generate the release manifest file
|
||||||
release: manifests
|
release: manifests
|
||||||
cd config/manager && kustomize edit set image controller=${NAME}:${VERSION}
|
cd config/manager && kustomize edit set image controller=${NAME}:${VERSION}
|
||||||
@@ -135,19 +160,41 @@ release/clean:
|
|||||||
rm -rf release
|
rm -rf release
|
||||||
|
|
||||||
.PHONY: acceptance
|
.PHONY: acceptance
|
||||||
acceptance: release/clean docker-build docker-push release
|
acceptance: release/clean acceptance/pull docker-build release
|
||||||
ACCEPTANCE_TEST_SECRET_TYPE=token make acceptance/kind acceptance/setup acceptance/tests acceptance/teardown
|
ACCEPTANCE_TEST_SECRET_TYPE=token make acceptance/run
|
||||||
ACCEPTANCE_TEST_SECRET_TYPE=app make acceptance/kind acceptance/setup acceptance/tests acceptance/teardown
|
ACCEPTANCE_TEST_SECRET_TYPE=app make acceptance/run
|
||||||
ACCEPTANCE_TEST_DEPLOYMENT_TOOL=helm ACCEPTANCE_TEST_SECRET_TYPE=token make acceptance/kind acceptance/setup acceptance/tests acceptance/teardown
|
ACCEPTANCE_TEST_DEPLOYMENT_TOOL=helm ACCEPTANCE_TEST_SECRET_TYPE=token make acceptance/run
|
||||||
ACCEPTANCE_TEST_DEPLOYMENT_TOOL=helm ACCEPTANCE_TEST_SECRET_TYPE=app make acceptance/kind acceptance/setup acceptance/tests acceptance/teardown
|
ACCEPTANCE_TEST_DEPLOYMENT_TOOL=helm ACCEPTANCE_TEST_SECRET_TYPE=app make acceptance/run
|
||||||
|
|
||||||
|
acceptance/run: acceptance/kind acceptance/load acceptance/setup acceptance/deploy acceptance/tests acceptance/teardown
|
||||||
|
|
||||||
acceptance/kind:
|
acceptance/kind:
|
||||||
kind create cluster --name acceptance
|
kind create cluster --name ${CLUSTER} --config acceptance/kind.yaml
|
||||||
kubectl cluster-info --context kind-acceptance
|
|
||||||
|
# Set TMPDIR to somewhere under $HOME when you use docker installed with Ubuntu snap
|
||||||
|
# Otherwise `load docker-image` fail while running `docker save`.
|
||||||
|
# See https://kind.sigs.k8s.io/docs/user/known-issues/#docker-installed-with-snap
|
||||||
|
acceptance/load:
|
||||||
|
kind load docker-image ${NAME}:${VERSION} --name ${CLUSTER}
|
||||||
|
kind load docker-image quay.io/brancz/kube-rbac-proxy:v0.10.0 --name ${CLUSTER}
|
||||||
|
kind load docker-image ${RUNNER_NAME}:${RUNNER_TAG} --name ${CLUSTER}
|
||||||
|
kind load docker-image docker:dind --name ${CLUSTER}
|
||||||
|
kind load docker-image quay.io/jetstack/cert-manager-controller:$(CERT_MANAGER_VERSION) --name ${CLUSTER}
|
||||||
|
kind load docker-image quay.io/jetstack/cert-manager-cainjector:$(CERT_MANAGER_VERSION) --name ${CLUSTER}
|
||||||
|
kind load docker-image quay.io/jetstack/cert-manager-webhook:$(CERT_MANAGER_VERSION) --name ${CLUSTER}
|
||||||
|
kubectl cluster-info --context ${KUBECONTEXT}
|
||||||
|
|
||||||
|
# Pull the docker images for acceptance
|
||||||
|
acceptance/pull:
|
||||||
|
docker pull quay.io/brancz/kube-rbac-proxy:v0.10.0
|
||||||
|
docker pull docker:dind
|
||||||
|
docker pull quay.io/jetstack/cert-manager-controller:$(CERT_MANAGER_VERSION)
|
||||||
|
docker pull quay.io/jetstack/cert-manager-cainjector:$(CERT_MANAGER_VERSION)
|
||||||
|
docker pull quay.io/jetstack/cert-manager-webhook:$(CERT_MANAGER_VERSION)
|
||||||
|
|
||||||
acceptance/setup:
|
acceptance/setup:
|
||||||
kubectl apply --validate=false -f https://github.com/jetstack/cert-manager/releases/download/v1.0.4/cert-manager.yaml #kubectl create namespace actions-runner-system
|
kubectl apply --validate=false -f https://github.com/jetstack/cert-manager/releases/download/$(CERT_MANAGER_VERSION)/cert-manager.yaml #kubectl create namespace actions-runner-system
|
||||||
kubectl -n cert-manager wait deploy/cert-manager-cainjector --for condition=available --timeout 60s
|
kubectl -n cert-manager wait deploy/cert-manager-cainjector --for condition=available --timeout 90s
|
||||||
kubectl -n cert-manager wait deploy/cert-manager-webhook --for condition=available --timeout 60s
|
kubectl -n cert-manager wait deploy/cert-manager-webhook --for condition=available --timeout 60s
|
||||||
kubectl -n cert-manager wait deploy/cert-manager --for condition=available --timeout 60s
|
kubectl -n cert-manager wait deploy/cert-manager --for condition=available --timeout 60s
|
||||||
kubectl create namespace actions-runner-system || true
|
kubectl create namespace actions-runner-system || true
|
||||||
@@ -155,18 +202,37 @@ acceptance/setup:
|
|||||||
sleep 5
|
sleep 5
|
||||||
|
|
||||||
acceptance/teardown:
|
acceptance/teardown:
|
||||||
kind delete cluster --name acceptance
|
kind delete cluster --name ${CLUSTER}
|
||||||
|
|
||||||
|
acceptance/deploy:
|
||||||
|
NAME=${NAME} DOCKER_USER=${DOCKER_USER} VERSION=${VERSION} RUNNER_NAME=${RUNNER_NAME} RUNNER_TAG=${RUNNER_TAG} TEST_REPO=${TEST_REPO} \
|
||||||
|
TEST_ORG=${TEST_ORG} TEST_ORG_REPO=${TEST_ORG_REPO} SYNC_PERIOD=${SYNC_PERIOD} \
|
||||||
|
USE_RUNNERSET=${USE_RUNNERSET} \
|
||||||
|
TEST_EPHEMERAL=${TEST_EPHEMERAL} \
|
||||||
|
RUNNER_FEATURE_FLAG_EPHEMERAL=${RUNNER_FEATURE_FLAG_EPHEMERAL} \
|
||||||
|
acceptance/deploy.sh
|
||||||
|
|
||||||
acceptance/tests:
|
acceptance/tests:
|
||||||
acceptance/deploy.sh
|
|
||||||
acceptance/checks.sh
|
acceptance/checks.sh
|
||||||
|
|
||||||
|
# We use -count=1 instead of `go clean -testcache`
|
||||||
|
# See https://terratest.gruntwork.io/docs/testing-best-practices/avoid-test-caching/
|
||||||
|
.PHONY: e2e
|
||||||
|
e2e:
|
||||||
|
go test -count=1 -v -timeout 600s -run '^TestE2E$$' ./test/e2e
|
||||||
|
|
||||||
# Upload release file to GitHub.
|
# Upload release file to GitHub.
|
||||||
github-release: release
|
github-release: release
|
||||||
ghr ${VERSION} release/
|
ghr ${VERSION} release/
|
||||||
|
|
||||||
# find or download controller-gen
|
# Find or download controller-gen
|
||||||
# download controller-gen if necessary
|
#
|
||||||
|
# Note that controller-gen newer than 0.4.1 is needed for https://github.com/kubernetes-sigs/controller-tools/issues/444#issuecomment-680168439
|
||||||
|
# Otherwise we get errors like the below:
|
||||||
|
# Error: failed to install CRD crds/actions.summerwind.dev_runnersets.yaml: CustomResourceDefinition.apiextensions.k8s.io "runnersets.actions.summerwind.dev" is invalid: [spec.validation.openAPIV3Schema.properties[spec].properties[template].properties[spec].properties[containers].items.properties[ports].items.properties[protocol].default: Required value: this property is in x-kubernetes-list-map-keys, so it must have a default or be a required property, spec.validation.openAPIV3Schema.properties[spec].properties[template].properties[spec].properties[initContainers].items.properties[ports].items.properties[protocol].default: Required value: this property is in x-kubernetes-list-map-keys, so it must have a default or be a required property]
|
||||||
|
#
|
||||||
|
# Note that controller-gen newer than 0.6.0 is needed due to https://github.com/kubernetes-sigs/controller-tools/issues/448
|
||||||
|
# Otherwise ObjectMeta embedded in Spec results in empty on the storage.
|
||||||
controller-gen:
|
controller-gen:
|
||||||
ifeq (, $(shell which controller-gen))
|
ifeq (, $(shell which controller-gen))
|
||||||
ifeq (, $(wildcard $(GOBIN)/controller-gen))
|
ifeq (, $(wildcard $(GOBIN)/controller-gen))
|
||||||
@@ -175,7 +241,7 @@ ifeq (, $(wildcard $(GOBIN)/controller-gen))
|
|||||||
CONTROLLER_GEN_TMP_DIR=$$(mktemp -d) ;\
|
CONTROLLER_GEN_TMP_DIR=$$(mktemp -d) ;\
|
||||||
cd $$CONTROLLER_GEN_TMP_DIR ;\
|
cd $$CONTROLLER_GEN_TMP_DIR ;\
|
||||||
go mod init tmp ;\
|
go mod init tmp ;\
|
||||||
go get sigs.k8s.io/controller-tools/cmd/controller-gen@v0.3.0 ;\
|
go get sigs.k8s.io/controller-tools/cmd/controller-gen@v0.6.0 ;\
|
||||||
rm -rf $$CONTROLLER_GEN_TMP_DIR ;\
|
rm -rf $$CONTROLLER_GEN_TMP_DIR ;\
|
||||||
}
|
}
|
||||||
endif
|
endif
|
||||||
@@ -205,6 +271,7 @@ OS_NAME := $(shell uname -s | tr A-Z a-z)
|
|||||||
|
|
||||||
# find or download etcd
|
# find or download etcd
|
||||||
etcd:
|
etcd:
|
||||||
|
ifeq (, $(shell which etcd))
|
||||||
ifeq (, $(wildcard $(TEST_ASSETS)/etcd))
|
ifeq (, $(wildcard $(TEST_ASSETS)/etcd))
|
||||||
@{ \
|
@{ \
|
||||||
set -xe ;\
|
set -xe ;\
|
||||||
@@ -222,9 +289,13 @@ ETCD_BIN=$(TEST_ASSETS)/etcd
|
|||||||
else
|
else
|
||||||
ETCD_BIN=$(TEST_ASSETS)/etcd
|
ETCD_BIN=$(TEST_ASSETS)/etcd
|
||||||
endif
|
endif
|
||||||
|
else
|
||||||
|
ETCD_BIN=$(shell which etcd)
|
||||||
|
endif
|
||||||
|
|
||||||
# find or download kube-apiserver
|
# find or download kube-apiserver
|
||||||
kube-apiserver:
|
kube-apiserver:
|
||||||
|
ifeq (, $(shell which kube-apiserver))
|
||||||
ifeq (, $(wildcard $(TEST_ASSETS)/kube-apiserver))
|
ifeq (, $(wildcard $(TEST_ASSETS)/kube-apiserver))
|
||||||
@{ \
|
@{ \
|
||||||
set -xe ;\
|
set -xe ;\
|
||||||
@@ -242,10 +313,13 @@ KUBE_APISERVER_BIN=$(TEST_ASSETS)/kube-apiserver
|
|||||||
else
|
else
|
||||||
KUBE_APISERVER_BIN=$(TEST_ASSETS)/kube-apiserver
|
KUBE_APISERVER_BIN=$(TEST_ASSETS)/kube-apiserver
|
||||||
endif
|
endif
|
||||||
|
else
|
||||||
|
KUBE_APISERVER_BIN=$(shell which kube-apiserver)
|
||||||
|
endif
|
||||||
|
|
||||||
# find or download kubectl
|
# find or download kubectl
|
||||||
kubectl:
|
kubectl:
|
||||||
|
ifeq (, $(shell which kubectl))
|
||||||
ifeq (, $(wildcard $(TEST_ASSETS)/kubectl))
|
ifeq (, $(wildcard $(TEST_ASSETS)/kubectl))
|
||||||
@{ \
|
@{ \
|
||||||
set -xe ;\
|
set -xe ;\
|
||||||
@@ -263,3 +337,6 @@ KUBECTL_BIN=$(TEST_ASSETS)/kubectl
|
|||||||
else
|
else
|
||||||
KUBECTL_BIN=$(TEST_ASSETS)/kubectl
|
KUBECTL_BIN=$(TEST_ASSETS)/kubectl
|
||||||
endif
|
endif
|
||||||
|
else
|
||||||
|
KUBECTL_BIN=$(shell which kubectl)
|
||||||
|
endif
|
||||||
|
|||||||
2
PROJECT
2
PROJECT
@@ -1,5 +1,5 @@
|
|||||||
domain: summerwind.dev
|
domain: summerwind.dev
|
||||||
repo: github.com/summerwind/actions-runner-controller
|
repo: github.com/actions-runner-controller/actions-runner-controller
|
||||||
resources:
|
resources:
|
||||||
- group: actions
|
- group: actions
|
||||||
kind: Runner
|
kind: Runner
|
||||||
|
|||||||
@@ -1,29 +1,84 @@
|
|||||||
#!/usr/bin/env bash
|
#!/usr/bin/env bash
|
||||||
|
|
||||||
set -e
|
set +e
|
||||||
|
|
||||||
runner_name=
|
repo_runnerdeployment_passed="skipped"
|
||||||
|
repo_runnerset_passed="skipped"
|
||||||
|
|
||||||
while [ -z "${runner_name}" ]; do
|
echo "Checking if RunnerDeployment repo test is set"
|
||||||
echo Finding the runner... 1>&2
|
if [ "${TEST_REPO}" ] && [ ! "${USE_RUNNERSET}" ]; then
|
||||||
sleep 1
|
runner_name=
|
||||||
runner_name=$(kubectl get runner --output=jsonpath="{.items[*].metadata.name}")
|
count=0
|
||||||
done
|
while [ $count -le 30 ]; do
|
||||||
|
echo "Finding Runner ..."
|
||||||
|
runner_name=$(kubectl get runner --output=jsonpath="{.items[*].metadata.name}")
|
||||||
|
if [ "${runner_name}" ]; then
|
||||||
|
while [ $count -le 30 ]; do
|
||||||
|
runner_pod_name=
|
||||||
|
echo "Found Runner \""${runner_name}"\""
|
||||||
|
echo "Finding underlying pod ..."
|
||||||
|
runner_pod_name=$(kubectl get pod --output=jsonpath="{.items[*].metadata.name}" | grep ${runner_name})
|
||||||
|
if [ "${runner_pod_name}" ]; then
|
||||||
|
echo "Found underlying pod \""${runner_pod_name}"\""
|
||||||
|
echo "Waiting for pod \""${runner_pod_name}"\" to become ready..."
|
||||||
|
kubectl wait pod/${runner_pod_name} --for condition=ready --timeout 270s
|
||||||
|
break 2
|
||||||
|
fi
|
||||||
|
sleep 1
|
||||||
|
let "count=count+1"
|
||||||
|
done
|
||||||
|
fi
|
||||||
|
sleep 1
|
||||||
|
let "count=count+1"
|
||||||
|
done
|
||||||
|
if [ $count -ge 30 ]; then
|
||||||
|
repo_runnerdeployment_passed=false
|
||||||
|
else
|
||||||
|
repo_runnerdeployment_passed=true
|
||||||
|
fi
|
||||||
|
echo "Checking if RunnerSet repo test is set"
|
||||||
|
elif [ "${TEST_REPO}" ] && [ "${USE_RUNNERSET}" ]; then
|
||||||
|
runnerset_name=
|
||||||
|
count=0
|
||||||
|
while [ $count -le 30 ]; do
|
||||||
|
echo "Finding RunnerSet ..."
|
||||||
|
runnerset_name=$(kubectl get runnerset --output=jsonpath="{.items[*].metadata.name}")
|
||||||
|
if [ "${runnerset_name}" ]; then
|
||||||
|
while [ $count -le 30 ]; do
|
||||||
|
runnerset_pod_name=
|
||||||
|
echo "Found RunnerSet \""${runnerset_name}"\""
|
||||||
|
echo "Finding underlying pod ..."
|
||||||
|
runnerset_pod_name=$(kubectl get pod --output=jsonpath="{.items[*].metadata.name}" | grep ${runnerset_name})
|
||||||
|
echo "BEFORE IF"
|
||||||
|
if [ "${runnerset_pod_name}" ]; then
|
||||||
|
echo "AFTER IF"
|
||||||
|
echo "Found underlying pod \""${runnerset_pod_name}"\""
|
||||||
|
echo "Waiting for pod \""${runnerset_pod_name}"\" to become ready..."
|
||||||
|
kubectl wait pod/${runnerset_pod_name} --for condition=ready --timeout 270s
|
||||||
|
break 2
|
||||||
|
fi
|
||||||
|
sleep 1
|
||||||
|
let "count=count+1"
|
||||||
|
done
|
||||||
|
fi
|
||||||
|
sleep 1
|
||||||
|
let "count=count+1"
|
||||||
|
done
|
||||||
|
if [ $count -ge 30 ]; then
|
||||||
|
repo_runnerset_passed=false
|
||||||
|
else
|
||||||
|
repo_runnerset_passed=true
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
echo Found runner ${runner_name}.
|
if [ ${repo_runnerset_passed} == true ] || [ ${repo_runnerset_passed} == "skipped" ] && \
|
||||||
|
[ ${repo_runnerdeployment_passed} == true ] || [ ${repo_runnerdeployment_passed} == "skipped" ]; then
|
||||||
pod_name=
|
echo "INFO : All tests passed or skipped"
|
||||||
|
echo "RunnerSet Repo Test Status : ${repo_runnerset_passed}"
|
||||||
while [ -z "${pod_name}" ]; do
|
echo "RunnerDeployment Repo Test Status : ${repo_runnerdeployment_passed}"
|
||||||
echo Finding the runner pod... 1>&2
|
else
|
||||||
sleep 1
|
echo "ERROR : Some tests failed"
|
||||||
pod_name=$(kubectl get pod --output=jsonpath="{.items[*].metadata.name}" | grep ${runner_name})
|
echo "RunnerSet Repo Test Status : ${repo_runnerset_passed}"
|
||||||
done
|
echo "RunnerDeployment Repo Test Status : ${repo_runnerdeployment_passed}"
|
||||||
|
exit 1
|
||||||
echo Found pod ${pod_name}.
|
fi
|
||||||
|
|
||||||
echo Waiting for pod ${runner_name} to become ready... 1>&2
|
|
||||||
|
|
||||||
kubectl wait pod/${runner_name} --for condition=ready --timeout 180s
|
|
||||||
|
|
||||||
echo All tests passed. 1>&2
|
|
||||||
@@ -4,10 +4,14 @@ set -e
|
|||||||
|
|
||||||
tpe=${ACCEPTANCE_TEST_SECRET_TYPE}
|
tpe=${ACCEPTANCE_TEST_SECRET_TYPE}
|
||||||
|
|
||||||
|
VALUES_FILE=${VALUES_FILE:-$(dirname $0)/values.yaml}
|
||||||
|
|
||||||
if [ "${tpe}" == "token" ]; then
|
if [ "${tpe}" == "token" ]; then
|
||||||
kubectl create secret generic controller-manager \
|
if ! kubectl get secret controller-manager -n actions-runner-system >/dev/null; then
|
||||||
-n actions-runner-system \
|
kubectl create secret generic controller-manager \
|
||||||
--from-literal=github_token=${GITHUB_TOKEN:?GITHUB_TOKEN must not be empty}
|
-n actions-runner-system \
|
||||||
|
--from-literal=github_token=${GITHUB_TOKEN:?GITHUB_TOKEN must not be empty}
|
||||||
|
fi
|
||||||
elif [ "${tpe}" == "app" ]; then
|
elif [ "${tpe}" == "app" ]; then
|
||||||
kubectl create secret generic controller-manager \
|
kubectl create secret generic controller-manager \
|
||||||
-n actions-runner-system \
|
-n actions-runner-system \
|
||||||
@@ -26,17 +30,46 @@ if [ "${tool}" == "helm" ]; then
|
|||||||
charts/actions-runner-controller \
|
charts/actions-runner-controller \
|
||||||
-n actions-runner-system \
|
-n actions-runner-system \
|
||||||
--create-namespace \
|
--create-namespace \
|
||||||
--set syncPeriod=5m
|
--set syncPeriod=${SYNC_PERIOD} \
|
||||||
kubectl -n actions-runner-system wait deploy/actions-runner-controller --for condition=available
|
--set authSecret.create=false \
|
||||||
|
--set image.repository=${NAME} \
|
||||||
|
--set image.tag=${VERSION} \
|
||||||
|
-f ${VALUES_FILE}
|
||||||
|
kubectl apply -f charts/actions-runner-controller/crds
|
||||||
|
kubectl -n actions-runner-system wait deploy/actions-runner-controller --for condition=available --timeout 60s
|
||||||
else
|
else
|
||||||
kubectl apply \
|
kubectl apply \
|
||||||
-n actions-runner-system \
|
-n actions-runner-system \
|
||||||
-f release/actions-runner-controller.yaml
|
-f release/actions-runner-controller.yaml
|
||||||
kubectl -n actions-runner-system wait deploy/controller-manager --for condition=available --timeout 60s
|
kubectl -n actions-runner-system wait deploy/controller-manager --for condition=available --timeout 120s
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Adhocly wait for some time until actions-runner-controller's admission webhook gets ready
|
# Adhocly wait for some time until actions-runner-controller's admission webhook gets ready
|
||||||
sleep 20
|
sleep 20
|
||||||
|
|
||||||
kubectl apply \
|
RUNNER_LABEL=${RUNNER_LABEL:-self-hosted}
|
||||||
-f acceptance/testdata/runnerdeploy.yaml
|
|
||||||
|
if [ -n "${TEST_REPO}" ]; then
|
||||||
|
if [ -n "USE_RUNNERSET" ]; then
|
||||||
|
cat acceptance/testdata/repo.runnerset.yaml | envsubst | kubectl apply -f -
|
||||||
|
cat acceptance/testdata/repo.runnerset.hra.yaml | envsubst | kubectl apply -f -
|
||||||
|
else
|
||||||
|
echo 'Deploying runnerdeployment and hra. Set USE_RUNNERSET if you want to deploy runnerset instead.'
|
||||||
|
cat acceptance/testdata/repo.runnerdeploy.yaml | envsubst | kubectl apply -f -
|
||||||
|
cat acceptance/testdata/repo.hra.yaml | envsubst | kubectl apply -f -
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
echo 'Skipped deploying runnerdeployment and hra. Set TEST_REPO to "yourorg/yourrepo" to deploy.'
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ -n "${TEST_ORG}" ]; then
|
||||||
|
cat acceptance/testdata/org.runnerdeploy.yaml | envsubst | kubectl apply -f -
|
||||||
|
|
||||||
|
if [ -n "${TEST_ORG_REPO}" ]; then
|
||||||
|
cat acceptance/testdata/org.hra.yaml | envsubst | kubectl apply -f -
|
||||||
|
else
|
||||||
|
echo 'Skipped deploying organizational hra. Set TEST_ORG_REPO to "yourorg/yourrepo" to deploy.'
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
echo 'Skipped deploying organizational runnerdeployment. Set TEST_ORG to deploy.'
|
||||||
|
fi
|
||||||
|
|||||||
10
acceptance/kind.yaml
Normal file
10
acceptance/kind.yaml
Normal file
@@ -0,0 +1,10 @@
|
|||||||
|
apiVersion: kind.x-k8s.io/v1alpha4
|
||||||
|
kind: Cluster
|
||||||
|
nodes:
|
||||||
|
- role: control-plane
|
||||||
|
extraPortMappings:
|
||||||
|
- containerPort: 31000
|
||||||
|
hostPort: 31000
|
||||||
|
listenAddress: "0.0.0.0"
|
||||||
|
protocol: tcp
|
||||||
|
#- role: worker
|
||||||
36
acceptance/pipelines/eks-integration-tests.yaml
Normal file
36
acceptance/pipelines/eks-integration-tests.yaml
Normal file
@@ -0,0 +1,36 @@
|
|||||||
|
name: EKS Integration Tests
|
||||||
|
|
||||||
|
on:
|
||||||
|
workflow_dispatch:
|
||||||
|
|
||||||
|
env:
|
||||||
|
IRSA_ROLE_ARN:
|
||||||
|
ASSUME_ROLE_ARN:
|
||||||
|
AWS_REGION:
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
assume-role-in-runner-test:
|
||||||
|
runs-on: ['self-hosted', 'Linux']
|
||||||
|
steps:
|
||||||
|
- name: Test aws-actions/configure-aws-credentials Action
|
||||||
|
uses: aws-actions/configure-aws-credentials@v1
|
||||||
|
with:
|
||||||
|
aws-region: ${{ env.AWS_REGION }}
|
||||||
|
role-to-assume: ${{ env.ASSUME_ROLE_ARN }}
|
||||||
|
role-duration-seconds: 900
|
||||||
|
assume-role-in-container-test:
|
||||||
|
runs-on: ['self-hosted', 'Linux']
|
||||||
|
container:
|
||||||
|
image: amazon/aws-cli
|
||||||
|
env:
|
||||||
|
AWS_WEB_IDENTITY_TOKEN_FILE: /var/run/secrets/eks.amazonaws.com/serviceaccount/token
|
||||||
|
AWS_ROLE_ARN: ${{ env.IRSA_ROLE_ARN }}
|
||||||
|
volumes:
|
||||||
|
- /var/run/secrets/eks.amazonaws.com/serviceaccount/token:/var/run/secrets/eks.amazonaws.com/serviceaccount/token
|
||||||
|
steps:
|
||||||
|
- name: Test aws-actions/configure-aws-credentials Action in container
|
||||||
|
uses: aws-actions/configure-aws-credentials@v1
|
||||||
|
with:
|
||||||
|
aws-region: ${{ env.AWS_REGION }}
|
||||||
|
role-to-assume: ${{ env.ASSUME_ROLE_ARN }}
|
||||||
|
role-duration-seconds: 900
|
||||||
83
acceptance/pipelines/runner-integration-tests.yaml
Normal file
83
acceptance/pipelines/runner-integration-tests.yaml
Normal file
@@ -0,0 +1,83 @@
|
|||||||
|
name: Runner Integration Tests
|
||||||
|
|
||||||
|
on:
|
||||||
|
workflow_dispatch:
|
||||||
|
|
||||||
|
env:
|
||||||
|
ImageOS: ubuntu18 # Used by ruby/setup-ruby action | Update me for the runner OS version you are testing against
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
run-step-in-container-test:
|
||||||
|
runs-on: ['self-hosted', 'Linux']
|
||||||
|
container:
|
||||||
|
image: alpine
|
||||||
|
steps:
|
||||||
|
- name: Test we are working in the container
|
||||||
|
run: |
|
||||||
|
if [[ $(sed -n '2p' < /etc/os-release | cut -d "=" -f2) != "alpine" ]]; then
|
||||||
|
echo "::error ::Failed OS detection test, could not match /etc/os-release with alpine. Are we really running in the container?"
|
||||||
|
echo "/etc/os-release below:"
|
||||||
|
cat /etc/os-release
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
setup-python-test:
|
||||||
|
runs-on: ['self-hosted', 'Linux']
|
||||||
|
steps:
|
||||||
|
- name: Print native Python environment
|
||||||
|
run: |
|
||||||
|
which python
|
||||||
|
python --version
|
||||||
|
- uses: actions/setup-python@v2
|
||||||
|
with:
|
||||||
|
python-version: 3.9
|
||||||
|
- name: Test actions/setup-python works
|
||||||
|
run: |
|
||||||
|
VERSION=$(python --version 2>&1 | cut -d ' ' -f2 | cut -d '.' -f1-2)
|
||||||
|
if [[ $VERSION != '3.9' ]]; then
|
||||||
|
echo "Python version detected : $(python --version 2>&1)"
|
||||||
|
echo "::error ::Detected python failed setup version test, could not match version with version specified in the setup action"
|
||||||
|
exit 1
|
||||||
|
else
|
||||||
|
echo "Python version detected : $(python --version 2>&1)"
|
||||||
|
fi
|
||||||
|
setup-node-test:
|
||||||
|
runs-on: ['self-hosted', 'Linux']
|
||||||
|
steps:
|
||||||
|
- uses: actions/setup-node@v2
|
||||||
|
with:
|
||||||
|
node-version: '12'
|
||||||
|
- name: Test actions/setup-node works
|
||||||
|
run: |
|
||||||
|
VERSION=$(node --version | cut -c 2- | cut -d '.' -f1)
|
||||||
|
if [[ $VERSION != '12' ]]; then
|
||||||
|
echo "Node version detected : $(node --version 2>&1)"
|
||||||
|
echo "::error ::Detected node failed setup version test, could not match version with version specified in the setup action"
|
||||||
|
exit 1
|
||||||
|
else
|
||||||
|
echo "Node version detected : $(node --version 2>&1)"
|
||||||
|
fi
|
||||||
|
setup-ruby-test:
|
||||||
|
runs-on: ['self-hosted', 'Linux']
|
||||||
|
steps:
|
||||||
|
- uses: ruby/setup-ruby@v1
|
||||||
|
with:
|
||||||
|
ruby-version: 3.0
|
||||||
|
bundler-cache: true
|
||||||
|
- name: Test ruby/setup-ruby works
|
||||||
|
run: |
|
||||||
|
VERSION=$(ruby --version | cut -d ' ' -f2 | cut -d '.' -f1-2)
|
||||||
|
if [[ $VERSION != '3.0' ]]; then
|
||||||
|
echo "Ruby version detected : $(ruby --version 2>&1)"
|
||||||
|
echo "::error ::Detected ruby failed setup version test, could not match version with version specified in the setup action"
|
||||||
|
exit 1
|
||||||
|
else
|
||||||
|
echo "Ruby version detected : $(ruby --version 2>&1)"
|
||||||
|
fi
|
||||||
|
python-shell-test:
|
||||||
|
runs-on: ['self-hosted', 'Linux']
|
||||||
|
steps:
|
||||||
|
- name: Test Python shell works
|
||||||
|
run: |
|
||||||
|
import os
|
||||||
|
print(os.environ['PATH'])
|
||||||
|
shell: python
|
||||||
36
acceptance/testdata/org.hra.yaml
vendored
Normal file
36
acceptance/testdata/org.hra.yaml
vendored
Normal file
@@ -0,0 +1,36 @@
|
|||||||
|
apiVersion: actions.summerwind.dev/v1alpha1
|
||||||
|
kind: HorizontalRunnerAutoscaler
|
||||||
|
metadata:
|
||||||
|
name: org
|
||||||
|
spec:
|
||||||
|
scaleTargetRef:
|
||||||
|
name: org-runnerdeploy
|
||||||
|
scaleUpTriggers:
|
||||||
|
- githubEvent:
|
||||||
|
checkRun:
|
||||||
|
types: ["created"]
|
||||||
|
status: "queued"
|
||||||
|
amount: 1
|
||||||
|
duration: "1m"
|
||||||
|
scheduledOverrides:
|
||||||
|
- startTime: "2021-05-11T16:05:00+09:00"
|
||||||
|
endTime: "2021-05-11T16:40:00+09:00"
|
||||||
|
minReplicas: 2
|
||||||
|
- startTime: "2021-05-01T00:00:00+09:00"
|
||||||
|
endTime: "2021-05-03T00:00:00+09:00"
|
||||||
|
recurrenceRule:
|
||||||
|
frequency: Weekly
|
||||||
|
untilTime: "2022-05-01T00:00:00+09:00"
|
||||||
|
minReplicas: 0
|
||||||
|
minReplicas: 0
|
||||||
|
maxReplicas: 5
|
||||||
|
# Used to test that HRA is working for org runners
|
||||||
|
metrics:
|
||||||
|
- type: PercentageRunnersBusy
|
||||||
|
scaleUpThreshold: '0.75'
|
||||||
|
scaleDownThreshold: '0.3'
|
||||||
|
scaleUpFactor: '2'
|
||||||
|
scaleDownFactor: '0.5'
|
||||||
|
- type: TotalNumberOfQueuedAndInProgressWorkflowRuns
|
||||||
|
repositoryNames:
|
||||||
|
- ${TEST_ORG_REPO}
|
||||||
37
acceptance/testdata/org.runnerdeploy.yaml
vendored
Normal file
37
acceptance/testdata/org.runnerdeploy.yaml
vendored
Normal file
@@ -0,0 +1,37 @@
|
|||||||
|
apiVersion: actions.summerwind.dev/v1alpha1
|
||||||
|
kind: RunnerDeployment
|
||||||
|
metadata:
|
||||||
|
name: org-runnerdeploy
|
||||||
|
spec:
|
||||||
|
# replicas: 1
|
||||||
|
template:
|
||||||
|
spec:
|
||||||
|
organization: ${TEST_ORG}
|
||||||
|
|
||||||
|
#
|
||||||
|
# Custom runner image
|
||||||
|
#
|
||||||
|
image: ${RUNNER_NAME}:${RUNNER_TAG}
|
||||||
|
imagePullPolicy: IfNotPresent
|
||||||
|
|
||||||
|
#
|
||||||
|
# dockerd within runner container
|
||||||
|
#
|
||||||
|
## Replace `mumoshu/actions-runner-dind:dev` with your dind image
|
||||||
|
#dockerdWithinRunnerContainer: true
|
||||||
|
#image: mumoshu/actions-runner-dind:dev
|
||||||
|
|
||||||
|
#
|
||||||
|
# Set the MTU used by dockerd-managed network interfaces (including docker-build-ubuntu)
|
||||||
|
#
|
||||||
|
#dockerMTU: 1450
|
||||||
|
|
||||||
|
#Runner group
|
||||||
|
# labels:
|
||||||
|
# - "mylabel 1"
|
||||||
|
# - "mylabel 2"
|
||||||
|
|
||||||
|
#
|
||||||
|
# Non-standard working directory
|
||||||
|
#
|
||||||
|
# workDir: "/"
|
||||||
25
acceptance/testdata/repo.hra.yaml
vendored
Normal file
25
acceptance/testdata/repo.hra.yaml
vendored
Normal file
@@ -0,0 +1,25 @@
|
|||||||
|
apiVersion: actions.summerwind.dev/v1alpha1
|
||||||
|
kind: HorizontalRunnerAutoscaler
|
||||||
|
metadata:
|
||||||
|
name: actions-runner-aos-autoscaler
|
||||||
|
spec:
|
||||||
|
scaleTargetRef:
|
||||||
|
name: example-runnerdeploy
|
||||||
|
scaleUpTriggers:
|
||||||
|
- githubEvent:
|
||||||
|
checkRun:
|
||||||
|
types: ["created"]
|
||||||
|
status: "queued"
|
||||||
|
amount: 1
|
||||||
|
duration: "1m"
|
||||||
|
minReplicas: 0
|
||||||
|
maxReplicas: 5
|
||||||
|
metrics:
|
||||||
|
- type: PercentageRunnersBusy
|
||||||
|
scaleUpThreshold: '0.75'
|
||||||
|
scaleDownThreshold: '0.3'
|
||||||
|
scaleUpFactor: '2'
|
||||||
|
scaleDownFactor: '0.5'
|
||||||
|
- type: TotalNumberOfQueuedAndInProgressWorkflowRuns
|
||||||
|
repositoryNames:
|
||||||
|
- ${TEST_REPO}
|
||||||
@@ -6,7 +6,14 @@ spec:
|
|||||||
# replicas: 1
|
# replicas: 1
|
||||||
template:
|
template:
|
||||||
spec:
|
spec:
|
||||||
repository: mumoshu/actions-runner-controller-ci
|
repository: ${TEST_REPO}
|
||||||
|
|
||||||
|
#
|
||||||
|
# Custom runner image
|
||||||
|
#
|
||||||
|
image: ${RUNNER_NAME}:${RUNNER_TAG}
|
||||||
|
imagePullPolicy: IfNotPresent
|
||||||
|
|
||||||
#
|
#
|
||||||
# dockerd within runner container
|
# dockerd within runner container
|
||||||
#
|
#
|
||||||
@@ -15,6 +22,16 @@ spec:
|
|||||||
#image: mumoshu/actions-runner-dind:dev
|
#image: mumoshu/actions-runner-dind:dev
|
||||||
|
|
||||||
#
|
#
|
||||||
# Set the MTU used by dockerd-managed network interfaces (including docker-build)
|
# Set the MTU used by dockerd-managed network interfaces (including docker-build-ubuntu)
|
||||||
#
|
#
|
||||||
#dockerMTU: 1450
|
#dockerMTU: 1450
|
||||||
|
|
||||||
|
#Runner group
|
||||||
|
# labels:
|
||||||
|
# - "mylabel 1"
|
||||||
|
# - "mylabel 2"
|
||||||
|
|
||||||
|
#
|
||||||
|
# Non-standard working directory
|
||||||
|
#
|
||||||
|
# workDir: "/"
|
||||||
29
acceptance/testdata/repo.runnerset.hra.yaml
vendored
Normal file
29
acceptance/testdata/repo.runnerset.hra.yaml
vendored
Normal file
@@ -0,0 +1,29 @@
|
|||||||
|
apiVersion: actions.summerwind.dev/v1alpha1
|
||||||
|
kind: HorizontalRunnerAutoscaler
|
||||||
|
metadata:
|
||||||
|
name: example-runnerset
|
||||||
|
spec:
|
||||||
|
scaleTargetRef:
|
||||||
|
kind: RunnerSet
|
||||||
|
name: example-runnerset
|
||||||
|
scaleUpTriggers:
|
||||||
|
- githubEvent:
|
||||||
|
checkRun:
|
||||||
|
types: ["created"]
|
||||||
|
status: "queued"
|
||||||
|
amount: 1
|
||||||
|
duration: "1m"
|
||||||
|
# RunnerSet doesn't support scale from/to zero yet
|
||||||
|
minReplicas: 1
|
||||||
|
maxReplicas: 5
|
||||||
|
# This should be less than 600(seconds, the default) for faster testing
|
||||||
|
scaleDownDelaySecondsAfterScaleOut: 60
|
||||||
|
metrics:
|
||||||
|
- type: PercentageRunnersBusy
|
||||||
|
scaleUpThreshold: '0.75'
|
||||||
|
scaleDownThreshold: '0.3'
|
||||||
|
scaleUpFactor: '2'
|
||||||
|
scaleDownFactor: '0.5'
|
||||||
|
- type: TotalNumberOfQueuedAndInProgressWorkflowRuns
|
||||||
|
repositoryNames:
|
||||||
|
- ${TEST_REPO}
|
||||||
59
acceptance/testdata/repo.runnerset.yaml
vendored
Normal file
59
acceptance/testdata/repo.runnerset.yaml
vendored
Normal file
@@ -0,0 +1,59 @@
|
|||||||
|
apiVersion: actions.summerwind.dev/v1alpha1
|
||||||
|
kind: RunnerSet
|
||||||
|
metadata:
|
||||||
|
name: example-runnerset
|
||||||
|
spec:
|
||||||
|
# MANDATORY because it is based on StatefulSet: Results in a below error when omitted:
|
||||||
|
# missing required field "selector" in dev.summerwind.actions.v1alpha1.RunnerSet.spec
|
||||||
|
selector:
|
||||||
|
matchLabels:
|
||||||
|
app: example-runnerset
|
||||||
|
|
||||||
|
# MANDATORY because it is based on StatefulSet: Results in a below error when omitted:
|
||||||
|
# missing required field "serviceName" in dev.summerwind.actions.v1alpha1.RunnerSet.spec]
|
||||||
|
serviceName: example-runnerset
|
||||||
|
|
||||||
|
#replicas: 1
|
||||||
|
|
||||||
|
# From my limited testing, `ephemeral: true` is more reliable.
|
||||||
|
# Seomtimes, updating already deployed runners from `ephemeral: false` to `ephemeral: true` seems to
|
||||||
|
# result in queued jobs hanging forever.
|
||||||
|
ephemeral: ${TEST_EPHEMERAL}
|
||||||
|
|
||||||
|
repository: ${TEST_REPO}
|
||||||
|
#
|
||||||
|
# Custom runner image
|
||||||
|
#
|
||||||
|
image: ${RUNNER_NAME}:${RUNNER_TAG}
|
||||||
|
#
|
||||||
|
# dockerd within runner container
|
||||||
|
#
|
||||||
|
## Replace `mumoshu/actions-runner-dind:dev` with your dind image
|
||||||
|
#dockerdWithinRunnerContainer: true
|
||||||
|
#
|
||||||
|
# Set the MTU used by dockerd-managed network interfaces (including docker-build-ubuntu)
|
||||||
|
#
|
||||||
|
#dockerMTU: 1450
|
||||||
|
#Runner group
|
||||||
|
# labels:
|
||||||
|
# - "mylabel 1"
|
||||||
|
# - "mylabel 2"
|
||||||
|
labels:
|
||||||
|
- "${RUNNER_LABEL}"
|
||||||
|
#
|
||||||
|
# Non-standard working directory
|
||||||
|
#
|
||||||
|
# workDir: "/"
|
||||||
|
template:
|
||||||
|
metadata:
|
||||||
|
labels:
|
||||||
|
app: example-runnerset
|
||||||
|
spec:
|
||||||
|
containers:
|
||||||
|
- name: runner
|
||||||
|
imagePullPolicy: IfNotPresent
|
||||||
|
env:
|
||||||
|
- name: RUNNER_FEATURE_FLAG_EPHEMERAL
|
||||||
|
value: "${RUNNER_FEATURE_FLAG_EPHEMERAL}"
|
||||||
|
#- name: docker
|
||||||
|
# #image: mumoshu/actions-runner-dind:dev
|
||||||
20
acceptance/values.yaml
Normal file
20
acceptance/values.yaml
Normal file
@@ -0,0 +1,20 @@
|
|||||||
|
# Set actions-runner-controller settings for testing
|
||||||
|
githubAPICacheDuration: 10s
|
||||||
|
githubWebhookServer:
|
||||||
|
enabled: true
|
||||||
|
labels: {}
|
||||||
|
replicaCount: 1
|
||||||
|
syncPeriod: 10m
|
||||||
|
secret:
|
||||||
|
create: true
|
||||||
|
name: "github-webhook-server"
|
||||||
|
### GitHub Webhook Configuration
|
||||||
|
#github_webhook_secret_token: ""
|
||||||
|
service:
|
||||||
|
type: NodePort
|
||||||
|
ports:
|
||||||
|
- port: 80
|
||||||
|
targetPort: http
|
||||||
|
protocol: TCP
|
||||||
|
name: http
|
||||||
|
nodePort: 31000
|
||||||
@@ -54,6 +54,12 @@ type HorizontalRunnerAutoscalerSpec struct {
|
|||||||
ScaleUpTriggers []ScaleUpTrigger `json:"scaleUpTriggers,omitempty"`
|
ScaleUpTriggers []ScaleUpTrigger `json:"scaleUpTriggers,omitempty"`
|
||||||
|
|
||||||
CapacityReservations []CapacityReservation `json:"capacityReservations,omitempty" patchStrategy:"merge" patchMergeKey:"name"`
|
CapacityReservations []CapacityReservation `json:"capacityReservations,omitempty" patchStrategy:"merge" patchMergeKey:"name"`
|
||||||
|
|
||||||
|
// ScheduledOverrides is the list of ScheduledOverride.
|
||||||
|
// It can be used to override a few fields of HorizontalRunnerAutoscalerSpec on schedule.
|
||||||
|
// The earlier a scheduled override is, the higher it is prioritized.
|
||||||
|
// +optional
|
||||||
|
ScheduledOverrides []ScheduledOverride `json:"scheduledOverrides,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
type ScaleUpTrigger struct {
|
type ScaleUpTrigger struct {
|
||||||
@@ -78,6 +84,10 @@ type CheckRunSpec struct {
|
|||||||
// Note that check_run name seem to equal to the job name you've defined in your actions workflow yaml file.
|
// Note that check_run name seem to equal to the job name you've defined in your actions workflow yaml file.
|
||||||
// So it is very likely that you can utilize this to trigger depending on the job.
|
// So it is very likely that you can utilize this to trigger depending on the job.
|
||||||
Names []string `json:"names,omitempty"`
|
Names []string `json:"names,omitempty"`
|
||||||
|
|
||||||
|
// Repositories is a list of GitHub repositories.
|
||||||
|
// Any check_run event whose repository matches one of repositories in the list can trigger autoscaling.
|
||||||
|
Repositories []string `json:"repositories,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// https://docs.github.com/en/actions/reference/events-that-trigger-workflows#pull_request
|
// https://docs.github.com/en/actions/reference/events-that-trigger-workflows#pull_request
|
||||||
@@ -100,6 +110,12 @@ type CapacityReservation struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
type ScaleTargetRef struct {
|
type ScaleTargetRef struct {
|
||||||
|
// Kind is the type of resource being referenced
|
||||||
|
// +optional
|
||||||
|
// +kubebuilder:validation:Enum=RunnerDeployment;RunnerSet
|
||||||
|
Kind string `json:"kind,omitempty"`
|
||||||
|
|
||||||
|
// Name is the name of resource being referenced
|
||||||
Name string `json:"name,omitempty"`
|
Name string `json:"name,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -144,6 +160,40 @@ type MetricSpec struct {
|
|||||||
ScaleDownAdjustment int `json:"scaleDownAdjustment,omitempty"`
|
ScaleDownAdjustment int `json:"scaleDownAdjustment,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ScheduledOverride can be used to override a few fields of HorizontalRunnerAutoscalerSpec on schedule.
|
||||||
|
// A schedule can optionally be recurring, so that the correspoding override happens every day, week, month, or year.
|
||||||
|
type ScheduledOverride struct {
|
||||||
|
// StartTime is the time at which the first override starts.
|
||||||
|
StartTime metav1.Time `json:"startTime"`
|
||||||
|
|
||||||
|
// EndTime is the time at which the first override ends.
|
||||||
|
EndTime metav1.Time `json:"endTime"`
|
||||||
|
|
||||||
|
// MinReplicas is the number of runners while overriding.
|
||||||
|
// If omitted, it doesn't override minReplicas.
|
||||||
|
// +optional
|
||||||
|
// +nullable
|
||||||
|
// +kubebuilder:validation:Minimum=0
|
||||||
|
MinReplicas *int `json:"minReplicas,omitempty"`
|
||||||
|
|
||||||
|
// +optional
|
||||||
|
RecurrenceRule RecurrenceRule `json:"recurrenceRule,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type RecurrenceRule struct {
|
||||||
|
// Frequency is the name of a predefined interval of each recurrence.
|
||||||
|
// The valid values are "Daily", "Weekly", "Monthly", and "Yearly".
|
||||||
|
// If empty, the corresponding override happens only once.
|
||||||
|
// +optional
|
||||||
|
// +kubebuilder:validation:Enum=Daily;Weekly;Monthly;Yearly
|
||||||
|
Frequency string `json:"frequency,omitempty"`
|
||||||
|
|
||||||
|
// UntilTime is the time of the final recurrence.
|
||||||
|
// If empty, the schedule recurs forever.
|
||||||
|
// +optional
|
||||||
|
UntilTime metav1.Time `json:"untilTime,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
type HorizontalRunnerAutoscalerStatus struct {
|
type HorizontalRunnerAutoscalerStatus struct {
|
||||||
// ObservedGeneration is the most recent generation observed for the target. It corresponds to e.g.
|
// ObservedGeneration is the most recent generation observed for the target. It corresponds to e.g.
|
||||||
// RunnerDeployment's generation, which is updated on mutation by the API Server.
|
// RunnerDeployment's generation, which is updated on mutation by the API Server.
|
||||||
@@ -161,6 +211,11 @@ type HorizontalRunnerAutoscalerStatus struct {
|
|||||||
|
|
||||||
// +optional
|
// +optional
|
||||||
CacheEntries []CacheEntry `json:"cacheEntries,omitempty"`
|
CacheEntries []CacheEntry `json:"cacheEntries,omitempty"`
|
||||||
|
|
||||||
|
// ScheduledOverridesSummary is the summary of active and upcoming scheduled overrides to be shown in e.g. a column of a `kubectl get hra` output
|
||||||
|
// for observability.
|
||||||
|
// +optional
|
||||||
|
ScheduledOverridesSummary *string `json:"scheduledOverridesSummary,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
const CacheEntryKeyDesiredReplicas = "desiredReplicas"
|
const CacheEntryKeyDesiredReplicas = "desiredReplicas"
|
||||||
@@ -172,10 +227,12 @@ type CacheEntry struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// +kubebuilder:object:root=true
|
// +kubebuilder:object:root=true
|
||||||
|
// +kubebuilder:resource:shortName=hra
|
||||||
// +kubebuilder:subresource:status
|
// +kubebuilder:subresource:status
|
||||||
// +kubebuilder:printcolumn:JSONPath=".spec.minReplicas",name=Min,type=number
|
// +kubebuilder:printcolumn:JSONPath=".spec.minReplicas",name=Min,type=number
|
||||||
// +kubebuilder:printcolumn:JSONPath=".spec.maxReplicas",name=Max,type=number
|
// +kubebuilder:printcolumn:JSONPath=".spec.maxReplicas",name=Max,type=number
|
||||||
// +kubebuilder:printcolumn:JSONPath=".status.desiredReplicas",name=Desired,type=number
|
// +kubebuilder:printcolumn:JSONPath=".status.desiredReplicas",name=Desired,type=number
|
||||||
|
// +kubebuilder:printcolumn:JSONPath=".status.scheduledOverridesSummary",name=Schedule,type=string
|
||||||
|
|
||||||
// HorizontalRunnerAutoscaler is the Schema for the horizontalrunnerautoscaler API
|
// HorizontalRunnerAutoscaler is the Schema for the horizontalrunnerautoscaler API
|
||||||
type HorizontalRunnerAutoscaler struct {
|
type HorizontalRunnerAutoscaler struct {
|
||||||
|
|||||||
@@ -19,12 +19,19 @@ package v1alpha1
|
|||||||
import (
|
import (
|
||||||
"errors"
|
"errors"
|
||||||
|
|
||||||
|
"k8s.io/apimachinery/pkg/api/resource"
|
||||||
|
|
||||||
corev1 "k8s.io/api/core/v1"
|
corev1 "k8s.io/api/core/v1"
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
)
|
)
|
||||||
|
|
||||||
// RunnerSpec defines the desired state of Runner
|
// RunnerSpec defines the desired state of Runner
|
||||||
type RunnerSpec struct {
|
type RunnerSpec struct {
|
||||||
|
RunnerConfig `json:",inline"`
|
||||||
|
RunnerPodSpec `json:",inline"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type RunnerConfig struct {
|
||||||
// +optional
|
// +optional
|
||||||
// +kubebuilder:validation:Pattern=`^[^/]+$`
|
// +kubebuilder:validation:Pattern=`^[^/]+$`
|
||||||
Enterprise string `json:"enterprise,omitempty"`
|
Enterprise string `json:"enterprise,omitempty"`
|
||||||
@@ -44,58 +51,103 @@ type RunnerSpec struct {
|
|||||||
Group string `json:"group,omitempty"`
|
Group string `json:"group,omitempty"`
|
||||||
|
|
||||||
// +optional
|
// +optional
|
||||||
Containers []corev1.Container `json:"containers,omitempty"`
|
Ephemeral *bool `json:"ephemeral,omitempty"`
|
||||||
// +optional
|
|
||||||
DockerdContainerResources corev1.ResourceRequirements `json:"dockerdContainerResources,omitempty"`
|
|
||||||
// +optional
|
|
||||||
DockerVolumeMounts []corev1.VolumeMount `json:"dockerVolumeMounts,omitempty"`
|
|
||||||
// +optional
|
|
||||||
Resources corev1.ResourceRequirements `json:"resources,omitempty"`
|
|
||||||
// +optional
|
|
||||||
VolumeMounts []corev1.VolumeMount `json:"volumeMounts,omitempty"`
|
|
||||||
// +optional
|
|
||||||
EnvFrom []corev1.EnvFromSource `json:"envFrom,omitempty"`
|
|
||||||
|
|
||||||
// +optional
|
// +optional
|
||||||
Image string `json:"image"`
|
Image string `json:"image"`
|
||||||
// +optional
|
|
||||||
ImagePullPolicy corev1.PullPolicy `json:"imagePullPolicy,omitempty"`
|
|
||||||
// +optional
|
|
||||||
Env []corev1.EnvVar `json:"env,omitempty"`
|
|
||||||
|
|
||||||
// +optional
|
|
||||||
Volumes []corev1.Volume `json:"volumes,omitempty"`
|
|
||||||
// +optional
|
// +optional
|
||||||
WorkDir string `json:"workDir,omitempty"`
|
WorkDir string `json:"workDir,omitempty"`
|
||||||
|
|
||||||
// +optional
|
|
||||||
InitContainers []corev1.Container `json:"initContainers,omitempty"`
|
|
||||||
// +optional
|
|
||||||
SidecarContainers []corev1.Container `json:"sidecarContainers,omitempty"`
|
|
||||||
// +optional
|
|
||||||
NodeSelector map[string]string `json:"nodeSelector,omitempty"`
|
|
||||||
// +optional
|
|
||||||
ServiceAccountName string `json:"serviceAccountName,omitempty"`
|
|
||||||
// +optional
|
|
||||||
AutomountServiceAccountToken *bool `json:"automountServiceAccountToken,omitempty"`
|
|
||||||
// +optional
|
|
||||||
SecurityContext *corev1.PodSecurityContext `json:"securityContext,omitempty"`
|
|
||||||
// +optional
|
|
||||||
ImagePullSecrets []corev1.LocalObjectReference `json:"imagePullSecrets,omitempty"`
|
|
||||||
// +optional
|
|
||||||
Affinity *corev1.Affinity `json:"affinity,omitempty"`
|
|
||||||
// +optional
|
|
||||||
Tolerations []corev1.Toleration `json:"tolerations,omitempty"`
|
|
||||||
// +optional
|
|
||||||
EphemeralContainers []corev1.EphemeralContainer `json:"ephemeralContainers,omitempty"`
|
|
||||||
// +optional
|
|
||||||
TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty"`
|
|
||||||
// +optional
|
// +optional
|
||||||
DockerdWithinRunnerContainer *bool `json:"dockerdWithinRunnerContainer,omitempty"`
|
DockerdWithinRunnerContainer *bool `json:"dockerdWithinRunnerContainer,omitempty"`
|
||||||
// +optional
|
// +optional
|
||||||
DockerEnabled *bool `json:"dockerEnabled,omitempty"`
|
DockerEnabled *bool `json:"dockerEnabled,omitempty"`
|
||||||
// +optional
|
// +optional
|
||||||
DockerMTU *int64 `json:"dockerMTU,omitempty"`
|
DockerMTU *int64 `json:"dockerMTU,omitempty"`
|
||||||
|
// +optional
|
||||||
|
DockerRegistryMirror *string `json:"dockerRegistryMirror,omitempty"`
|
||||||
|
// +optional
|
||||||
|
VolumeSizeLimit *resource.Quantity `json:"volumeSizeLimit,omitempty"`
|
||||||
|
// +optional
|
||||||
|
VolumeStorageMedium *string `json:"volumeStorageMedium,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// RunnerPodSpec defines the desired pod spec fields of the runner pod
|
||||||
|
type RunnerPodSpec struct {
|
||||||
|
// +optional
|
||||||
|
DockerdContainerResources corev1.ResourceRequirements `json:"dockerdContainerResources,omitempty"`
|
||||||
|
|
||||||
|
// +optional
|
||||||
|
DockerVolumeMounts []corev1.VolumeMount `json:"dockerVolumeMounts,omitempty"`
|
||||||
|
|
||||||
|
// +optional
|
||||||
|
Containers []corev1.Container `json:"containers,omitempty"`
|
||||||
|
|
||||||
|
// +optional
|
||||||
|
ImagePullPolicy corev1.PullPolicy `json:"imagePullPolicy,omitempty"`
|
||||||
|
|
||||||
|
// +optional
|
||||||
|
Env []corev1.EnvVar `json:"env,omitempty"`
|
||||||
|
|
||||||
|
// +optional
|
||||||
|
EnvFrom []corev1.EnvFromSource `json:"envFrom,omitempty"`
|
||||||
|
|
||||||
|
// +optional
|
||||||
|
Resources corev1.ResourceRequirements `json:"resources,omitempty"`
|
||||||
|
|
||||||
|
// +optional
|
||||||
|
VolumeMounts []corev1.VolumeMount `json:"volumeMounts,omitempty"`
|
||||||
|
|
||||||
|
// +optional
|
||||||
|
Volumes []corev1.Volume `json:"volumes,omitempty"`
|
||||||
|
|
||||||
|
// +optional
|
||||||
|
EnableServiceLinks *bool `json:"enableServiceLinks,omitempty"`
|
||||||
|
|
||||||
|
// +optional
|
||||||
|
InitContainers []corev1.Container `json:"initContainers,omitempty"`
|
||||||
|
|
||||||
|
// +optional
|
||||||
|
NodeSelector map[string]string `json:"nodeSelector,omitempty"`
|
||||||
|
|
||||||
|
// +optional
|
||||||
|
ServiceAccountName string `json:"serviceAccountName,omitempty"`
|
||||||
|
|
||||||
|
// +optional
|
||||||
|
AutomountServiceAccountToken *bool `json:"automountServiceAccountToken,omitempty"`
|
||||||
|
|
||||||
|
// +optional
|
||||||
|
SidecarContainers []corev1.Container `json:"sidecarContainers,omitempty"`
|
||||||
|
|
||||||
|
// +optional
|
||||||
|
SecurityContext *corev1.PodSecurityContext `json:"securityContext,omitempty"`
|
||||||
|
|
||||||
|
// +optional
|
||||||
|
ImagePullSecrets []corev1.LocalObjectReference `json:"imagePullSecrets,omitempty"`
|
||||||
|
|
||||||
|
// +optional
|
||||||
|
Affinity *corev1.Affinity `json:"affinity,omitempty"`
|
||||||
|
|
||||||
|
// +optional
|
||||||
|
Tolerations []corev1.Toleration `json:"tolerations,omitempty"`
|
||||||
|
|
||||||
|
// +optional
|
||||||
|
TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty"`
|
||||||
|
|
||||||
|
// +optional
|
||||||
|
EphemeralContainers []corev1.EphemeralContainer `json:"ephemeralContainers,omitempty"`
|
||||||
|
|
||||||
|
// +optional
|
||||||
|
HostAliases []corev1.HostAlias `json:"hostAliases,omitempty"`
|
||||||
|
|
||||||
|
// RuntimeClassName is the container runtime configuration that containers should run under.
|
||||||
|
// More info: https://kubernetes.io/docs/concepts/containers/runtime-class
|
||||||
|
// +optional
|
||||||
|
RuntimeClassName *string `json:"runtimeClassName,omitempty"`
|
||||||
|
|
||||||
|
// +optional
|
||||||
|
DnsConfig []corev1.PodDNSConfig `json:"dnsConfig,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// ValidateRepository validates repository field.
|
// ValidateRepository validates repository field.
|
||||||
@@ -153,6 +205,7 @@ type RunnerStatusRegistration struct {
|
|||||||
// +kubebuilder:printcolumn:JSONPath=".spec.repository",name=Repository,type=string
|
// +kubebuilder:printcolumn:JSONPath=".spec.repository",name=Repository,type=string
|
||||||
// +kubebuilder:printcolumn:JSONPath=".spec.labels",name=Labels,type=string
|
// +kubebuilder:printcolumn:JSONPath=".spec.labels",name=Labels,type=string
|
||||||
// +kubebuilder:printcolumn:JSONPath=".status.phase",name=Status,type=string
|
// +kubebuilder:printcolumn:JSONPath=".status.phase",name=Status,type=string
|
||||||
|
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"
|
||||||
|
|
||||||
// Runner is the Schema for the runners API
|
// Runner is the Schema for the runners API
|
||||||
type Runner struct {
|
type Runner struct {
|
||||||
|
|||||||
@@ -21,7 +21,7 @@ import (
|
|||||||
"k8s.io/apimachinery/pkg/runtime"
|
"k8s.io/apimachinery/pkg/runtime"
|
||||||
"k8s.io/apimachinery/pkg/util/validation/field"
|
"k8s.io/apimachinery/pkg/util/validation/field"
|
||||||
ctrl "sigs.k8s.io/controller-runtime"
|
ctrl "sigs.k8s.io/controller-runtime"
|
||||||
logf "sigs.k8s.io/controller-runtime/pkg/runtime/log"
|
logf "sigs.k8s.io/controller-runtime/pkg/log"
|
||||||
"sigs.k8s.io/controller-runtime/pkg/webhook"
|
"sigs.k8s.io/controller-runtime/pkg/webhook"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -34,7 +34,7 @@ func (r *Runner) SetupWebhookWithManager(mgr ctrl.Manager) error {
|
|||||||
Complete()
|
Complete()
|
||||||
}
|
}
|
||||||
|
|
||||||
// +kubebuilder:webhook:path=/mutate-actions-summerwind-dev-v1alpha1-runner,verbs=create;update,mutating=true,failurePolicy=fail,groups=actions.summerwind.dev,resources=runners,versions=v1alpha1,name=mutate.runner.actions.summerwind.dev
|
// +kubebuilder:webhook:path=/mutate-actions-summerwind-dev-v1alpha1-runner,verbs=create;update,mutating=true,failurePolicy=fail,groups=actions.summerwind.dev,resources=runners,versions=v1alpha1,name=mutate.runner.actions.summerwind.dev,sideEffects=None,admissionReviewVersions=v1beta1
|
||||||
|
|
||||||
var _ webhook.Defaulter = &Runner{}
|
var _ webhook.Defaulter = &Runner{}
|
||||||
|
|
||||||
@@ -43,7 +43,7 @@ func (r *Runner) Default() {
|
|||||||
// Nothing to do.
|
// Nothing to do.
|
||||||
}
|
}
|
||||||
|
|
||||||
// +kubebuilder:webhook:path=/validate-actions-summerwind-dev-v1alpha1-runner,verbs=create;update,mutating=false,failurePolicy=fail,groups=actions.summerwind.dev,resources=runners,versions=v1alpha1,name=validate.runner.actions.summerwind.dev
|
// +kubebuilder:webhook:path=/validate-actions-summerwind-dev-v1alpha1-runner,verbs=create;update,mutating=false,failurePolicy=fail,groups=actions.summerwind.dev,resources=runners,versions=v1alpha1,name=validate.runner.actions.summerwind.dev,sideEffects=None,admissionReviewVersions=v1beta1
|
||||||
|
|
||||||
var _ webhook.Validator = &Runner{}
|
var _ webhook.Validator = &Runner{}
|
||||||
|
|
||||||
|
|||||||
@@ -38,20 +38,42 @@ type RunnerDeploymentSpec struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
type RunnerDeploymentStatus struct {
|
type RunnerDeploymentStatus struct {
|
||||||
AvailableReplicas int `json:"availableReplicas"`
|
// See K8s deployment controller code for reference
|
||||||
ReadyReplicas int `json:"readyReplicas"`
|
// https://github.com/kubernetes/kubernetes/blob/ea0764452222146c47ec826977f49d7001b0ea8c/pkg/controller/deployment/sync.go#L487-L505
|
||||||
|
|
||||||
// Replicas is the total number of desired, non-terminated and latest pods to be set for the primary RunnerSet
|
// AvailableReplicas is the total number of available runners which have been successfully registered to GitHub and still running.
|
||||||
|
// This corresponds to the sum of status.availableReplicas of all the runner replica sets.
|
||||||
|
// +optional
|
||||||
|
AvailableReplicas *int `json:"availableReplicas"`
|
||||||
|
|
||||||
|
// ReadyReplicas is the total number of available runners which have been successfully registered to GitHub and still running.
|
||||||
|
// This corresponds to the sum of status.readyReplicas of all the runner replica sets.
|
||||||
|
// +optional
|
||||||
|
ReadyReplicas *int `json:"readyReplicas"`
|
||||||
|
|
||||||
|
// ReadyReplicas is the total number of available runners which have been successfully registered to GitHub and still running.
|
||||||
|
// This corresponds to status.replicas of the runner replica set that has the desired template hash.
|
||||||
|
// +optional
|
||||||
|
UpdatedReplicas *int `json:"updatedReplicas"`
|
||||||
|
|
||||||
|
// DesiredReplicas is the total number of desired, non-terminated and latest pods to be set for the primary RunnerSet
|
||||||
// This doesn't include outdated pods while upgrading the deployment and replacing the runnerset.
|
// This doesn't include outdated pods while upgrading the deployment and replacing the runnerset.
|
||||||
// +optional
|
// +optional
|
||||||
Replicas *int `json:"desiredReplicas,omitempty"`
|
DesiredReplicas *int `json:"desiredReplicas"`
|
||||||
|
|
||||||
|
// Replicas is the total number of replicas
|
||||||
|
// +optional
|
||||||
|
Replicas *int `json:"replicas"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// +kubebuilder:object:root=true
|
// +kubebuilder:object:root=true
|
||||||
|
// +kubebuilder:resource:shortName=rdeploy
|
||||||
// +kubebuilder:subresource:status
|
// +kubebuilder:subresource:status
|
||||||
// +kubebuilder:printcolumn:JSONPath=".spec.replicas",name=Desired,type=number
|
// +kubebuilder:printcolumn:JSONPath=".spec.replicas",name=Desired,type=number
|
||||||
// +kubebuilder:printcolumn:JSONPath=".status.availableReplicas",name=Current,type=number
|
// +kubebuilder:printcolumn:JSONPath=".status.replicas",name=Current,type=number
|
||||||
// +kubebuilder:printcolumn:JSONPath=".status.readyReplicas",name=Ready,type=number
|
// +kubebuilder:printcolumn:JSONPath=".status.updatedReplicas",name=Up-To-Date,type=number
|
||||||
|
// +kubebuilder:printcolumn:JSONPath=".status.availableReplicas",name=Available,type=number
|
||||||
|
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"
|
||||||
|
|
||||||
// RunnerDeployment is the Schema for the runnerdeployments API
|
// RunnerDeployment is the Schema for the runnerdeployments API
|
||||||
type RunnerDeployment struct {
|
type RunnerDeployment struct {
|
||||||
|
|||||||
@@ -21,7 +21,7 @@ import (
|
|||||||
"k8s.io/apimachinery/pkg/runtime"
|
"k8s.io/apimachinery/pkg/runtime"
|
||||||
"k8s.io/apimachinery/pkg/util/validation/field"
|
"k8s.io/apimachinery/pkg/util/validation/field"
|
||||||
ctrl "sigs.k8s.io/controller-runtime"
|
ctrl "sigs.k8s.io/controller-runtime"
|
||||||
logf "sigs.k8s.io/controller-runtime/pkg/runtime/log"
|
logf "sigs.k8s.io/controller-runtime/pkg/log"
|
||||||
"sigs.k8s.io/controller-runtime/pkg/webhook"
|
"sigs.k8s.io/controller-runtime/pkg/webhook"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -34,7 +34,7 @@ func (r *RunnerDeployment) SetupWebhookWithManager(mgr ctrl.Manager) error {
|
|||||||
Complete()
|
Complete()
|
||||||
}
|
}
|
||||||
|
|
||||||
// +kubebuilder:webhook:path=/mutate-actions-summerwind-dev-v1alpha1-runnerdeployment,verbs=create;update,mutating=true,failurePolicy=fail,groups=actions.summerwind.dev,resources=runnerdeployments,versions=v1alpha1,name=mutate.runnerdeployment.actions.summerwind.dev
|
// +kubebuilder:webhook:path=/mutate-actions-summerwind-dev-v1alpha1-runnerdeployment,verbs=create;update,mutating=true,failurePolicy=fail,groups=actions.summerwind.dev,resources=runnerdeployments,versions=v1alpha1,name=mutate.runnerdeployment.actions.summerwind.dev,sideEffects=None,admissionReviewVersions=v1beta1
|
||||||
|
|
||||||
var _ webhook.Defaulter = &RunnerDeployment{}
|
var _ webhook.Defaulter = &RunnerDeployment{}
|
||||||
|
|
||||||
@@ -43,7 +43,7 @@ func (r *RunnerDeployment) Default() {
|
|||||||
// Nothing to do.
|
// Nothing to do.
|
||||||
}
|
}
|
||||||
|
|
||||||
// +kubebuilder:webhook:path=/validate-actions-summerwind-dev-v1alpha1-runnerdeployment,verbs=create;update,mutating=false,failurePolicy=fail,groups=actions.summerwind.dev,resources=runnerdeployments,versions=v1alpha1,name=validate.runnerdeployment.actions.summerwind.dev
|
// +kubebuilder:webhook:path=/validate-actions-summerwind-dev-v1alpha1-runnerdeployment,verbs=create;update,mutating=false,failurePolicy=fail,groups=actions.summerwind.dev,resources=runnerdeployments,versions=v1alpha1,name=validate.runnerdeployment.actions.summerwind.dev,sideEffects=None,admissionReviewVersions=v1beta1
|
||||||
|
|
||||||
var _ webhook.Validator = &RunnerDeployment{}
|
var _ webhook.Validator = &RunnerDeployment{}
|
||||||
|
|
||||||
|
|||||||
@@ -33,8 +33,19 @@ type RunnerReplicaSetSpec struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
type RunnerReplicaSetStatus struct {
|
type RunnerReplicaSetStatus struct {
|
||||||
AvailableReplicas int `json:"availableReplicas"`
|
// See K8s replicaset controller code for reference
|
||||||
ReadyReplicas int `json:"readyReplicas"`
|
// https://github.com/kubernetes/kubernetes/blob/ea0764452222146c47ec826977f49d7001b0ea8c/pkg/controller/replicaset/replica_set_utils.go#L101-L106
|
||||||
|
|
||||||
|
// Replicas is the number of runners that are created and still being managed by this runner replica set.
|
||||||
|
// +optional
|
||||||
|
Replicas *int `json:"replicas"`
|
||||||
|
|
||||||
|
// ReadyReplicas is the number of runners that are created and Runnning.
|
||||||
|
ReadyReplicas *int `json:"readyReplicas"`
|
||||||
|
|
||||||
|
// AvailableReplicas is the number of runners that are created and Runnning.
|
||||||
|
// This is currently same as ReadyReplicas but perserved for future use.
|
||||||
|
AvailableReplicas *int `json:"availableReplicas"`
|
||||||
}
|
}
|
||||||
|
|
||||||
type RunnerTemplate struct {
|
type RunnerTemplate struct {
|
||||||
@@ -44,10 +55,12 @@ type RunnerTemplate struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// +kubebuilder:object:root=true
|
// +kubebuilder:object:root=true
|
||||||
|
// +kubebuilder:resource:shortName=rrs
|
||||||
// +kubebuilder:subresource:status
|
// +kubebuilder:subresource:status
|
||||||
// +kubebuilder:printcolumn:JSONPath=".spec.replicas",name=Desired,type=number
|
// +kubebuilder:printcolumn:JSONPath=".spec.replicas",name=Desired,type=number
|
||||||
// +kubebuilder:printcolumn:JSONPath=".status.availableReplicas",name=Current,type=number
|
// +kubebuilder:printcolumn:JSONPath=".status.replicas",name=Current,type=number
|
||||||
// +kubebuilder:printcolumn:JSONPath=".status.readyReplicas",name=Ready,type=number
|
// +kubebuilder:printcolumn:JSONPath=".status.readyReplicas",name=Ready,type=number
|
||||||
|
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"
|
||||||
|
|
||||||
// RunnerReplicaSet is the Schema for the runnerreplicasets API
|
// RunnerReplicaSet is the Schema for the runnerreplicasets API
|
||||||
type RunnerReplicaSet struct {
|
type RunnerReplicaSet struct {
|
||||||
|
|||||||
@@ -21,7 +21,7 @@ import (
|
|||||||
"k8s.io/apimachinery/pkg/runtime"
|
"k8s.io/apimachinery/pkg/runtime"
|
||||||
"k8s.io/apimachinery/pkg/util/validation/field"
|
"k8s.io/apimachinery/pkg/util/validation/field"
|
||||||
ctrl "sigs.k8s.io/controller-runtime"
|
ctrl "sigs.k8s.io/controller-runtime"
|
||||||
logf "sigs.k8s.io/controller-runtime/pkg/runtime/log"
|
logf "sigs.k8s.io/controller-runtime/pkg/log"
|
||||||
"sigs.k8s.io/controller-runtime/pkg/webhook"
|
"sigs.k8s.io/controller-runtime/pkg/webhook"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -34,7 +34,7 @@ func (r *RunnerReplicaSet) SetupWebhookWithManager(mgr ctrl.Manager) error {
|
|||||||
Complete()
|
Complete()
|
||||||
}
|
}
|
||||||
|
|
||||||
// +kubebuilder:webhook:path=/mutate-actions-summerwind-dev-v1alpha1-runnerreplicaset,verbs=create;update,mutating=true,failurePolicy=fail,groups=actions.summerwind.dev,resources=runnerreplicasets,versions=v1alpha1,name=mutate.runnerreplicaset.actions.summerwind.dev
|
// +kubebuilder:webhook:path=/mutate-actions-summerwind-dev-v1alpha1-runnerreplicaset,verbs=create;update,mutating=true,failurePolicy=fail,groups=actions.summerwind.dev,resources=runnerreplicasets,versions=v1alpha1,name=mutate.runnerreplicaset.actions.summerwind.dev,sideEffects=None,admissionReviewVersions=v1beta1
|
||||||
|
|
||||||
var _ webhook.Defaulter = &RunnerReplicaSet{}
|
var _ webhook.Defaulter = &RunnerReplicaSet{}
|
||||||
|
|
||||||
@@ -43,7 +43,7 @@ func (r *RunnerReplicaSet) Default() {
|
|||||||
// Nothing to do.
|
// Nothing to do.
|
||||||
}
|
}
|
||||||
|
|
||||||
// +kubebuilder:webhook:path=/validate-actions-summerwind-dev-v1alpha1-runnerreplicaset,verbs=create;update,mutating=false,failurePolicy=fail,groups=actions.summerwind.dev,resources=runnerreplicasets,versions=v1alpha1,name=validate.runnerreplicaset.actions.summerwind.dev
|
// +kubebuilder:webhook:path=/validate-actions-summerwind-dev-v1alpha1-runnerreplicaset,verbs=create;update,mutating=false,failurePolicy=fail,groups=actions.summerwind.dev,resources=runnerreplicasets,versions=v1alpha1,name=validate.runnerreplicaset.actions.summerwind.dev,sideEffects=None,admissionReviewVersions=v1beta1
|
||||||
|
|
||||||
var _ webhook.Validator = &RunnerReplicaSet{}
|
var _ webhook.Validator = &RunnerReplicaSet{}
|
||||||
|
|
||||||
|
|||||||
88
api/v1alpha1/runnerset_types.go
Normal file
88
api/v1alpha1/runnerset_types.go
Normal file
@@ -0,0 +1,88 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2021 The actions-runner-controller authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package v1alpha1
|
||||||
|
|
||||||
|
import (
|
||||||
|
appsv1 "k8s.io/api/apps/v1"
|
||||||
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
|
)
|
||||||
|
|
||||||
|
// RunnerSetSpec defines the desired state of RunnerSet
|
||||||
|
type RunnerSetSpec struct {
|
||||||
|
RunnerConfig `json:",inline"`
|
||||||
|
|
||||||
|
appsv1.StatefulSetSpec `json:",inline"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type RunnerSetStatus struct {
|
||||||
|
// See K8s deployment controller code for reference
|
||||||
|
// https://github.com/kubernetes/kubernetes/blob/ea0764452222146c47ec826977f49d7001b0ea8c/pkg/controller/deployment/sync.go#L487-L505
|
||||||
|
|
||||||
|
// AvailableReplicas is the total number of available runners which have been successfully registered to GitHub and still running.
|
||||||
|
// This corresponds to the sum of status.availableReplicas of all the runner replica sets.
|
||||||
|
// +optional
|
||||||
|
CurrentReplicas *int `json:"availableReplicas"`
|
||||||
|
|
||||||
|
// ReadyReplicas is the total number of available runners which have been successfully registered to GitHub and still running.
|
||||||
|
// This corresponds to the sum of status.readyReplicas of all the runner replica sets.
|
||||||
|
// +optional
|
||||||
|
ReadyReplicas *int `json:"readyReplicas"`
|
||||||
|
|
||||||
|
// ReadyReplicas is the total number of available runners which have been successfully registered to GitHub and still running.
|
||||||
|
// This corresponds to status.replicas of the runner replica set that has the desired template hash.
|
||||||
|
// +optional
|
||||||
|
UpdatedReplicas *int `json:"updatedReplicas"`
|
||||||
|
|
||||||
|
// DesiredReplicas is the total number of desired, non-terminated and latest pods to be set for the primary RunnerSet
|
||||||
|
// This doesn't include outdated pods while upgrading the deployment and replacing the runnerset.
|
||||||
|
// +optional
|
||||||
|
DesiredReplicas *int `json:"desiredReplicas"`
|
||||||
|
|
||||||
|
// Replicas is the total number of replicas
|
||||||
|
// +optional
|
||||||
|
Replicas *int `json:"replicas"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// +kubebuilder:object:root=true
|
||||||
|
// +kubebuilder:subresource:status
|
||||||
|
// +kubebuilder:printcolumn:JSONPath=".spec.replicas",name=Desired,type=number
|
||||||
|
// +kubebuilder:printcolumn:JSONPath=".status.replicas",name=Current,type=number
|
||||||
|
// +kubebuilder:printcolumn:JSONPath=".status.updatedReplicas",name=Up-To-Date,type=number
|
||||||
|
// +kubebuilder:printcolumn:JSONPath=".status.availableReplicas",name=Available,type=number
|
||||||
|
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"
|
||||||
|
|
||||||
|
// RunnerSet is the Schema for the runnersets API
|
||||||
|
type RunnerSet struct {
|
||||||
|
metav1.TypeMeta `json:",inline"`
|
||||||
|
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||||
|
|
||||||
|
Spec RunnerSetSpec `json:"spec,omitempty"`
|
||||||
|
Status RunnerSetStatus `json:"status,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// +kubebuilder:object:root=true
|
||||||
|
|
||||||
|
// RunnerList contains a list of Runner
|
||||||
|
type RunnerSetList struct {
|
||||||
|
metav1.TypeMeta `json:",inline"`
|
||||||
|
metav1.ListMeta `json:"metadata,omitempty"`
|
||||||
|
Items []RunnerSet `json:"items"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
SchemeBuilder.Register(&RunnerSet{}, &RunnerSetList{})
|
||||||
|
}
|
||||||
@@ -71,6 +71,11 @@ func (in *CheckRunSpec) DeepCopyInto(out *CheckRunSpec) {
|
|||||||
*out = make([]string, len(*in))
|
*out = make([]string, len(*in))
|
||||||
copy(*out, *in)
|
copy(*out, *in)
|
||||||
}
|
}
|
||||||
|
if in.Repositories != nil {
|
||||||
|
in, out := &in.Repositories, &out.Repositories
|
||||||
|
*out = make([]string, len(*in))
|
||||||
|
copy(*out, *in)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CheckRunSpec.
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CheckRunSpec.
|
||||||
@@ -212,6 +217,13 @@ func (in *HorizontalRunnerAutoscalerSpec) DeepCopyInto(out *HorizontalRunnerAuto
|
|||||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
if in.ScheduledOverrides != nil {
|
||||||
|
in, out := &in.ScheduledOverrides, &out.ScheduledOverrides
|
||||||
|
*out = make([]ScheduledOverride, len(*in))
|
||||||
|
for i := range *in {
|
||||||
|
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalRunnerAutoscalerSpec.
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalRunnerAutoscalerSpec.
|
||||||
@@ -243,6 +255,11 @@ func (in *HorizontalRunnerAutoscalerStatus) DeepCopyInto(out *HorizontalRunnerAu
|
|||||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
if in.ScheduledOverridesSummary != nil {
|
||||||
|
in, out := &in.ScheduledOverridesSummary, &out.ScheduledOverridesSummary
|
||||||
|
*out = new(string)
|
||||||
|
**out = **in
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalRunnerAutoscalerStatus.
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalRunnerAutoscalerStatus.
|
||||||
@@ -315,6 +332,22 @@ func (in *PushSpec) DeepCopy() *PushSpec {
|
|||||||
return out
|
return out
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
func (in *RecurrenceRule) DeepCopyInto(out *RecurrenceRule) {
|
||||||
|
*out = *in
|
||||||
|
in.UntilTime.DeepCopyInto(&out.UntilTime)
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecurrenceRule.
|
||||||
|
func (in *RecurrenceRule) DeepCopy() *RecurrenceRule {
|
||||||
|
if in == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
out := new(RecurrenceRule)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
func (in *Runner) DeepCopyInto(out *Runner) {
|
func (in *Runner) DeepCopyInto(out *Runner) {
|
||||||
*out = *in
|
*out = *in
|
||||||
@@ -342,6 +375,61 @@ func (in *Runner) DeepCopyObject() runtime.Object {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
func (in *RunnerConfig) DeepCopyInto(out *RunnerConfig) {
|
||||||
|
*out = *in
|
||||||
|
if in.Labels != nil {
|
||||||
|
in, out := &in.Labels, &out.Labels
|
||||||
|
*out = make([]string, len(*in))
|
||||||
|
copy(*out, *in)
|
||||||
|
}
|
||||||
|
if in.Ephemeral != nil {
|
||||||
|
in, out := &in.Ephemeral, &out.Ephemeral
|
||||||
|
*out = new(bool)
|
||||||
|
**out = **in
|
||||||
|
}
|
||||||
|
if in.DockerdWithinRunnerContainer != nil {
|
||||||
|
in, out := &in.DockerdWithinRunnerContainer, &out.DockerdWithinRunnerContainer
|
||||||
|
*out = new(bool)
|
||||||
|
**out = **in
|
||||||
|
}
|
||||||
|
if in.DockerEnabled != nil {
|
||||||
|
in, out := &in.DockerEnabled, &out.DockerEnabled
|
||||||
|
*out = new(bool)
|
||||||
|
**out = **in
|
||||||
|
}
|
||||||
|
if in.DockerMTU != nil {
|
||||||
|
in, out := &in.DockerMTU, &out.DockerMTU
|
||||||
|
*out = new(int64)
|
||||||
|
**out = **in
|
||||||
|
}
|
||||||
|
if in.DockerRegistryMirror != nil {
|
||||||
|
in, out := &in.DockerRegistryMirror, &out.DockerRegistryMirror
|
||||||
|
*out = new(string)
|
||||||
|
**out = **in
|
||||||
|
}
|
||||||
|
if in.VolumeSizeLimit != nil {
|
||||||
|
in, out := &in.VolumeSizeLimit, &out.VolumeSizeLimit
|
||||||
|
x := (*in).DeepCopy()
|
||||||
|
*out = &x
|
||||||
|
}
|
||||||
|
if in.VolumeStorageMedium != nil {
|
||||||
|
in, out := &in.VolumeStorageMedium, &out.VolumeStorageMedium
|
||||||
|
*out = new(string)
|
||||||
|
**out = **in
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunnerConfig.
|
||||||
|
func (in *RunnerConfig) DeepCopy() *RunnerConfig {
|
||||||
|
if in == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
out := new(RunnerConfig)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
func (in *RunnerDeployment) DeepCopyInto(out *RunnerDeployment) {
|
func (in *RunnerDeployment) DeepCopyInto(out *RunnerDeployment) {
|
||||||
*out = *in
|
*out = *in
|
||||||
@@ -430,6 +518,26 @@ func (in *RunnerDeploymentSpec) DeepCopy() *RunnerDeploymentSpec {
|
|||||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
func (in *RunnerDeploymentStatus) DeepCopyInto(out *RunnerDeploymentStatus) {
|
func (in *RunnerDeploymentStatus) DeepCopyInto(out *RunnerDeploymentStatus) {
|
||||||
*out = *in
|
*out = *in
|
||||||
|
if in.AvailableReplicas != nil {
|
||||||
|
in, out := &in.AvailableReplicas, &out.AvailableReplicas
|
||||||
|
*out = new(int)
|
||||||
|
**out = **in
|
||||||
|
}
|
||||||
|
if in.ReadyReplicas != nil {
|
||||||
|
in, out := &in.ReadyReplicas, &out.ReadyReplicas
|
||||||
|
*out = new(int)
|
||||||
|
**out = **in
|
||||||
|
}
|
||||||
|
if in.UpdatedReplicas != nil {
|
||||||
|
in, out := &in.UpdatedReplicas, &out.UpdatedReplicas
|
||||||
|
*out = new(int)
|
||||||
|
**out = **in
|
||||||
|
}
|
||||||
|
if in.DesiredReplicas != nil {
|
||||||
|
in, out := &in.DesiredReplicas, &out.DesiredReplicas
|
||||||
|
*out = new(int)
|
||||||
|
**out = **in
|
||||||
|
}
|
||||||
if in.Replicas != nil {
|
if in.Replicas != nil {
|
||||||
in, out := &in.Replicas, &out.Replicas
|
in, out := &in.Replicas, &out.Replicas
|
||||||
*out = new(int)
|
*out = new(int)
|
||||||
@@ -479,13 +587,156 @@ func (in *RunnerList) DeepCopyObject() runtime.Object {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
func (in *RunnerPodSpec) DeepCopyInto(out *RunnerPodSpec) {
|
||||||
|
*out = *in
|
||||||
|
in.DockerdContainerResources.DeepCopyInto(&out.DockerdContainerResources)
|
||||||
|
if in.DockerVolumeMounts != nil {
|
||||||
|
in, out := &in.DockerVolumeMounts, &out.DockerVolumeMounts
|
||||||
|
*out = make([]v1.VolumeMount, len(*in))
|
||||||
|
for i := range *in {
|
||||||
|
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if in.Containers != nil {
|
||||||
|
in, out := &in.Containers, &out.Containers
|
||||||
|
*out = make([]v1.Container, len(*in))
|
||||||
|
for i := range *in {
|
||||||
|
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if in.Env != nil {
|
||||||
|
in, out := &in.Env, &out.Env
|
||||||
|
*out = make([]v1.EnvVar, len(*in))
|
||||||
|
for i := range *in {
|
||||||
|
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if in.EnvFrom != nil {
|
||||||
|
in, out := &in.EnvFrom, &out.EnvFrom
|
||||||
|
*out = make([]v1.EnvFromSource, len(*in))
|
||||||
|
for i := range *in {
|
||||||
|
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
in.Resources.DeepCopyInto(&out.Resources)
|
||||||
|
if in.VolumeMounts != nil {
|
||||||
|
in, out := &in.VolumeMounts, &out.VolumeMounts
|
||||||
|
*out = make([]v1.VolumeMount, len(*in))
|
||||||
|
for i := range *in {
|
||||||
|
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if in.Volumes != nil {
|
||||||
|
in, out := &in.Volumes, &out.Volumes
|
||||||
|
*out = make([]v1.Volume, len(*in))
|
||||||
|
for i := range *in {
|
||||||
|
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if in.EnableServiceLinks != nil {
|
||||||
|
in, out := &in.EnableServiceLinks, &out.EnableServiceLinks
|
||||||
|
*out = new(bool)
|
||||||
|
**out = **in
|
||||||
|
}
|
||||||
|
if in.InitContainers != nil {
|
||||||
|
in, out := &in.InitContainers, &out.InitContainers
|
||||||
|
*out = make([]v1.Container, len(*in))
|
||||||
|
for i := range *in {
|
||||||
|
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if in.NodeSelector != nil {
|
||||||
|
in, out := &in.NodeSelector, &out.NodeSelector
|
||||||
|
*out = make(map[string]string, len(*in))
|
||||||
|
for key, val := range *in {
|
||||||
|
(*out)[key] = val
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if in.AutomountServiceAccountToken != nil {
|
||||||
|
in, out := &in.AutomountServiceAccountToken, &out.AutomountServiceAccountToken
|
||||||
|
*out = new(bool)
|
||||||
|
**out = **in
|
||||||
|
}
|
||||||
|
if in.SidecarContainers != nil {
|
||||||
|
in, out := &in.SidecarContainers, &out.SidecarContainers
|
||||||
|
*out = make([]v1.Container, len(*in))
|
||||||
|
for i := range *in {
|
||||||
|
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if in.SecurityContext != nil {
|
||||||
|
in, out := &in.SecurityContext, &out.SecurityContext
|
||||||
|
*out = new(v1.PodSecurityContext)
|
||||||
|
(*in).DeepCopyInto(*out)
|
||||||
|
}
|
||||||
|
if in.ImagePullSecrets != nil {
|
||||||
|
in, out := &in.ImagePullSecrets, &out.ImagePullSecrets
|
||||||
|
*out = make([]v1.LocalObjectReference, len(*in))
|
||||||
|
copy(*out, *in)
|
||||||
|
}
|
||||||
|
if in.Affinity != nil {
|
||||||
|
in, out := &in.Affinity, &out.Affinity
|
||||||
|
*out = new(v1.Affinity)
|
||||||
|
(*in).DeepCopyInto(*out)
|
||||||
|
}
|
||||||
|
if in.Tolerations != nil {
|
||||||
|
in, out := &in.Tolerations, &out.Tolerations
|
||||||
|
*out = make([]v1.Toleration, len(*in))
|
||||||
|
for i := range *in {
|
||||||
|
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if in.TerminationGracePeriodSeconds != nil {
|
||||||
|
in, out := &in.TerminationGracePeriodSeconds, &out.TerminationGracePeriodSeconds
|
||||||
|
*out = new(int64)
|
||||||
|
**out = **in
|
||||||
|
}
|
||||||
|
if in.EphemeralContainers != nil {
|
||||||
|
in, out := &in.EphemeralContainers, &out.EphemeralContainers
|
||||||
|
*out = make([]v1.EphemeralContainer, len(*in))
|
||||||
|
for i := range *in {
|
||||||
|
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if in.HostAliases != nil {
|
||||||
|
in, out := &in.HostAliases, &out.HostAliases
|
||||||
|
*out = make([]v1.HostAlias, len(*in))
|
||||||
|
for i := range *in {
|
||||||
|
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if in.RuntimeClassName != nil {
|
||||||
|
in, out := &in.RuntimeClassName, &out.RuntimeClassName
|
||||||
|
*out = new(string)
|
||||||
|
**out = **in
|
||||||
|
}
|
||||||
|
if in.DnsConfig != nil {
|
||||||
|
in, out := &in.DnsConfig, &out.DnsConfig
|
||||||
|
*out = make([]v1.PodDNSConfig, len(*in))
|
||||||
|
for i := range *in {
|
||||||
|
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunnerPodSpec.
|
||||||
|
func (in *RunnerPodSpec) DeepCopy() *RunnerPodSpec {
|
||||||
|
if in == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
out := new(RunnerPodSpec)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
func (in *RunnerReplicaSet) DeepCopyInto(out *RunnerReplicaSet) {
|
func (in *RunnerReplicaSet) DeepCopyInto(out *RunnerReplicaSet) {
|
||||||
*out = *in
|
*out = *in
|
||||||
out.TypeMeta = in.TypeMeta
|
out.TypeMeta = in.TypeMeta
|
||||||
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
|
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
|
||||||
in.Spec.DeepCopyInto(&out.Spec)
|
in.Spec.DeepCopyInto(&out.Spec)
|
||||||
out.Status = in.Status
|
in.Status.DeepCopyInto(&out.Status)
|
||||||
}
|
}
|
||||||
|
|
||||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunnerReplicaSet.
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunnerReplicaSet.
|
||||||
@@ -567,6 +818,21 @@ func (in *RunnerReplicaSetSpec) DeepCopy() *RunnerReplicaSetSpec {
|
|||||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
func (in *RunnerReplicaSetStatus) DeepCopyInto(out *RunnerReplicaSetStatus) {
|
func (in *RunnerReplicaSetStatus) DeepCopyInto(out *RunnerReplicaSetStatus) {
|
||||||
*out = *in
|
*out = *in
|
||||||
|
if in.Replicas != nil {
|
||||||
|
in, out := &in.Replicas, &out.Replicas
|
||||||
|
*out = new(int)
|
||||||
|
**out = **in
|
||||||
|
}
|
||||||
|
if in.ReadyReplicas != nil {
|
||||||
|
in, out := &in.ReadyReplicas, &out.ReadyReplicas
|
||||||
|
*out = new(int)
|
||||||
|
**out = **in
|
||||||
|
}
|
||||||
|
if in.AvailableReplicas != nil {
|
||||||
|
in, out := &in.AvailableReplicas, &out.AvailableReplicas
|
||||||
|
*out = new(int)
|
||||||
|
**out = **in
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunnerReplicaSetStatus.
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunnerReplicaSetStatus.
|
||||||
@@ -579,133 +845,127 @@ func (in *RunnerReplicaSetStatus) DeepCopy() *RunnerReplicaSetStatus {
|
|||||||
return out
|
return out
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
func (in *RunnerSet) DeepCopyInto(out *RunnerSet) {
|
||||||
|
*out = *in
|
||||||
|
out.TypeMeta = in.TypeMeta
|
||||||
|
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
|
||||||
|
in.Spec.DeepCopyInto(&out.Spec)
|
||||||
|
in.Status.DeepCopyInto(&out.Status)
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunnerSet.
|
||||||
|
func (in *RunnerSet) DeepCopy() *RunnerSet {
|
||||||
|
if in == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
out := new(RunnerSet)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||||
|
func (in *RunnerSet) DeepCopyObject() runtime.Object {
|
||||||
|
if c := in.DeepCopy(); c != nil {
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
func (in *RunnerSetList) DeepCopyInto(out *RunnerSetList) {
|
||||||
|
*out = *in
|
||||||
|
out.TypeMeta = in.TypeMeta
|
||||||
|
in.ListMeta.DeepCopyInto(&out.ListMeta)
|
||||||
|
if in.Items != nil {
|
||||||
|
in, out := &in.Items, &out.Items
|
||||||
|
*out = make([]RunnerSet, len(*in))
|
||||||
|
for i := range *in {
|
||||||
|
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunnerSetList.
|
||||||
|
func (in *RunnerSetList) DeepCopy() *RunnerSetList {
|
||||||
|
if in == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
out := new(RunnerSetList)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||||
|
func (in *RunnerSetList) DeepCopyObject() runtime.Object {
|
||||||
|
if c := in.DeepCopy(); c != nil {
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
func (in *RunnerSetSpec) DeepCopyInto(out *RunnerSetSpec) {
|
||||||
|
*out = *in
|
||||||
|
in.RunnerConfig.DeepCopyInto(&out.RunnerConfig)
|
||||||
|
in.StatefulSetSpec.DeepCopyInto(&out.StatefulSetSpec)
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunnerSetSpec.
|
||||||
|
func (in *RunnerSetSpec) DeepCopy() *RunnerSetSpec {
|
||||||
|
if in == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
out := new(RunnerSetSpec)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
func (in *RunnerSetStatus) DeepCopyInto(out *RunnerSetStatus) {
|
||||||
|
*out = *in
|
||||||
|
if in.CurrentReplicas != nil {
|
||||||
|
in, out := &in.CurrentReplicas, &out.CurrentReplicas
|
||||||
|
*out = new(int)
|
||||||
|
**out = **in
|
||||||
|
}
|
||||||
|
if in.ReadyReplicas != nil {
|
||||||
|
in, out := &in.ReadyReplicas, &out.ReadyReplicas
|
||||||
|
*out = new(int)
|
||||||
|
**out = **in
|
||||||
|
}
|
||||||
|
if in.UpdatedReplicas != nil {
|
||||||
|
in, out := &in.UpdatedReplicas, &out.UpdatedReplicas
|
||||||
|
*out = new(int)
|
||||||
|
**out = **in
|
||||||
|
}
|
||||||
|
if in.DesiredReplicas != nil {
|
||||||
|
in, out := &in.DesiredReplicas, &out.DesiredReplicas
|
||||||
|
*out = new(int)
|
||||||
|
**out = **in
|
||||||
|
}
|
||||||
|
if in.Replicas != nil {
|
||||||
|
in, out := &in.Replicas, &out.Replicas
|
||||||
|
*out = new(int)
|
||||||
|
**out = **in
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunnerSetStatus.
|
||||||
|
func (in *RunnerSetStatus) DeepCopy() *RunnerSetStatus {
|
||||||
|
if in == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
out := new(RunnerSetStatus)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
func (in *RunnerSpec) DeepCopyInto(out *RunnerSpec) {
|
func (in *RunnerSpec) DeepCopyInto(out *RunnerSpec) {
|
||||||
*out = *in
|
*out = *in
|
||||||
if in.Labels != nil {
|
in.RunnerConfig.DeepCopyInto(&out.RunnerConfig)
|
||||||
in, out := &in.Labels, &out.Labels
|
in.RunnerPodSpec.DeepCopyInto(&out.RunnerPodSpec)
|
||||||
*out = make([]string, len(*in))
|
|
||||||
copy(*out, *in)
|
|
||||||
}
|
|
||||||
if in.Containers != nil {
|
|
||||||
in, out := &in.Containers, &out.Containers
|
|
||||||
*out = make([]v1.Container, len(*in))
|
|
||||||
for i := range *in {
|
|
||||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
in.DockerdContainerResources.DeepCopyInto(&out.DockerdContainerResources)
|
|
||||||
if in.DockerVolumeMounts != nil {
|
|
||||||
in, out := &in.DockerVolumeMounts, &out.DockerVolumeMounts
|
|
||||||
*out = make([]v1.VolumeMount, len(*in))
|
|
||||||
for i := range *in {
|
|
||||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
in.Resources.DeepCopyInto(&out.Resources)
|
|
||||||
if in.VolumeMounts != nil {
|
|
||||||
in, out := &in.VolumeMounts, &out.VolumeMounts
|
|
||||||
*out = make([]v1.VolumeMount, len(*in))
|
|
||||||
for i := range *in {
|
|
||||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if in.EnvFrom != nil {
|
|
||||||
in, out := &in.EnvFrom, &out.EnvFrom
|
|
||||||
*out = make([]v1.EnvFromSource, len(*in))
|
|
||||||
for i := range *in {
|
|
||||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if in.Env != nil {
|
|
||||||
in, out := &in.Env, &out.Env
|
|
||||||
*out = make([]v1.EnvVar, len(*in))
|
|
||||||
for i := range *in {
|
|
||||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if in.Volumes != nil {
|
|
||||||
in, out := &in.Volumes, &out.Volumes
|
|
||||||
*out = make([]v1.Volume, len(*in))
|
|
||||||
for i := range *in {
|
|
||||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if in.InitContainers != nil {
|
|
||||||
in, out := &in.InitContainers, &out.InitContainers
|
|
||||||
*out = make([]v1.Container, len(*in))
|
|
||||||
for i := range *in {
|
|
||||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if in.SidecarContainers != nil {
|
|
||||||
in, out := &in.SidecarContainers, &out.SidecarContainers
|
|
||||||
*out = make([]v1.Container, len(*in))
|
|
||||||
for i := range *in {
|
|
||||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if in.NodeSelector != nil {
|
|
||||||
in, out := &in.NodeSelector, &out.NodeSelector
|
|
||||||
*out = make(map[string]string, len(*in))
|
|
||||||
for key, val := range *in {
|
|
||||||
(*out)[key] = val
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if in.AutomountServiceAccountToken != nil {
|
|
||||||
in, out := &in.AutomountServiceAccountToken, &out.AutomountServiceAccountToken
|
|
||||||
*out = new(bool)
|
|
||||||
**out = **in
|
|
||||||
}
|
|
||||||
if in.SecurityContext != nil {
|
|
||||||
in, out := &in.SecurityContext, &out.SecurityContext
|
|
||||||
*out = new(v1.PodSecurityContext)
|
|
||||||
(*in).DeepCopyInto(*out)
|
|
||||||
}
|
|
||||||
if in.ImagePullSecrets != nil {
|
|
||||||
in, out := &in.ImagePullSecrets, &out.ImagePullSecrets
|
|
||||||
*out = make([]v1.LocalObjectReference, len(*in))
|
|
||||||
copy(*out, *in)
|
|
||||||
}
|
|
||||||
if in.Affinity != nil {
|
|
||||||
in, out := &in.Affinity, &out.Affinity
|
|
||||||
*out = new(v1.Affinity)
|
|
||||||
(*in).DeepCopyInto(*out)
|
|
||||||
}
|
|
||||||
if in.Tolerations != nil {
|
|
||||||
in, out := &in.Tolerations, &out.Tolerations
|
|
||||||
*out = make([]v1.Toleration, len(*in))
|
|
||||||
for i := range *in {
|
|
||||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if in.EphemeralContainers != nil {
|
|
||||||
in, out := &in.EphemeralContainers, &out.EphemeralContainers
|
|
||||||
*out = make([]v1.EphemeralContainer, len(*in))
|
|
||||||
for i := range *in {
|
|
||||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if in.TerminationGracePeriodSeconds != nil {
|
|
||||||
in, out := &in.TerminationGracePeriodSeconds, &out.TerminationGracePeriodSeconds
|
|
||||||
*out = new(int64)
|
|
||||||
**out = **in
|
|
||||||
}
|
|
||||||
if in.DockerdWithinRunnerContainer != nil {
|
|
||||||
in, out := &in.DockerdWithinRunnerContainer, &out.DockerdWithinRunnerContainer
|
|
||||||
*out = new(bool)
|
|
||||||
**out = **in
|
|
||||||
}
|
|
||||||
if in.DockerEnabled != nil {
|
|
||||||
in, out := &in.DockerEnabled, &out.DockerEnabled
|
|
||||||
*out = new(bool)
|
|
||||||
**out = **in
|
|
||||||
}
|
|
||||||
if in.DockerMTU != nil {
|
|
||||||
in, out := &in.DockerMTU, &out.DockerMTU
|
|
||||||
*out = new(int64)
|
|
||||||
**out = **in
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunnerSpec.
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunnerSpec.
|
||||||
@@ -811,3 +1071,26 @@ func (in *ScaleUpTrigger) DeepCopy() *ScaleUpTrigger {
|
|||||||
in.DeepCopyInto(out)
|
in.DeepCopyInto(out)
|
||||||
return out
|
return out
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||||
|
func (in *ScheduledOverride) DeepCopyInto(out *ScheduledOverride) {
|
||||||
|
*out = *in
|
||||||
|
in.StartTime.DeepCopyInto(&out.StartTime)
|
||||||
|
in.EndTime.DeepCopyInto(&out.EndTime)
|
||||||
|
if in.MinReplicas != nil {
|
||||||
|
in, out := &in.MinReplicas, &out.MinReplicas
|
||||||
|
*out = new(int)
|
||||||
|
**out = **in
|
||||||
|
}
|
||||||
|
in.RecurrenceRule.DeepCopyInto(&out.RecurrenceRule)
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduledOverride.
|
||||||
|
func (in *ScheduledOverride) DeepCopy() *ScheduledOverride {
|
||||||
|
if in == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
out := new(ScheduledOverride)
|
||||||
|
in.DeepCopyInto(out)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|||||||
@@ -21,3 +21,5 @@
|
|||||||
.idea/
|
.idea/
|
||||||
*.tmproj
|
*.tmproj
|
||||||
.vscode/
|
.vscode/
|
||||||
|
# Docs
|
||||||
|
docs/
|
||||||
@@ -15,17 +15,16 @@ type: application
|
|||||||
# This is the chart version. This version number should be incremented each time you make changes
|
# This is the chart version. This version number should be incremented each time you make changes
|
||||||
# to the chart and its templates, including the app version.
|
# to the chart and its templates, including the app version.
|
||||||
# Versions are expected to follow Semantic Versioning (https://semver.org/)
|
# Versions are expected to follow Semantic Versioning (https://semver.org/)
|
||||||
version: 0.10.5
|
version: 0.12.8
|
||||||
|
|
||||||
home: https://github.com/summerwind/actions-runner-controller
|
# Used as the default manager tag value when no tag property is provided in the values.yaml
|
||||||
|
appVersion: 0.19.0
|
||||||
|
|
||||||
|
home: https://github.com/actions-runner-controller/actions-runner-controller
|
||||||
|
|
||||||
sources:
|
sources:
|
||||||
- https://github.com/summerwind/actions-runner-controller
|
- https://github.com/actions-runner-controller/actions-runner-controller
|
||||||
|
|
||||||
maintainers:
|
maintainers:
|
||||||
- name: summerwind
|
- name: actions-runner-controller
|
||||||
email: contact@summerwind.jp
|
url: https://github.com/actions-runner-controller
|
||||||
url: https://github.com/summerwind
|
|
||||||
- name: funkypenguin
|
|
||||||
email: davidy@funkypenguin.co.nz
|
|
||||||
url: https://www.funkypenguin.co.nz
|
|
||||||
|
|||||||
86
charts/actions-runner-controller/README.md
Normal file
86
charts/actions-runner-controller/README.md
Normal file
@@ -0,0 +1,86 @@
|
|||||||
|
## Docs
|
||||||
|
|
||||||
|
All additional docs are kept in the `docs/` folder, this README is solely for documenting the values.yaml keys and values
|
||||||
|
|
||||||
|
## Values
|
||||||
|
|
||||||
|
_The values are documented as of HEAD_
|
||||||
|
|
||||||
|
_Default values are the defaults set in the charts values.yaml, some properties have default configurations in the code for when the property is omitted or invalid_
|
||||||
|
|
||||||
|
| Key | Description | Default |
|
||||||
|
|----------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------|
|
||||||
|
| `labels` | Set labels to apply to all resources in the chart | |
|
||||||
|
| `replicaCount` | Set the number of controller pods | 1 |
|
||||||
|
| `syncPeriod` | Set the period in which the controler reconciles the desired runners count | 10m |
|
||||||
|
| `githubAPICacheDuration` | Set the cache period for API calls | |
|
||||||
|
| `githubEnterpriseServerURL` | Set the URL for a self-hosted GitHub Enterprise Server | |
|
||||||
|
| `logLevel` | Set the log level of the controller container | |
|
||||||
|
| `authSecret.create` | Deploy the controller auth secret | false |
|
||||||
|
| `authSecret.name` | Set the name of the auth secret | controller-manager |
|
||||||
|
| `authSecret.github_app_id` | The ID of your GitHub App. **This can't be set at the same time as `authSecret.github_token`** | |
|
||||||
|
| `authSecret.github_app_installation_id` | The ID of your GitHub App installation. **This can't be set at the same time as `authSecret.github_token`** | |
|
||||||
|
| `authSecret.github_app_private_key` | The multiline string of your GitHub App's private key. **This can't be set at the same time as `authSecret.github_token`** | |
|
||||||
|
| `authSecret.github_token` | Your chosen GitHub PAT token. **This can't be set at the same time as the `authSecret.github_app_*`** | |
|
||||||
|
| `dockerRegistryMirror` | The default Docker Registry Mirror used by runners. |
|
||||||
|
| `image.repository` | The "repository/image" of the controller container | summerwind/actions-runner-controller |
|
||||||
|
| `image.tag` | The tag of the controller container | |
|
||||||
|
| `image.actionsRunnerRepositoryAndTag` | The "repository/image" of the actions runner container | summerwind/actions-runner:latest |
|
||||||
|
| `image.dindSidecarRepositoryAndTag` | The "repository/image" of the dind sidecar container | docker:dind |
|
||||||
|
| `image.pullPolicy` | The pull policy of the controller image | IfNotPresent |
|
||||||
|
| `metrics.serviceMonitor` | Deploy serviceMonitor kind for for use with prometheus-operator CRDs | false |
|
||||||
|
| `metrics.port` | Set port of metrics service | 8443 |
|
||||||
|
| `metrics.proxy.enabled` | Deploy kube-rbac-proxy container in controller pod | true |
|
||||||
|
| `metrics.proxy.image.repository` | The "repository/image" of the kube-proxy container | quay.io/brancz/kube-rbac-proxy |
|
||||||
|
| `metrics.proxy.image.tag` | The tag of the kube-proxy image to use when pulling the container | v0.10.0 |
|
||||||
|
| `metrics.serviceMonitorLabels` | Set labels to apply to ServiceMonitor resources | |
|
||||||
|
| `imagePullSecrets` | Specifies the secret to be used when pulling the controller pod containers | |
|
||||||
|
| `fullNameOverride` | Override the full resource names | |
|
||||||
|
| `nameOverride` | Override the resource name prefix | |
|
||||||
|
| `serviceAccont.annotations` | Set annotations to the service account | |
|
||||||
|
| `serviceAccount.create` | Deploy the controller pod under a service account | true |
|
||||||
|
| `podAnnotations` | Set annotations for the controller pod | |
|
||||||
|
| `podLabels` | Set labels for the controller pod | |
|
||||||
|
| `serviceAccount.name` | Set the name of the service account | |
|
||||||
|
| `securityContext` | Set the security context for each container in the controller pod | |
|
||||||
|
| `podSecurityContext` | Set the security context to controller pod | |
|
||||||
|
| `service.port` | Set controller service type | |
|
||||||
|
| `service.type` | Set controller service ports | |
|
||||||
|
| `topologySpreadConstraints` | Set the controller pod topologySpreadConstraints | |
|
||||||
|
| `nodeSelector` | Set the controller pod nodeSelector | |
|
||||||
|
| `resources` | Set the controller pod resources | |
|
||||||
|
| `affinity` | Set the controller pod affinity rules | |
|
||||||
|
| `tolerations` | Set the controller pod tolerations | |
|
||||||
|
| `env` | Set environment variables for the controller container | |
|
||||||
|
| `priorityClassName` | Set the controller pod priorityClassName | |
|
||||||
|
| `scope.watchNamespace` | Tells the controller which namespace to watch if `scope.singleNamespace` is true | |
|
||||||
|
| `scope.singleNamespace` | Limit the controller to watch a single namespace | false |
|
||||||
|
| `githubWebhookServer.logLevel` | Set the log level of the githubWebhookServer container | |
|
||||||
|
| `githubWebhookServer.replicaCount` | Set the number of webhook server pods | 1 |
|
||||||
|
| `githubWebhookServer.syncPeriod` | Set the period in which the controller reconciles the resources | 10m |
|
||||||
|
| `githubWebhookServer.enabled` | Deploy the webhook server pod | false |
|
||||||
|
| `githubWebhookServer.secret.create` | Deploy the webhook hook secret | false |
|
||||||
|
| `githubWebhookServer.secret.name` | Set the name of the webhook hook secret | github-webhook-server |
|
||||||
|
| `githubWebhookServer.secret.github_webhook_secret_token` | Set the webhook secret token value | |
|
||||||
|
| `githubWebhookServer.imagePullSecrets` | Specifies the secret to be used when pulling the githubWebhookServer pod containers | |
|
||||||
|
| `githubWebhookServer.nameOveride` | Override the resource name prefix | |
|
||||||
|
| `githubWebhookServer.fullNameOveride` | Override the full resource names | |
|
||||||
|
| `githubWebhookServer.serviceAccount.create` | Deploy the githubWebhookServer under a service account | true |
|
||||||
|
| `githubWebhookServer.serviceAccount.annotations` | Set annotations for the service account | |
|
||||||
|
| `githubWebhookServer.serviceAccount.name` | Set the service account name | |
|
||||||
|
| `githubWebhookServer.podAnnotations` | Set annotations for the githubWebhookServer pod | |
|
||||||
|
| `githubWebhookServer.podLabels` | Set labels for the githubWebhookServer pod | |
|
||||||
|
| `githubWebhookServer.podSecurityContext` | Set the security context to githubWebhookServer pod | |
|
||||||
|
| `githubWebhookServer.securityContext` | Set the security context for each container in the githubWebhookServer pod | |
|
||||||
|
| `githubWebhookServer.resources` | Set the githubWebhookServer pod resources | |
|
||||||
|
| `githubWebhookServer.topologySpreadConstraints` | Set the githubWebhookServer pod topologySpreadConstraints | |
|
||||||
|
| `githubWebhookServer.nodeSelector` | Set the githubWebhookServer pod nodeSelector | |
|
||||||
|
| `githubWebhookServer.tolerations` | Set the githubWebhookServer pod tolerations | |
|
||||||
|
| `githubWebhookServer.affinity` | Set the githubWebhookServer pod affinity rules | |
|
||||||
|
| `githubWebhookServer.priorityClassName` | Set the githubWebhookServer pod priorityClassName | |
|
||||||
|
| `githubWebhookServer.service.type` | Set githubWebhookServer service type | |
|
||||||
|
| `githubWebhookServer.service.ports` | Set githubWebhookServer service ports | `[{"port":80, "targetPort:"http", "protocol":"TCP", "name":"http"}]` |
|
||||||
|
| `githubWebhookServer.ingress.enabled` | Deploy an ingress kind for the githubWebhookServer | false |
|
||||||
|
| `githubWebhookServer.ingress.annotations` | Set annotations for the ingress kind | |
|
||||||
|
| `githubWebhookServer.ingress.hosts` | Set hosts configuration for ingress | `[{"host": "chart-example.local", "paths": []}]` |
|
||||||
|
| `githubWebhookServer.ingress.tls` | Set tls configuration for ingress | |
|
||||||
@@ -1,227 +1,304 @@
|
|||||||
|
|
||||||
---
|
---
|
||||||
apiVersion: apiextensions.k8s.io/v1beta1
|
apiVersion: apiextensions.k8s.io/v1
|
||||||
kind: CustomResourceDefinition
|
kind: CustomResourceDefinition
|
||||||
metadata:
|
metadata:
|
||||||
annotations:
|
annotations:
|
||||||
controller-gen.kubebuilder.io/version: v0.3.0
|
controller-gen.kubebuilder.io/version: v0.6.0
|
||||||
creationTimestamp: null
|
creationTimestamp: null
|
||||||
name: horizontalrunnerautoscalers.actions.summerwind.dev
|
name: horizontalrunnerautoscalers.actions.summerwind.dev
|
||||||
spec:
|
spec:
|
||||||
additionalPrinterColumns:
|
|
||||||
- JSONPath: .spec.minReplicas
|
|
||||||
name: Min
|
|
||||||
type: number
|
|
||||||
- JSONPath: .spec.maxReplicas
|
|
||||||
name: Max
|
|
||||||
type: number
|
|
||||||
- JSONPath: .status.desiredReplicas
|
|
||||||
name: Desired
|
|
||||||
type: number
|
|
||||||
group: actions.summerwind.dev
|
group: actions.summerwind.dev
|
||||||
names:
|
names:
|
||||||
kind: HorizontalRunnerAutoscaler
|
kind: HorizontalRunnerAutoscaler
|
||||||
listKind: HorizontalRunnerAutoscalerList
|
listKind: HorizontalRunnerAutoscalerList
|
||||||
plural: horizontalrunnerautoscalers
|
plural: horizontalrunnerautoscalers
|
||||||
|
shortNames:
|
||||||
|
- hra
|
||||||
singular: horizontalrunnerautoscaler
|
singular: horizontalrunnerautoscaler
|
||||||
scope: Namespaced
|
scope: Namespaced
|
||||||
subresources:
|
versions:
|
||||||
status: {}
|
- additionalPrinterColumns:
|
||||||
validation:
|
- jsonPath: .spec.minReplicas
|
||||||
openAPIV3Schema:
|
name: Min
|
||||||
description: HorizontalRunnerAutoscaler is the Schema for the horizontalrunnerautoscaler
|
type: number
|
||||||
API
|
- jsonPath: .spec.maxReplicas
|
||||||
properties:
|
name: Max
|
||||||
apiVersion:
|
type: number
|
||||||
description: 'APIVersion defines the versioned schema of this representation
|
- jsonPath: .status.desiredReplicas
|
||||||
of an object. Servers should convert recognized schemas to the latest
|
name: Desired
|
||||||
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
|
type: number
|
||||||
type: string
|
- jsonPath: .status.scheduledOverridesSummary
|
||||||
kind:
|
name: Schedule
|
||||||
description: 'Kind is a string value representing the REST resource this
|
type: string
|
||||||
object represents. Servers may infer this from the endpoint the client
|
name: v1alpha1
|
||||||
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
|
schema:
|
||||||
type: string
|
openAPIV3Schema:
|
||||||
metadata:
|
description: HorizontalRunnerAutoscaler is the Schema for the horizontalrunnerautoscaler
|
||||||
type: object
|
API
|
||||||
spec:
|
properties:
|
||||||
description: HorizontalRunnerAutoscalerSpec defines the desired state of
|
apiVersion:
|
||||||
HorizontalRunnerAutoscaler
|
description: 'APIVersion defines the versioned schema of this representation
|
||||||
properties:
|
of an object. Servers should convert recognized schemas to the latest
|
||||||
capacityReservations:
|
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
|
||||||
items:
|
type: string
|
||||||
description: CapacityReservation specifies the number of replicas
|
kind:
|
||||||
temporarily added to the scale target until ExpirationTime.
|
description: 'Kind is a string value representing the REST resource this
|
||||||
|
object represents. Servers may infer this from the endpoint the client
|
||||||
|
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
|
||||||
|
type: string
|
||||||
|
metadata:
|
||||||
|
type: object
|
||||||
|
spec:
|
||||||
|
description: HorizontalRunnerAutoscalerSpec defines the desired state
|
||||||
|
of HorizontalRunnerAutoscaler
|
||||||
|
properties:
|
||||||
|
capacityReservations:
|
||||||
|
items:
|
||||||
|
description: CapacityReservation specifies the number of replicas
|
||||||
|
temporarily added to the scale target until ExpirationTime.
|
||||||
|
properties:
|
||||||
|
expirationTime:
|
||||||
|
format: date-time
|
||||||
|
type: string
|
||||||
|
name:
|
||||||
|
type: string
|
||||||
|
replicas:
|
||||||
|
type: integer
|
||||||
|
type: object
|
||||||
|
type: array
|
||||||
|
maxReplicas:
|
||||||
|
description: MinReplicas is the maximum number of replicas the deployment
|
||||||
|
is allowed to scale
|
||||||
|
type: integer
|
||||||
|
metrics:
|
||||||
|
description: Metrics is the collection of various metric targets to
|
||||||
|
calculate desired number of runners
|
||||||
|
items:
|
||||||
|
properties:
|
||||||
|
repositoryNames:
|
||||||
|
description: RepositoryNames is the list of repository names
|
||||||
|
to be used for calculating the metric. For example, a repository
|
||||||
|
name is the REPO part of `github.com/USER/REPO`.
|
||||||
|
items:
|
||||||
|
type: string
|
||||||
|
type: array
|
||||||
|
scaleDownAdjustment:
|
||||||
|
description: ScaleDownAdjustment is the number of runners removed
|
||||||
|
on scale-down. You can only specify either ScaleDownFactor
|
||||||
|
or ScaleDownAdjustment.
|
||||||
|
type: integer
|
||||||
|
scaleDownFactor:
|
||||||
|
description: ScaleDownFactor is the multiplicative factor applied
|
||||||
|
to the current number of runners used to determine how many
|
||||||
|
pods should be removed.
|
||||||
|
type: string
|
||||||
|
scaleDownThreshold:
|
||||||
|
description: ScaleDownThreshold is the percentage of busy runners
|
||||||
|
less than which will trigger the hpa to scale the runners
|
||||||
|
down.
|
||||||
|
type: string
|
||||||
|
scaleUpAdjustment:
|
||||||
|
description: ScaleUpAdjustment is the number of runners added
|
||||||
|
on scale-up. You can only specify either ScaleUpFactor or
|
||||||
|
ScaleUpAdjustment.
|
||||||
|
type: integer
|
||||||
|
scaleUpFactor:
|
||||||
|
description: ScaleUpFactor is the multiplicative factor applied
|
||||||
|
to the current number of runners used to determine how many
|
||||||
|
pods should be added.
|
||||||
|
type: string
|
||||||
|
scaleUpThreshold:
|
||||||
|
description: ScaleUpThreshold is the percentage of busy runners
|
||||||
|
greater than which will trigger the hpa to scale runners up.
|
||||||
|
type: string
|
||||||
|
type:
|
||||||
|
description: Type is the type of metric to be used for autoscaling.
|
||||||
|
The only supported Type is TotalNumberOfQueuedAndInProgressWorkflowRuns
|
||||||
|
type: string
|
||||||
|
type: object
|
||||||
|
type: array
|
||||||
|
minReplicas:
|
||||||
|
description: MinReplicas is the minimum number of replicas the deployment
|
||||||
|
is allowed to scale
|
||||||
|
type: integer
|
||||||
|
scaleDownDelaySecondsAfterScaleOut:
|
||||||
|
description: ScaleDownDelaySecondsAfterScaleUp is the approximate
|
||||||
|
delay for a scale down followed by a scale up Used to prevent flapping
|
||||||
|
(down->up->down->... loop)
|
||||||
|
type: integer
|
||||||
|
scaleTargetRef:
|
||||||
|
description: ScaleTargetRef sis the reference to scaled resource like
|
||||||
|
RunnerDeployment
|
||||||
properties:
|
properties:
|
||||||
expirationTime:
|
kind:
|
||||||
format: date-time
|
description: Kind is the type of resource being referenced
|
||||||
|
enum:
|
||||||
|
- RunnerDeployment
|
||||||
|
- RunnerSet
|
||||||
type: string
|
type: string
|
||||||
name:
|
name:
|
||||||
|
description: Name is the name of resource being referenced
|
||||||
type: string
|
type: string
|
||||||
replicas:
|
|
||||||
type: integer
|
|
||||||
type: object
|
type: object
|
||||||
type: array
|
scaleUpTriggers:
|
||||||
maxReplicas:
|
description: "ScaleUpTriggers is an experimental feature to increase
|
||||||
description: MinReplicas is the maximum number of replicas the deployment
|
the desired replicas by 1 on each webhook requested received by
|
||||||
is allowed to scale
|
the webhookBasedAutoscaler. \n This feature requires you to also
|
||||||
type: integer
|
enable and deploy the webhookBasedAutoscaler onto your cluster.
|
||||||
metrics:
|
\n Note that the added runners remain until the next sync period
|
||||||
description: Metrics is the collection of various metric targets to
|
at least, and they may or may not be used by GitHub Actions depending
|
||||||
calculate desired number of runners
|
on the timing. They are intended to be used to gain \"resource slack\"
|
||||||
items:
|
immediately after you receive a webhook from GitHub, so that you
|
||||||
properties:
|
can loosely expect MinReplicas runners to be always available."
|
||||||
repositoryNames:
|
items:
|
||||||
description: RepositoryNames is the list of repository names to
|
properties:
|
||||||
be used for calculating the metric. For example, a repository
|
amount:
|
||||||
name is the REPO part of `github.com/USER/REPO`.
|
type: integer
|
||||||
items:
|
duration:
|
||||||
type: string
|
type: string
|
||||||
type: array
|
githubEvent:
|
||||||
scaleDownAdjustment:
|
properties:
|
||||||
description: ScaleDownAdjustment is the number of runners removed
|
checkRun:
|
||||||
on scale-down. You can only specify either ScaleDownFactor or
|
description: https://docs.github.com/en/actions/reference/events-that-trigger-workflows#check_run
|
||||||
ScaleDownAdjustment.
|
properties:
|
||||||
type: integer
|
names:
|
||||||
scaleDownFactor:
|
description: Names is a list of GitHub Actions glob
|
||||||
description: ScaleDownFactor is the multiplicative factor applied
|
patterns. Any check_run event whose name matches one
|
||||||
to the current number of runners used to determine how many
|
of patterns in the list can trigger autoscaling. Note
|
||||||
pods should be removed.
|
that check_run name seem to equal to the job name
|
||||||
type: string
|
you've defined in your actions workflow yaml file.
|
||||||
scaleDownThreshold:
|
So it is very likely that you can utilize this to
|
||||||
description: ScaleDownThreshold is the percentage of busy runners
|
trigger depending on the job.
|
||||||
less than which will trigger the hpa to scale the runners down.
|
items:
|
||||||
type: string
|
type: string
|
||||||
scaleUpAdjustment:
|
type: array
|
||||||
description: ScaleUpAdjustment is the number of runners added
|
repositories:
|
||||||
on scale-up. You can only specify either ScaleUpFactor or ScaleUpAdjustment.
|
description: Repositories is a list of GitHub repositories.
|
||||||
type: integer
|
Any check_run event whose repository matches one of
|
||||||
scaleUpFactor:
|
repositories in the list can trigger autoscaling.
|
||||||
description: ScaleUpFactor is the multiplicative factor applied
|
items:
|
||||||
to the current number of runners used to determine how many
|
type: string
|
||||||
pods should be added.
|
type: array
|
||||||
type: string
|
status:
|
||||||
scaleUpThreshold:
|
|
||||||
description: ScaleUpThreshold is the percentage of busy runners
|
|
||||||
greater than which will trigger the hpa to scale runners up.
|
|
||||||
type: string
|
|
||||||
type:
|
|
||||||
description: Type is the type of metric to be used for autoscaling.
|
|
||||||
The only supported Type is TotalNumberOfQueuedAndInProgressWorkflowRuns
|
|
||||||
type: string
|
|
||||||
type: object
|
|
||||||
type: array
|
|
||||||
minReplicas:
|
|
||||||
description: MinReplicas is the minimum number of replicas the deployment
|
|
||||||
is allowed to scale
|
|
||||||
type: integer
|
|
||||||
scaleDownDelaySecondsAfterScaleOut:
|
|
||||||
description: ScaleDownDelaySecondsAfterScaleUp is the approximate delay
|
|
||||||
for a scale down followed by a scale up Used to prevent flapping (down->up->down->...
|
|
||||||
loop)
|
|
||||||
type: integer
|
|
||||||
scaleTargetRef:
|
|
||||||
description: ScaleTargetRef sis the reference to scaled resource like
|
|
||||||
RunnerDeployment
|
|
||||||
properties:
|
|
||||||
name:
|
|
||||||
type: string
|
|
||||||
type: object
|
|
||||||
scaleUpTriggers:
|
|
||||||
description: "ScaleUpTriggers is an experimental feature to increase
|
|
||||||
the desired replicas by 1 on each webhook requested received by the
|
|
||||||
webhookBasedAutoscaler. \n This feature requires you to also enable
|
|
||||||
and deploy the webhookBasedAutoscaler onto your cluster. \n Note that
|
|
||||||
the added runners remain until the next sync period at least, and
|
|
||||||
they may or may not be used by GitHub Actions depending on the timing.
|
|
||||||
They are intended to be used to gain \"resource slack\" immediately
|
|
||||||
after you receive a webhook from GitHub, so that you can loosely expect
|
|
||||||
MinReplicas runners to be always available."
|
|
||||||
items:
|
|
||||||
properties:
|
|
||||||
amount:
|
|
||||||
type: integer
|
|
||||||
duration:
|
|
||||||
type: string
|
|
||||||
githubEvent:
|
|
||||||
properties:
|
|
||||||
checkRun:
|
|
||||||
description: https://docs.github.com/en/actions/reference/events-that-trigger-workflows#check_run
|
|
||||||
properties:
|
|
||||||
names:
|
|
||||||
description: Names is a list of GitHub Actions glob patterns.
|
|
||||||
Any check_run event whose name matches one of patterns
|
|
||||||
in the list can trigger autoscaling. Note that check_run
|
|
||||||
name seem to equal to the job name you've defined in
|
|
||||||
your actions workflow yaml file. So it is very likely
|
|
||||||
that you can utilize this to trigger depending on the
|
|
||||||
job.
|
|
||||||
items:
|
|
||||||
type: string
|
type: string
|
||||||
type: array
|
types:
|
||||||
status:
|
items:
|
||||||
type: string
|
type: string
|
||||||
types:
|
type: array
|
||||||
items:
|
type: object
|
||||||
type: string
|
pullRequest:
|
||||||
type: array
|
description: https://docs.github.com/en/actions/reference/events-that-trigger-workflows#pull_request
|
||||||
type: object
|
properties:
|
||||||
pullRequest:
|
branches:
|
||||||
description: https://docs.github.com/en/actions/reference/events-that-trigger-workflows#pull_request
|
items:
|
||||||
properties:
|
type: string
|
||||||
branches:
|
type: array
|
||||||
items:
|
types:
|
||||||
type: string
|
items:
|
||||||
type: array
|
type: string
|
||||||
types:
|
type: array
|
||||||
items:
|
type: object
|
||||||
type: string
|
push:
|
||||||
type: array
|
description: PushSpec is the condition for triggering scale-up
|
||||||
type: object
|
on push event Also see https://docs.github.com/en/actions/reference/events-that-trigger-workflows#push
|
||||||
push:
|
type: object
|
||||||
description: PushSpec is the condition for triggering scale-up
|
type: object
|
||||||
on push event Also see https://docs.github.com/en/actions/reference/events-that-trigger-workflows#push
|
type: object
|
||||||
type: object
|
type: array
|
||||||
type: object
|
scheduledOverrides:
|
||||||
type: object
|
description: ScheduledOverrides is the list of ScheduledOverride.
|
||||||
type: array
|
It can be used to override a few fields of HorizontalRunnerAutoscalerSpec
|
||||||
type: object
|
on schedule. The earlier a scheduled override is, the higher it
|
||||||
status:
|
is prioritized.
|
||||||
properties:
|
items:
|
||||||
cacheEntries:
|
description: ScheduledOverride can be used to override a few fields
|
||||||
items:
|
of HorizontalRunnerAutoscalerSpec on schedule. A schedule can
|
||||||
properties:
|
optionally be recurring, so that the correspoding override happens
|
||||||
expirationTime:
|
every day, week, month, or year.
|
||||||
format: date-time
|
properties:
|
||||||
type: string
|
endTime:
|
||||||
key:
|
description: EndTime is the time at which the first override
|
||||||
type: string
|
ends.
|
||||||
value:
|
format: date-time
|
||||||
type: integer
|
type: string
|
||||||
type: object
|
minReplicas:
|
||||||
type: array
|
description: MinReplicas is the number of runners while overriding.
|
||||||
desiredReplicas:
|
If omitted, it doesn't override minReplicas.
|
||||||
description: DesiredReplicas is the total number of desired, non-terminated
|
minimum: 0
|
||||||
and latest pods to be set for the primary RunnerSet This doesn't include
|
nullable: true
|
||||||
outdated pods while upgrading the deployment and replacing the runnerset.
|
type: integer
|
||||||
type: integer
|
recurrenceRule:
|
||||||
lastSuccessfulScaleOutTime:
|
properties:
|
||||||
format: date-time
|
frequency:
|
||||||
nullable: true
|
description: Frequency is the name of a predefined interval
|
||||||
type: string
|
of each recurrence. The valid values are "Daily", "Weekly",
|
||||||
observedGeneration:
|
"Monthly", and "Yearly". If empty, the corresponding override
|
||||||
description: ObservedGeneration is the most recent generation observed
|
happens only once.
|
||||||
for the target. It corresponds to e.g. RunnerDeployment's generation,
|
enum:
|
||||||
which is updated on mutation by the API Server.
|
- Daily
|
||||||
format: int64
|
- Weekly
|
||||||
type: integer
|
- Monthly
|
||||||
type: object
|
- Yearly
|
||||||
type: object
|
type: string
|
||||||
version: v1alpha1
|
untilTime:
|
||||||
versions:
|
description: UntilTime is the time of the final recurrence.
|
||||||
- name: v1alpha1
|
If empty, the schedule recurs forever.
|
||||||
|
format: date-time
|
||||||
|
type: string
|
||||||
|
type: object
|
||||||
|
startTime:
|
||||||
|
description: StartTime is the time at which the first override
|
||||||
|
starts.
|
||||||
|
format: date-time
|
||||||
|
type: string
|
||||||
|
required:
|
||||||
|
- endTime
|
||||||
|
- startTime
|
||||||
|
type: object
|
||||||
|
type: array
|
||||||
|
type: object
|
||||||
|
status:
|
||||||
|
properties:
|
||||||
|
cacheEntries:
|
||||||
|
items:
|
||||||
|
properties:
|
||||||
|
expirationTime:
|
||||||
|
format: date-time
|
||||||
|
type: string
|
||||||
|
key:
|
||||||
|
type: string
|
||||||
|
value:
|
||||||
|
type: integer
|
||||||
|
type: object
|
||||||
|
type: array
|
||||||
|
desiredReplicas:
|
||||||
|
description: DesiredReplicas is the total number of desired, non-terminated
|
||||||
|
and latest pods to be set for the primary RunnerSet This doesn't
|
||||||
|
include outdated pods while upgrading the deployment and replacing
|
||||||
|
the runnerset.
|
||||||
|
type: integer
|
||||||
|
lastSuccessfulScaleOutTime:
|
||||||
|
format: date-time
|
||||||
|
nullable: true
|
||||||
|
type: string
|
||||||
|
observedGeneration:
|
||||||
|
description: ObservedGeneration is the most recent generation observed
|
||||||
|
for the target. It corresponds to e.g. RunnerDeployment's generation,
|
||||||
|
which is updated on mutation by the API Server.
|
||||||
|
format: int64
|
||||||
|
type: integer
|
||||||
|
scheduledOverridesSummary:
|
||||||
|
description: ScheduledOverridesSummary is the summary of active and
|
||||||
|
upcoming scheduled overrides to be shown in e.g. a column of a `kubectl
|
||||||
|
get hra` output for observability.
|
||||||
|
type: string
|
||||||
|
type: object
|
||||||
|
type: object
|
||||||
served: true
|
served: true
|
||||||
storage: true
|
storage: true
|
||||||
|
subresources:
|
||||||
|
status: {}
|
||||||
status:
|
status:
|
||||||
acceptedNames:
|
acceptedNames:
|
||||||
kind: ""
|
kind: ""
|
||||||
|
|||||||
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
40
charts/actions-runner-controller/docs/UPGRADING.md
Normal file
40
charts/actions-runner-controller/docs/UPGRADING.md
Normal file
@@ -0,0 +1,40 @@
|
|||||||
|
## Upgrading
|
||||||
|
|
||||||
|
This project makes extensive use of CRDs to provide much of its functionality. Helm unfortunately does not support [managing](https://helm.sh/docs/chart_best_practices/custom_resource_definitions/) CRDs by design:
|
||||||
|
|
||||||
|
_The full breakdown as to how they came to this decision and why they have taken the approach they have for dealing with CRDs can be found in [Helm Improvement Proposal 11](https://github.com/helm/community/blob/main/hips/hip-0011.md)_
|
||||||
|
|
||||||
|
```
|
||||||
|
There is no support at this time for upgrading or deleting CRDs using Helm. This was an explicit decision after much
|
||||||
|
community discussion due to the danger for unintentional data loss. Furthermore, there is currently no community
|
||||||
|
consensus around how to handle CRDs and their lifecycle. As this evolves, Helm will add support for those use cases.
|
||||||
|
```
|
||||||
|
|
||||||
|
Helm will do an initial install of CRDs but it will not touch them afterwards (update or delete).
|
||||||
|
|
||||||
|
Additionally, because the project leverages CRDs so extensively you **MUST** run the matching controller app container with its matching CRDs i.e. always redeploy your CRDs if you are changing the app version.
|
||||||
|
|
||||||
|
Due to the above you can't just do a `helm upgrade` to release the latest version of the chart, the best practice steps are recorded below:
|
||||||
|
|
||||||
|
## Steps
|
||||||
|
|
||||||
|
1. Upgrade CRDs
|
||||||
|
|
||||||
|
```shell
|
||||||
|
# REMEMBER TO UPDATE THE CHART_VERSION TO RELEVANT CHART VERISON!!!!
|
||||||
|
CHART_VERSION=0.11.0
|
||||||
|
|
||||||
|
curl -L https://github.com/actions-runner-controller/actions-runner-controller/releases/download/actions-runner-controller-${CHART_VERSION}/actions-runner-controller-${CHART_VERSION}.tgz | tar zxv --strip 1 actions-runner-controller/crds
|
||||||
|
|
||||||
|
kubectl apply -f crds/
|
||||||
|
```
|
||||||
|
|
||||||
|
2. Upgrade the Helm release
|
||||||
|
|
||||||
|
```shell
|
||||||
|
helm upgrade --install \
|
||||||
|
--namespace actions-runner-system \
|
||||||
|
--version ${CHART_VERSION} \
|
||||||
|
actions-runner-controller/actions-runner-controller \
|
||||||
|
actions-runner-controller
|
||||||
|
```
|
||||||
@@ -54,3 +54,7 @@ Create the name of the service account to use
|
|||||||
{{- define "actions-runner-controller-github-webhook-server.roleName" -}}
|
{{- define "actions-runner-controller-github-webhook-server.roleName" -}}
|
||||||
{{- include "actions-runner-controller-github-webhook-server.fullname" . }}
|
{{- include "actions-runner-controller-github-webhook-server.fullname" . }}
|
||||||
{{- end }}
|
{{- end }}
|
||||||
|
|
||||||
|
{{- define "actions-runner-controller-github-webhook-server.serviceMonitorName" -}}
|
||||||
|
{{- include "actions-runner-controller-github-webhook-server.fullname" . | trunc 47 }}-service-monitor
|
||||||
|
{{- end }}
|
||||||
|
|||||||
@@ -92,10 +92,14 @@ Create the name of the service account to use
|
|||||||
{{- include "actions-runner-controller.fullname" . | trunc 55 }}-webhook
|
{{- include "actions-runner-controller.fullname" . | trunc 55 }}-webhook
|
||||||
{{- end }}
|
{{- end }}
|
||||||
|
|
||||||
{{- define "actions-runner-controller.authProxyServiceName" -}}
|
{{- define "actions-runner-controller.metricsServiceName" -}}
|
||||||
{{- include "actions-runner-controller.fullname" . | trunc 47 }}-metrics-service
|
{{- include "actions-runner-controller.fullname" . | trunc 47 }}-metrics-service
|
||||||
{{- end }}
|
{{- end }}
|
||||||
|
|
||||||
|
{{- define "actions-runner-controller.serviceMonitorName" -}}
|
||||||
|
{{- include "actions-runner-controller.fullname" . | trunc 47 }}-service-monitor
|
||||||
|
{{- end }}
|
||||||
|
|
||||||
{{- define "actions-runner-controller.selfsignedIssuerName" -}}
|
{{- define "actions-runner-controller.selfsignedIssuerName" -}}
|
||||||
{{- include "actions-runner-controller.fullname" . }}-selfsigned-issuer
|
{{- include "actions-runner-controller.fullname" . }}-selfsigned-issuer
|
||||||
{{- end }}
|
{{- end }}
|
||||||
|
|||||||
@@ -1,3 +1,4 @@
|
|||||||
|
{{- if .Values.metrics.proxy.enabled }}
|
||||||
apiVersion: rbac.authorization.k8s.io/v1
|
apiVersion: rbac.authorization.k8s.io/v1
|
||||||
kind: ClusterRole
|
kind: ClusterRole
|
||||||
metadata:
|
metadata:
|
||||||
@@ -11,3 +12,4 @@ rules:
|
|||||||
resources:
|
resources:
|
||||||
- subjectaccessreviews
|
- subjectaccessreviews
|
||||||
verbs: ["create"]
|
verbs: ["create"]
|
||||||
|
{{- end }}
|
||||||
|
|||||||
@@ -1,3 +1,4 @@
|
|||||||
|
{{- if .Values.metrics.proxy.enabled }}
|
||||||
apiVersion: rbac.authorization.k8s.io/v1
|
apiVersion: rbac.authorization.k8s.io/v1
|
||||||
kind: ClusterRoleBinding
|
kind: ClusterRoleBinding
|
||||||
metadata:
|
metadata:
|
||||||
@@ -10,3 +11,4 @@ subjects:
|
|||||||
- kind: ServiceAccount
|
- kind: ServiceAccount
|
||||||
name: {{ include "actions-runner-controller.serviceAccountName" . }}
|
name: {{ include "actions-runner-controller.serviceAccountName" . }}
|
||||||
namespace: {{ .Release.Namespace }}
|
namespace: {{ .Release.Namespace }}
|
||||||
|
{{- end }}
|
||||||
|
|||||||
@@ -3,12 +3,12 @@ kind: Service
|
|||||||
metadata:
|
metadata:
|
||||||
labels:
|
labels:
|
||||||
{{- include "actions-runner-controller.labels" . | nindent 4 }}
|
{{- include "actions-runner-controller.labels" . | nindent 4 }}
|
||||||
name: {{ include "actions-runner-controller.authProxyServiceName" . }}
|
name: {{ include "actions-runner-controller.metricsServiceName" . }}
|
||||||
namespace: {{ .Release.Namespace }}
|
namespace: {{ .Release.Namespace }}
|
||||||
spec:
|
spec:
|
||||||
ports:
|
ports:
|
||||||
- name: https
|
- name: metrics-port
|
||||||
port: 8443
|
port: {{ .Values.metrics.port }}
|
||||||
targetPort: https
|
targetPort: metrics-port
|
||||||
selector:
|
selector:
|
||||||
{{- include "actions-runner-controller.selectorLabels" . | nindent 4 }}
|
{{- include "actions-runner-controller.selectorLabels" . | nindent 4 }}
|
||||||
@@ -0,0 +1,24 @@
|
|||||||
|
{{- if .Values.metrics.serviceMonitor }}
|
||||||
|
apiVersion: monitoring.coreos.com/v1
|
||||||
|
kind: ServiceMonitor
|
||||||
|
metadata:
|
||||||
|
labels:
|
||||||
|
{{- include "actions-runner-controller.labels" . | nindent 4 }}
|
||||||
|
{{- with .Values.metrics.serviceMonitorLabels }}
|
||||||
|
{{- toYaml . | nindent 4 }}
|
||||||
|
{{- end }}
|
||||||
|
name: {{ include "actions-runner-controller.serviceMonitorName" . }}
|
||||||
|
spec:
|
||||||
|
endpoints:
|
||||||
|
- path: /metrics
|
||||||
|
port: metrics-port
|
||||||
|
{{- if .Values.metrics.proxy.enabled }}
|
||||||
|
bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
|
||||||
|
scheme: https
|
||||||
|
tlsConfig:
|
||||||
|
insecureSkipVerify: true
|
||||||
|
{{- end }}
|
||||||
|
selector:
|
||||||
|
matchLabels:
|
||||||
|
{{- include "actions-runner-controller.selectorLabels" . | nindent 6 }}
|
||||||
|
{{- end }}
|
||||||
@@ -18,6 +18,9 @@ spec:
|
|||||||
{{- end }}
|
{{- end }}
|
||||||
labels:
|
labels:
|
||||||
{{- include "actions-runner-controller.selectorLabels" . | nindent 8 }}
|
{{- include "actions-runner-controller.selectorLabels" . | nindent 8 }}
|
||||||
|
{{- with .Values.podLabels }}
|
||||||
|
{{- toYaml . | nindent 8 }}
|
||||||
|
{{- end }}
|
||||||
spec:
|
spec:
|
||||||
{{- with .Values.imagePullSecrets }}
|
{{- with .Values.imagePullSecrets }}
|
||||||
imagePullSecrets:
|
imagePullSecrets:
|
||||||
@@ -31,16 +34,32 @@ spec:
|
|||||||
{{- end }}
|
{{- end }}
|
||||||
containers:
|
containers:
|
||||||
- args:
|
- args:
|
||||||
- "--metrics-addr=127.0.0.1:8080"
|
{{- $metricsHost := .Values.metrics.proxy.enabled | ternary "127.0.0.1" "0.0.0.0" }}
|
||||||
|
{{- $metricsPort := .Values.metrics.proxy.enabled | ternary "8080" .Values.metrics.port }}
|
||||||
|
- "--metrics-addr={{ $metricsHost }}:{{ $metricsPort }}"
|
||||||
- "--enable-leader-election"
|
- "--enable-leader-election"
|
||||||
- "--sync-period={{ .Values.syncPeriod }}"
|
- "--sync-period={{ .Values.syncPeriod }}"
|
||||||
- "--docker-image={{ .Values.image.dindSidecarRepositoryAndTag }}"
|
- "--docker-image={{ .Values.image.dindSidecarRepositoryAndTag }}"
|
||||||
|
- "--runner-image={{ .Values.image.actionsRunnerRepositoryAndTag }}"
|
||||||
|
{{- if .Values.dockerRegistryMirror }}
|
||||||
|
- "--docker-registry-mirror={{ .Values.dockerRegistryMirror }}"
|
||||||
|
{{- end }}
|
||||||
{{- if .Values.scope.singleNamespace }}
|
{{- if .Values.scope.singleNamespace }}
|
||||||
- "--watch-namespace={{ default .Release.Namespace .Values.scope.watchNamespace }}"
|
- "--watch-namespace={{ default .Release.Namespace .Values.scope.watchNamespace }}"
|
||||||
{{- end }}
|
{{- end }}
|
||||||
|
{{- if .Values.githubAPICacheDuration }}
|
||||||
|
- "--github-api-cache-duration={{ .Values.githubAPICacheDuration }}"
|
||||||
|
{{- end }}
|
||||||
|
{{- if .Values.logLevel }}
|
||||||
|
- "--log-level={{ .Values.logLevel }}"
|
||||||
|
{{- end }}
|
||||||
command:
|
command:
|
||||||
- "/manager"
|
- "/manager"
|
||||||
env:
|
env:
|
||||||
|
{{- if .Values.githubEnterpriseServerURL }}
|
||||||
|
- name: GITHUB_ENTERPRISE_URL
|
||||||
|
value: {{ .Values.githubEnterpriseServerURL }}
|
||||||
|
{{- end }}
|
||||||
- name: GITHUB_TOKEN
|
- name: GITHUB_TOKEN
|
||||||
valueFrom:
|
valueFrom:
|
||||||
secretKeyRef:
|
secretKeyRef:
|
||||||
@@ -65,13 +84,18 @@ spec:
|
|||||||
- name: {{ $key }}
|
- name: {{ $key }}
|
||||||
value: {{ $val | quote }}
|
value: {{ $val | quote }}
|
||||||
{{- end }}
|
{{- end }}
|
||||||
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
|
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default (cat "v" .Chart.AppVersion | replace " " "") }}"
|
||||||
name: manager
|
name: manager
|
||||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||||
ports:
|
ports:
|
||||||
- containerPort: 9443
|
- containerPort: 9443
|
||||||
name: webhook-server
|
name: webhook-server
|
||||||
protocol: TCP
|
protocol: TCP
|
||||||
|
{{- if not .Values.metrics.proxy.enabled }}
|
||||||
|
- containerPort: {{ .Values.metrics.port }}
|
||||||
|
name: metrics-port
|
||||||
|
protocol: TCP
|
||||||
|
{{- end }}
|
||||||
resources:
|
resources:
|
||||||
{{- toYaml .Values.resources | nindent 12 }}
|
{{- toYaml .Values.resources | nindent 12 }}
|
||||||
securityContext:
|
securityContext:
|
||||||
@@ -85,21 +109,23 @@ spec:
|
|||||||
- mountPath: /tmp/k8s-webhook-server/serving-certs
|
- mountPath: /tmp/k8s-webhook-server/serving-certs
|
||||||
name: cert
|
name: cert
|
||||||
readOnly: true
|
readOnly: true
|
||||||
|
{{- if .Values.metrics.proxy.enabled }}
|
||||||
- args:
|
- args:
|
||||||
- "--secure-listen-address=0.0.0.0:8443"
|
- "--secure-listen-address=0.0.0.0:{{ .Values.metrics.port }}"
|
||||||
- "--upstream=http://127.0.0.1:8080/"
|
- "--upstream=http://127.0.0.1:8080/"
|
||||||
- "--logtostderr=true"
|
- "--logtostderr=true"
|
||||||
- "--v=10"
|
- "--v=10"
|
||||||
image: "{{ .Values.kube_rbac_proxy.image.repository }}:{{ .Values.kube_rbac_proxy.image.tag }}"
|
image: "{{ .Values.metrics.proxy.image.repository }}:{{ .Values.metrics.proxy.image.tag }}"
|
||||||
name: kube-rbac-proxy
|
name: kube-rbac-proxy
|
||||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||||
ports:
|
ports:
|
||||||
- containerPort: 8443
|
- containerPort: {{ .Values.metrics.port }}
|
||||||
name: https
|
name: metrics-port
|
||||||
resources:
|
resources:
|
||||||
{{- toYaml .Values.resources | nindent 12 }}
|
{{- toYaml .Values.resources | nindent 12 }}
|
||||||
securityContext:
|
securityContext:
|
||||||
{{- toYaml .Values.securityContext | nindent 12 }}
|
{{- toYaml .Values.securityContext | nindent 12 }}
|
||||||
|
{{- end }}
|
||||||
terminationGracePeriodSeconds: 10
|
terminationGracePeriodSeconds: 10
|
||||||
volumes:
|
volumes:
|
||||||
- name: secret
|
- name: secret
|
||||||
@@ -123,3 +149,7 @@ spec:
|
|||||||
tolerations:
|
tolerations:
|
||||||
{{- toYaml . | nindent 8 }}
|
{{- toYaml . | nindent 8 }}
|
||||||
{{- end }}
|
{{- end }}
|
||||||
|
{{- with .Values.topologySpreadConstraints }}
|
||||||
|
topologySpreadConstraints:
|
||||||
|
{{- toYaml . | nindent 8 }}
|
||||||
|
{{- end }}
|
||||||
|
|||||||
@@ -19,6 +19,9 @@ spec:
|
|||||||
{{- end }}
|
{{- end }}
|
||||||
labels:
|
labels:
|
||||||
{{- include "actions-runner-controller-github-webhook-server.selectorLabels" . | nindent 8 }}
|
{{- include "actions-runner-controller-github-webhook-server.selectorLabels" . | nindent 8 }}
|
||||||
|
{{- with .Values.githubWebhookServer.podLabels }}
|
||||||
|
{{- toYaml . | nindent 8 }}
|
||||||
|
{{- end }}
|
||||||
spec:
|
spec:
|
||||||
{{- with .Values.githubWebhookServer.imagePullSecrets }}
|
{{- with .Values.githubWebhookServer.imagePullSecrets }}
|
||||||
imagePullSecrets:
|
imagePullSecrets:
|
||||||
@@ -32,8 +35,13 @@ spec:
|
|||||||
{{- end }}
|
{{- end }}
|
||||||
containers:
|
containers:
|
||||||
- args:
|
- args:
|
||||||
- "--metrics-addr=127.0.0.1:8080"
|
{{- $metricsHost := .Values.metrics.proxy.enabled | ternary "127.0.0.1" "0.0.0.0" }}
|
||||||
|
{{- $metricsPort := .Values.metrics.proxy.enabled | ternary "8080" .Values.metrics.port }}
|
||||||
|
- "--metrics-addr={{ $metricsHost }}:{{ $metricsPort }}"
|
||||||
- "--sync-period={{ .Values.githubWebhookServer.syncPeriod }}"
|
- "--sync-period={{ .Values.githubWebhookServer.syncPeriod }}"
|
||||||
|
{{- if .Values.githubWebhookServer.logLevel }}
|
||||||
|
- "--log-level={{ .Values.githubWebhookServer.logLevel }}"
|
||||||
|
{{- end }}
|
||||||
command:
|
command:
|
||||||
- "/github-webhook-server"
|
- "/github-webhook-server"
|
||||||
env:
|
env:
|
||||||
@@ -47,32 +55,39 @@ spec:
|
|||||||
- name: {{ $key }}
|
- name: {{ $key }}
|
||||||
value: {{ $val | quote }}
|
value: {{ $val | quote }}
|
||||||
{{- end }}
|
{{- end }}
|
||||||
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
|
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default (cat "v" .Chart.AppVersion | replace " " "") }}"
|
||||||
name: github-webhook-server
|
name: github-webhook-server
|
||||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||||
ports:
|
ports:
|
||||||
- containerPort: 8000
|
- containerPort: 8000
|
||||||
name: http
|
name: http
|
||||||
protocol: TCP
|
protocol: TCP
|
||||||
|
{{- if not .Values.metrics.proxy.enabled }}
|
||||||
|
- containerPort: {{ .Values.metrics.port }}
|
||||||
|
name: metrics-port
|
||||||
|
protocol: TCP
|
||||||
|
{{- end }}
|
||||||
resources:
|
resources:
|
||||||
{{- toYaml .Values.githubWebhookServer.resources | nindent 12 }}
|
{{- toYaml .Values.githubWebhookServer.resources | nindent 12 }}
|
||||||
securityContext:
|
securityContext:
|
||||||
{{- toYaml .Values.githubWebhookServer.securityContext | nindent 12 }}
|
{{- toYaml .Values.githubWebhookServer.securityContext | nindent 12 }}
|
||||||
|
{{- if .Values.metrics.proxy.enabled }}
|
||||||
- args:
|
- args:
|
||||||
- "--secure-listen-address=0.0.0.0:8443"
|
- "--secure-listen-address=0.0.0.0:{{ .Values.metrics.port }}"
|
||||||
- "--upstream=http://127.0.0.1:8080/"
|
- "--upstream=http://127.0.0.1:8080/"
|
||||||
- "--logtostderr=true"
|
- "--logtostderr=true"
|
||||||
- "--v=10"
|
- "--v=10"
|
||||||
image: "{{ .Values.kube_rbac_proxy.image.repository }}:{{ .Values.kube_rbac_proxy.image.tag }}"
|
image: "{{ .Values.metrics.proxy.image.repository }}:{{ .Values.metrics.proxy.image.tag }}"
|
||||||
name: kube-rbac-proxy
|
name: kube-rbac-proxy
|
||||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||||
ports:
|
ports:
|
||||||
- containerPort: 8443
|
- containerPort: {{ .Values.metrics.port }}
|
||||||
name: https
|
name: metrics-port
|
||||||
resources:
|
resources:
|
||||||
{{- toYaml .Values.resources | nindent 12 }}
|
{{- toYaml .Values.resources | nindent 12 }}
|
||||||
securityContext:
|
securityContext:
|
||||||
{{- toYaml .Values.securityContext | nindent 12 }}
|
{{- toYaml .Values.securityContext | nindent 12 }}
|
||||||
|
{{- end }}
|
||||||
terminationGracePeriodSeconds: 10
|
terminationGracePeriodSeconds: 10
|
||||||
{{- with .Values.githubWebhookServer.nodeSelector }}
|
{{- with .Values.githubWebhookServer.nodeSelector }}
|
||||||
nodeSelector:
|
nodeSelector:
|
||||||
@@ -86,4 +101,8 @@ spec:
|
|||||||
tolerations:
|
tolerations:
|
||||||
{{- toYaml . | nindent 8 }}
|
{{- toYaml . | nindent 8 }}
|
||||||
{{- end }}
|
{{- end }}
|
||||||
|
{{- with .Values.githubWebhookServer.topologySpreadConstraints }}
|
||||||
|
topologySpreadConstraints:
|
||||||
|
{{- toYaml . | nindent 8 }}
|
||||||
|
{{- end }}
|
||||||
{{- end }}
|
{{- end }}
|
||||||
|
|||||||
@@ -35,6 +35,14 @@ rules:
|
|||||||
- get
|
- get
|
||||||
- patch
|
- patch
|
||||||
- update
|
- update
|
||||||
|
- apiGroups:
|
||||||
|
- actions.summerwind.dev
|
||||||
|
resources:
|
||||||
|
- runnersets
|
||||||
|
verbs:
|
||||||
|
- get
|
||||||
|
- list
|
||||||
|
- watch
|
||||||
- apiGroups:
|
- apiGroups:
|
||||||
- actions.summerwind.dev
|
- actions.summerwind.dev
|
||||||
resources:
|
resources:
|
||||||
@@ -67,4 +75,16 @@ rules:
|
|||||||
- get
|
- get
|
||||||
- patch
|
- patch
|
||||||
- update
|
- update
|
||||||
|
- apiGroups:
|
||||||
|
- authentication.k8s.io
|
||||||
|
resources:
|
||||||
|
- tokenreviews
|
||||||
|
verbs:
|
||||||
|
- create
|
||||||
|
- apiGroups:
|
||||||
|
- authorization.k8s.io
|
||||||
|
resources:
|
||||||
|
- subjectaccessreviews
|
||||||
|
verbs:
|
||||||
|
- create
|
||||||
{{- end }}
|
{{- end }}
|
||||||
|
|||||||
@@ -6,12 +6,21 @@ metadata:
|
|||||||
namespace: {{ .Release.Namespace }}
|
namespace: {{ .Release.Namespace }}
|
||||||
labels:
|
labels:
|
||||||
{{- include "actions-runner-controller.labels" . | nindent 4 }}
|
{{- include "actions-runner-controller.labels" . | nindent 4 }}
|
||||||
|
{{- if .Values.githubWebhookServer.service.annotations }}
|
||||||
|
annotations:
|
||||||
|
{{ toYaml .Values.githubWebhookServer.service.annotations | nindent 4 }}
|
||||||
|
{{- end }}
|
||||||
spec:
|
spec:
|
||||||
type: {{ .Values.githubWebhookServer.service.type }}
|
type: {{ .Values.githubWebhookServer.service.type }}
|
||||||
ports:
|
ports:
|
||||||
{{ range $_, $port := .Values.githubWebhookServer.service.ports -}}
|
{{ range $_, $port := .Values.githubWebhookServer.service.ports -}}
|
||||||
- {{ $port | toYaml | nindent 6 }}
|
- {{ $port | toYaml | nindent 6 }}
|
||||||
{{- end }}
|
{{- end }}
|
||||||
|
{{- if .Values.metrics.serviceMonitor }}
|
||||||
|
- name: metrics-port
|
||||||
|
port: {{ .Values.metrics.port }}
|
||||||
|
targetPort: metrics-port
|
||||||
|
{{- end }}
|
||||||
selector:
|
selector:
|
||||||
{{- include "actions-runner-controller-github-webhook-server.selectorLabels" . | nindent 4 }}
|
{{- include "actions-runner-controller-github-webhook-server.selectorLabels" . | nindent 4 }}
|
||||||
{{- end }}
|
{{- end }}
|
||||||
|
|||||||
@@ -0,0 +1,24 @@
|
|||||||
|
{{- if and .Values.githubWebhookServer.enabled .Values.metrics.serviceMonitor }}
|
||||||
|
apiVersion: monitoring.coreos.com/v1
|
||||||
|
kind: ServiceMonitor
|
||||||
|
metadata:
|
||||||
|
labels:
|
||||||
|
{{- include "actions-runner-controller.labels" . | nindent 4 }}
|
||||||
|
{{- with .Values.metrics.serviceMonitorLabels }}
|
||||||
|
{{- toYaml . | nindent 4 }}
|
||||||
|
{{- end }}
|
||||||
|
name: {{ include "actions-runner-controller-github-webhook-server.serviceMonitorName" . }}
|
||||||
|
spec:
|
||||||
|
endpoints:
|
||||||
|
- path: /metrics
|
||||||
|
port: metrics-port
|
||||||
|
{{- if .Values.metrics.proxy.enabled }}
|
||||||
|
bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
|
||||||
|
scheme: https
|
||||||
|
tlsConfig:
|
||||||
|
insecureSkipVerify: true
|
||||||
|
{{- end }}
|
||||||
|
selector:
|
||||||
|
matchLabels:
|
||||||
|
{{- include "actions-runner-controller-github-webhook-server.selectorLabels" . | nindent 6 }}
|
||||||
|
{{- end }}
|
||||||
@@ -132,6 +132,62 @@ rules:
|
|||||||
- get
|
- get
|
||||||
- patch
|
- patch
|
||||||
- update
|
- update
|
||||||
|
- apiGroups:
|
||||||
|
- actions.summerwind.dev
|
||||||
|
resources:
|
||||||
|
- runnersets
|
||||||
|
verbs:
|
||||||
|
- create
|
||||||
|
- delete
|
||||||
|
- get
|
||||||
|
- list
|
||||||
|
- patch
|
||||||
|
- update
|
||||||
|
- watch
|
||||||
|
- apiGroups:
|
||||||
|
- actions.summerwind.dev
|
||||||
|
resources:
|
||||||
|
- runnersets/finalizers
|
||||||
|
verbs:
|
||||||
|
- create
|
||||||
|
- delete
|
||||||
|
- get
|
||||||
|
- list
|
||||||
|
- patch
|
||||||
|
- update
|
||||||
|
- watch
|
||||||
|
- apiGroups:
|
||||||
|
- actions.summerwind.dev
|
||||||
|
resources:
|
||||||
|
- runnersets/status
|
||||||
|
verbs:
|
||||||
|
- get
|
||||||
|
- patch
|
||||||
|
- update
|
||||||
|
- apiGroups:
|
||||||
|
- "apps"
|
||||||
|
resources:
|
||||||
|
- statefulsets
|
||||||
|
verbs:
|
||||||
|
- create
|
||||||
|
- delete
|
||||||
|
- get
|
||||||
|
- list
|
||||||
|
- patch
|
||||||
|
- update
|
||||||
|
- watch
|
||||||
|
- apiGroups:
|
||||||
|
- "apps"
|
||||||
|
resources:
|
||||||
|
- statefulsets/finalizers
|
||||||
|
verbs:
|
||||||
|
- create
|
||||||
|
- delete
|
||||||
|
- get
|
||||||
|
- list
|
||||||
|
- patch
|
||||||
|
- update
|
||||||
|
- watch
|
||||||
- apiGroups:
|
- apiGroups:
|
||||||
- ""
|
- ""
|
||||||
resources:
|
resources:
|
||||||
@@ -139,6 +195,15 @@ rules:
|
|||||||
verbs:
|
verbs:
|
||||||
- create
|
- create
|
||||||
- patch
|
- patch
|
||||||
|
- apiGroups:
|
||||||
|
- coordination.k8s.io
|
||||||
|
resources:
|
||||||
|
- leases
|
||||||
|
verbs:
|
||||||
|
- create
|
||||||
|
- get
|
||||||
|
- list
|
||||||
|
- update
|
||||||
- apiGroups:
|
- apiGroups:
|
||||||
- ""
|
- ""
|
||||||
resources:
|
resources:
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
|
|
||||||
---
|
---
|
||||||
apiVersion: admissionregistration.k8s.io/v1beta1
|
apiVersion: admissionregistration.k8s.io/v1
|
||||||
kind: MutatingWebhookConfiguration
|
kind: MutatingWebhookConfiguration
|
||||||
metadata:
|
metadata:
|
||||||
creationTimestamp: null
|
creationTimestamp: null
|
||||||
@@ -8,8 +8,9 @@ metadata:
|
|||||||
annotations:
|
annotations:
|
||||||
cert-manager.io/inject-ca-from: {{ .Release.Namespace }}/{{ include "actions-runner-controller.servingCertName" . }}
|
cert-manager.io/inject-ca-from: {{ .Release.Namespace }}/{{ include "actions-runner-controller.servingCertName" . }}
|
||||||
webhooks:
|
webhooks:
|
||||||
- clientConfig:
|
- admissionReviewVersions:
|
||||||
caBundle: Cg==
|
- v1beta1
|
||||||
|
clientConfig:
|
||||||
service:
|
service:
|
||||||
name: {{ include "actions-runner-controller.webhookServiceName" . }}
|
name: {{ include "actions-runner-controller.webhookServiceName" . }}
|
||||||
namespace: {{ .Release.Namespace }}
|
namespace: {{ .Release.Namespace }}
|
||||||
@@ -26,8 +27,10 @@ webhooks:
|
|||||||
- UPDATE
|
- UPDATE
|
||||||
resources:
|
resources:
|
||||||
- runners
|
- runners
|
||||||
- clientConfig:
|
sideEffects: None
|
||||||
caBundle: Cg==
|
- admissionReviewVersions:
|
||||||
|
- v1beta1
|
||||||
|
clientConfig:
|
||||||
service:
|
service:
|
||||||
name: {{ include "actions-runner-controller.webhookServiceName" . }}
|
name: {{ include "actions-runner-controller.webhookServiceName" . }}
|
||||||
namespace: {{ .Release.Namespace }}
|
namespace: {{ .Release.Namespace }}
|
||||||
@@ -44,8 +47,10 @@ webhooks:
|
|||||||
- UPDATE
|
- UPDATE
|
||||||
resources:
|
resources:
|
||||||
- runnerdeployments
|
- runnerdeployments
|
||||||
- clientConfig:
|
sideEffects: None
|
||||||
caBundle: Cg==
|
- admissionReviewVersions:
|
||||||
|
- v1beta1
|
||||||
|
clientConfig:
|
||||||
service:
|
service:
|
||||||
name: {{ include "actions-runner-controller.webhookServiceName" . }}
|
name: {{ include "actions-runner-controller.webhookServiceName" . }}
|
||||||
namespace: {{ .Release.Namespace }}
|
namespace: {{ .Release.Namespace }}
|
||||||
@@ -62,9 +67,31 @@ webhooks:
|
|||||||
- UPDATE
|
- UPDATE
|
||||||
resources:
|
resources:
|
||||||
- runnerreplicasets
|
- runnerreplicasets
|
||||||
|
sideEffects: None
|
||||||
|
- admissionReviewVersions:
|
||||||
|
- v1beta1
|
||||||
|
clientConfig:
|
||||||
|
service:
|
||||||
|
name: {{ include "actions-runner-controller.webhookServiceName" . }}
|
||||||
|
namespace: {{ .Release.Namespace }}
|
||||||
|
path: /mutate-runner-set-pod
|
||||||
|
failurePolicy: Fail
|
||||||
|
name: mutate-runner-pod.webhook.actions.summerwind.dev
|
||||||
|
rules:
|
||||||
|
- apiGroups:
|
||||||
|
- ""
|
||||||
|
apiVersions:
|
||||||
|
- v1
|
||||||
|
operations:
|
||||||
|
- CREATE
|
||||||
|
resources:
|
||||||
|
- pods
|
||||||
|
sideEffects: None
|
||||||
|
objectSelector:
|
||||||
|
matchLabels:
|
||||||
|
"actions-runner-controller/inject-registration-token": "true"
|
||||||
---
|
---
|
||||||
apiVersion: admissionregistration.k8s.io/v1beta1
|
apiVersion: admissionregistration.k8s.io/v1
|
||||||
kind: ValidatingWebhookConfiguration
|
kind: ValidatingWebhookConfiguration
|
||||||
metadata:
|
metadata:
|
||||||
creationTimestamp: null
|
creationTimestamp: null
|
||||||
@@ -72,8 +99,9 @@ metadata:
|
|||||||
annotations:
|
annotations:
|
||||||
cert-manager.io/inject-ca-from: {{ .Release.Namespace }}/{{ include "actions-runner-controller.servingCertName" . }}
|
cert-manager.io/inject-ca-from: {{ .Release.Namespace }}/{{ include "actions-runner-controller.servingCertName" . }}
|
||||||
webhooks:
|
webhooks:
|
||||||
- clientConfig:
|
- admissionReviewVersions:
|
||||||
caBundle: Cg==
|
- v1beta1
|
||||||
|
clientConfig:
|
||||||
service:
|
service:
|
||||||
name: {{ include "actions-runner-controller.webhookServiceName" . }}
|
name: {{ include "actions-runner-controller.webhookServiceName" . }}
|
||||||
namespace: {{ .Release.Namespace }}
|
namespace: {{ .Release.Namespace }}
|
||||||
@@ -90,8 +118,10 @@ webhooks:
|
|||||||
- UPDATE
|
- UPDATE
|
||||||
resources:
|
resources:
|
||||||
- runners
|
- runners
|
||||||
- clientConfig:
|
sideEffects: None
|
||||||
caBundle: Cg==
|
- admissionReviewVersions:
|
||||||
|
- v1beta1
|
||||||
|
clientConfig:
|
||||||
service:
|
service:
|
||||||
name: {{ include "actions-runner-controller.webhookServiceName" . }}
|
name: {{ include "actions-runner-controller.webhookServiceName" . }}
|
||||||
namespace: {{ .Release.Namespace }}
|
namespace: {{ .Release.Namespace }}
|
||||||
@@ -108,8 +138,10 @@ webhooks:
|
|||||||
- UPDATE
|
- UPDATE
|
||||||
resources:
|
resources:
|
||||||
- runnerdeployments
|
- runnerdeployments
|
||||||
- clientConfig:
|
sideEffects: None
|
||||||
caBundle: Cg==
|
- admissionReviewVersions:
|
||||||
|
- v1beta1
|
||||||
|
clientConfig:
|
||||||
service:
|
service:
|
||||||
name: {{ include "actions-runner-controller.webhookServiceName" . }}
|
name: {{ include "actions-runner-controller.webhookServiceName" . }}
|
||||||
namespace: {{ .Release.Namespace }}
|
namespace: {{ .Release.Namespace }}
|
||||||
@@ -126,3 +158,4 @@ webhooks:
|
|||||||
- UPDATE
|
- UPDATE
|
||||||
resources:
|
resources:
|
||||||
- runnerreplicasets
|
- runnerreplicasets
|
||||||
|
sideEffects: None
|
||||||
|
|||||||
@@ -8,10 +8,18 @@ replicaCount: 1
|
|||||||
|
|
||||||
syncPeriod: 10m
|
syncPeriod: 10m
|
||||||
|
|
||||||
|
# The controller tries its best not to repeat the duplicate GitHub API call
|
||||||
|
# within this duration.
|
||||||
|
# Defaults to syncPeriod - 10s.
|
||||||
|
#githubAPICacheDuration: 30s
|
||||||
|
|
||||||
|
# The URL of your GitHub Enterprise server, if you're using one.
|
||||||
|
#githubEnterpriseServerURL: https://github.example.com
|
||||||
|
|
||||||
# Only 1 authentication method can be deployed at a time
|
# Only 1 authentication method can be deployed at a time
|
||||||
# Uncomment the configuration you are applying and fill in the details
|
# Uncomment the configuration you are applying and fill in the details
|
||||||
authSecret:
|
authSecret:
|
||||||
create: true
|
create: false
|
||||||
name: "controller-manager"
|
name: "controller-manager"
|
||||||
### GitHub Apps Configuration
|
### GitHub Apps Configuration
|
||||||
#github_app_id: ""
|
#github_app_id: ""
|
||||||
@@ -20,17 +28,13 @@ authSecret:
|
|||||||
### GitHub PAT Configuration
|
### GitHub PAT Configuration
|
||||||
#github_token: ""
|
#github_token: ""
|
||||||
|
|
||||||
|
dockerRegistryMirror: ""
|
||||||
image:
|
image:
|
||||||
repository: summerwind/actions-runner-controller
|
repository: "summerwind/actions-runner-controller"
|
||||||
tag: "v0.17.0"
|
actionsRunnerRepositoryAndTag: "summerwind/actions-runner:latest"
|
||||||
dindSidecarRepositoryAndTag: "docker:dind"
|
dindSidecarRepositoryAndTag: "docker:dind"
|
||||||
pullPolicy: IfNotPresent
|
pullPolicy: IfNotPresent
|
||||||
|
|
||||||
kube_rbac_proxy:
|
|
||||||
image:
|
|
||||||
repository: quay.io/brancz/kube-rbac-proxy
|
|
||||||
tag: v0.8.0
|
|
||||||
|
|
||||||
imagePullSecrets: []
|
imagePullSecrets: []
|
||||||
nameOverride: ""
|
nameOverride: ""
|
||||||
fullnameOverride: ""
|
fullnameOverride: ""
|
||||||
@@ -46,6 +50,8 @@ serviceAccount:
|
|||||||
|
|
||||||
podAnnotations: {}
|
podAnnotations: {}
|
||||||
|
|
||||||
|
podLabels: {}
|
||||||
|
|
||||||
podSecurityContext:
|
podSecurityContext:
|
||||||
{}
|
{}
|
||||||
# fsGroup: 2000
|
# fsGroup: 2000
|
||||||
@@ -63,6 +69,16 @@ service:
|
|||||||
type: ClusterIP
|
type: ClusterIP
|
||||||
port: 443
|
port: 443
|
||||||
|
|
||||||
|
metrics:
|
||||||
|
serviceMonitor: false
|
||||||
|
serviceMonitorLabels: {}
|
||||||
|
port: 8443
|
||||||
|
proxy:
|
||||||
|
enabled: true
|
||||||
|
image:
|
||||||
|
repository: quay.io/brancz/kube-rbac-proxy
|
||||||
|
tag: v0.10.0
|
||||||
|
|
||||||
resources:
|
resources:
|
||||||
{}
|
{}
|
||||||
# We usually recommend not to specify default resources and to leave this as a conscious
|
# We usually recommend not to specify default resources and to leave this as a conscious
|
||||||
@@ -76,13 +92,6 @@ resources:
|
|||||||
# cpu: 100m
|
# cpu: 100m
|
||||||
# memory: 128Mi
|
# memory: 128Mi
|
||||||
|
|
||||||
autoscaling:
|
|
||||||
enabled: false
|
|
||||||
minReplicas: 1
|
|
||||||
maxReplicas: 100
|
|
||||||
targetCPUUtilizationPercentage: 80
|
|
||||||
# targetMemoryUtilizationPercentage: 80
|
|
||||||
|
|
||||||
nodeSelector: {}
|
nodeSelector: {}
|
||||||
|
|
||||||
tolerations: []
|
tolerations: []
|
||||||
@@ -109,14 +118,13 @@ scope:
|
|||||||
|
|
||||||
githubWebhookServer:
|
githubWebhookServer:
|
||||||
enabled: false
|
enabled: false
|
||||||
labels: {}
|
|
||||||
replicaCount: 1
|
replicaCount: 1
|
||||||
syncPeriod: 10m
|
syncPeriod: 10m
|
||||||
secret:
|
secret:
|
||||||
create: true
|
create: false
|
||||||
name: "github-webhook-server"
|
name: "github-webhook-server"
|
||||||
### GitHub Webhook Configuration
|
### GitHub Webhook Configuration
|
||||||
#github_webhook_secret_token: ""
|
github_webhook_secret_token: ""
|
||||||
imagePullSecrets: []
|
imagePullSecrets: []
|
||||||
nameOverride: ""
|
nameOverride: ""
|
||||||
fullnameOverride: ""
|
fullnameOverride: ""
|
||||||
@@ -129,6 +137,7 @@ githubWebhookServer:
|
|||||||
# If not set and create is true, a name is generated using the fullname template
|
# If not set and create is true, a name is generated using the fullname template
|
||||||
name: ""
|
name: ""
|
||||||
podAnnotations: {}
|
podAnnotations: {}
|
||||||
|
podLabels: {}
|
||||||
podSecurityContext: {}
|
podSecurityContext: {}
|
||||||
# fsGroup: 2000
|
# fsGroup: 2000
|
||||||
securityContext: {}
|
securityContext: {}
|
||||||
@@ -139,6 +148,7 @@ githubWebhookServer:
|
|||||||
priorityClassName: ""
|
priorityClassName: ""
|
||||||
service:
|
service:
|
||||||
type: ClusterIP
|
type: ClusterIP
|
||||||
|
annotations: {}
|
||||||
ports:
|
ports:
|
||||||
- port: 80
|
- port: 80
|
||||||
targetPort: http
|
targetPort: http
|
||||||
|
|||||||
@@ -25,8 +25,9 @@ import (
|
|||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
actionsv1alpha1 "github.com/summerwind/actions-runner-controller/api/v1alpha1"
|
actionsv1alpha1 "github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
|
||||||
"github.com/summerwind/actions-runner-controller/controllers"
|
"github.com/actions-runner-controller/actions-runner-controller/controllers"
|
||||||
|
zaplib "go.uber.org/zap"
|
||||||
"k8s.io/apimachinery/pkg/runtime"
|
"k8s.io/apimachinery/pkg/runtime"
|
||||||
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
|
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
|
||||||
_ "k8s.io/client-go/plugin/pkg/client/auth/exec"
|
_ "k8s.io/client-go/plugin/pkg/client/auth/exec"
|
||||||
@@ -42,6 +43,13 @@ var (
|
|||||||
setupLog = ctrl.Log.WithName("setup")
|
setupLog = ctrl.Log.WithName("setup")
|
||||||
)
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
logLevelDebug = "debug"
|
||||||
|
logLevelInfo = "info"
|
||||||
|
logLevelWarn = "warn"
|
||||||
|
logLevelError = "error"
|
||||||
|
)
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
_ = clientgoscheme.AddToScheme(scheme)
|
_ = clientgoscheme.AddToScheme(scheme)
|
||||||
|
|
||||||
@@ -63,6 +71,7 @@ func main() {
|
|||||||
|
|
||||||
enableLeaderElection bool
|
enableLeaderElection bool
|
||||||
syncPeriod time.Duration
|
syncPeriod time.Duration
|
||||||
|
logLevel string
|
||||||
)
|
)
|
||||||
|
|
||||||
webhookSecretToken = os.Getenv("GITHUB_WEBHOOK_SECRET_TOKEN")
|
webhookSecretToken = os.Getenv("GITHUB_WEBHOOK_SECRET_TOKEN")
|
||||||
@@ -73,6 +82,7 @@ func main() {
|
|||||||
flag.BoolVar(&enableLeaderElection, "enable-leader-election", false,
|
flag.BoolVar(&enableLeaderElection, "enable-leader-election", false,
|
||||||
"Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager.")
|
"Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager.")
|
||||||
flag.DurationVar(&syncPeriod, "sync-period", 10*time.Minute, "Determines the minimum frequency at which K8s resources managed by this controller are reconciled. When you use autoscaling, set to a lower value like 10 minute, because this corresponds to the minimum time to react on demand change")
|
flag.DurationVar(&syncPeriod, "sync-period", 10*time.Minute, "Determines the minimum frequency at which K8s resources managed by this controller are reconciled. When you use autoscaling, set to a lower value like 10 minute, because this corresponds to the minimum time to react on demand change")
|
||||||
|
flag.StringVar(&logLevel, "log-level", logLevelDebug, `The verbosity of the logging. Valid values are "debug", "info", "warn", "error". Defaults to "debug".`)
|
||||||
flag.Parse()
|
flag.Parse()
|
||||||
|
|
||||||
if webhookSecretToken == "" {
|
if webhookSecretToken == "" {
|
||||||
@@ -86,7 +96,19 @@ func main() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
logger := zap.New(func(o *zap.Options) {
|
logger := zap.New(func(o *zap.Options) {
|
||||||
o.Development = true
|
switch logLevel {
|
||||||
|
case logLevelDebug:
|
||||||
|
o.Development = true
|
||||||
|
case logLevelInfo:
|
||||||
|
lvl := zaplib.NewAtomicLevelAt(zaplib.InfoLevel)
|
||||||
|
o.Level = &lvl
|
||||||
|
case logLevelWarn:
|
||||||
|
lvl := zaplib.NewAtomicLevelAt(zaplib.WarnLevel)
|
||||||
|
o.Level = &lvl
|
||||||
|
case logLevelError:
|
||||||
|
lvl := zaplib.NewAtomicLevelAt(zaplib.ErrorLevel)
|
||||||
|
o.Level = &lvl
|
||||||
|
}
|
||||||
})
|
})
|
||||||
|
|
||||||
ctrl.SetLogger(logger)
|
ctrl.SetLogger(logger)
|
||||||
@@ -128,7 +150,7 @@ func main() {
|
|||||||
defer wg.Done()
|
defer wg.Done()
|
||||||
|
|
||||||
setupLog.Info("starting webhook server")
|
setupLog.Info("starting webhook server")
|
||||||
if err := mgr.Start(ctx.Done()); err != nil {
|
if err := mgr.Start(ctx); err != nil {
|
||||||
setupLog.Error(err, "problem running manager")
|
setupLog.Error(err, "problem running manager")
|
||||||
os.Exit(1)
|
os.Exit(1)
|
||||||
}
|
}
|
||||||
@@ -161,7 +183,7 @@ func main() {
|
|||||||
}()
|
}()
|
||||||
|
|
||||||
go func() {
|
go func() {
|
||||||
<-ctrl.SetupSignalHandler()
|
<-ctrl.SetupSignalHandler().Done()
|
||||||
cancel()
|
cancel()
|
||||||
}()
|
}()
|
||||||
|
|
||||||
|
|||||||
@@ -1,227 +1,304 @@
|
|||||||
|
|
||||||
---
|
---
|
||||||
apiVersion: apiextensions.k8s.io/v1beta1
|
apiVersion: apiextensions.k8s.io/v1
|
||||||
kind: CustomResourceDefinition
|
kind: CustomResourceDefinition
|
||||||
metadata:
|
metadata:
|
||||||
annotations:
|
annotations:
|
||||||
controller-gen.kubebuilder.io/version: v0.3.0
|
controller-gen.kubebuilder.io/version: v0.6.0
|
||||||
creationTimestamp: null
|
creationTimestamp: null
|
||||||
name: horizontalrunnerautoscalers.actions.summerwind.dev
|
name: horizontalrunnerautoscalers.actions.summerwind.dev
|
||||||
spec:
|
spec:
|
||||||
additionalPrinterColumns:
|
|
||||||
- JSONPath: .spec.minReplicas
|
|
||||||
name: Min
|
|
||||||
type: number
|
|
||||||
- JSONPath: .spec.maxReplicas
|
|
||||||
name: Max
|
|
||||||
type: number
|
|
||||||
- JSONPath: .status.desiredReplicas
|
|
||||||
name: Desired
|
|
||||||
type: number
|
|
||||||
group: actions.summerwind.dev
|
group: actions.summerwind.dev
|
||||||
names:
|
names:
|
||||||
kind: HorizontalRunnerAutoscaler
|
kind: HorizontalRunnerAutoscaler
|
||||||
listKind: HorizontalRunnerAutoscalerList
|
listKind: HorizontalRunnerAutoscalerList
|
||||||
plural: horizontalrunnerautoscalers
|
plural: horizontalrunnerautoscalers
|
||||||
|
shortNames:
|
||||||
|
- hra
|
||||||
singular: horizontalrunnerautoscaler
|
singular: horizontalrunnerautoscaler
|
||||||
scope: Namespaced
|
scope: Namespaced
|
||||||
subresources:
|
versions:
|
||||||
status: {}
|
- additionalPrinterColumns:
|
||||||
validation:
|
- jsonPath: .spec.minReplicas
|
||||||
openAPIV3Schema:
|
name: Min
|
||||||
description: HorizontalRunnerAutoscaler is the Schema for the horizontalrunnerautoscaler
|
type: number
|
||||||
API
|
- jsonPath: .spec.maxReplicas
|
||||||
properties:
|
name: Max
|
||||||
apiVersion:
|
type: number
|
||||||
description: 'APIVersion defines the versioned schema of this representation
|
- jsonPath: .status.desiredReplicas
|
||||||
of an object. Servers should convert recognized schemas to the latest
|
name: Desired
|
||||||
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
|
type: number
|
||||||
type: string
|
- jsonPath: .status.scheduledOverridesSummary
|
||||||
kind:
|
name: Schedule
|
||||||
description: 'Kind is a string value representing the REST resource this
|
type: string
|
||||||
object represents. Servers may infer this from the endpoint the client
|
name: v1alpha1
|
||||||
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
|
schema:
|
||||||
type: string
|
openAPIV3Schema:
|
||||||
metadata:
|
description: HorizontalRunnerAutoscaler is the Schema for the horizontalrunnerautoscaler
|
||||||
type: object
|
API
|
||||||
spec:
|
properties:
|
||||||
description: HorizontalRunnerAutoscalerSpec defines the desired state of
|
apiVersion:
|
||||||
HorizontalRunnerAutoscaler
|
description: 'APIVersion defines the versioned schema of this representation
|
||||||
properties:
|
of an object. Servers should convert recognized schemas to the latest
|
||||||
capacityReservations:
|
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
|
||||||
items:
|
type: string
|
||||||
description: CapacityReservation specifies the number of replicas
|
kind:
|
||||||
temporarily added to the scale target until ExpirationTime.
|
description: 'Kind is a string value representing the REST resource this
|
||||||
|
object represents. Servers may infer this from the endpoint the client
|
||||||
|
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
|
||||||
|
type: string
|
||||||
|
metadata:
|
||||||
|
type: object
|
||||||
|
spec:
|
||||||
|
description: HorizontalRunnerAutoscalerSpec defines the desired state
|
||||||
|
of HorizontalRunnerAutoscaler
|
||||||
|
properties:
|
||||||
|
capacityReservations:
|
||||||
|
items:
|
||||||
|
description: CapacityReservation specifies the number of replicas
|
||||||
|
temporarily added to the scale target until ExpirationTime.
|
||||||
|
properties:
|
||||||
|
expirationTime:
|
||||||
|
format: date-time
|
||||||
|
type: string
|
||||||
|
name:
|
||||||
|
type: string
|
||||||
|
replicas:
|
||||||
|
type: integer
|
||||||
|
type: object
|
||||||
|
type: array
|
||||||
|
maxReplicas:
|
||||||
|
description: MinReplicas is the maximum number of replicas the deployment
|
||||||
|
is allowed to scale
|
||||||
|
type: integer
|
||||||
|
metrics:
|
||||||
|
description: Metrics is the collection of various metric targets to
|
||||||
|
calculate desired number of runners
|
||||||
|
items:
|
||||||
|
properties:
|
||||||
|
repositoryNames:
|
||||||
|
description: RepositoryNames is the list of repository names
|
||||||
|
to be used for calculating the metric. For example, a repository
|
||||||
|
name is the REPO part of `github.com/USER/REPO`.
|
||||||
|
items:
|
||||||
|
type: string
|
||||||
|
type: array
|
||||||
|
scaleDownAdjustment:
|
||||||
|
description: ScaleDownAdjustment is the number of runners removed
|
||||||
|
on scale-down. You can only specify either ScaleDownFactor
|
||||||
|
or ScaleDownAdjustment.
|
||||||
|
type: integer
|
||||||
|
scaleDownFactor:
|
||||||
|
description: ScaleDownFactor is the multiplicative factor applied
|
||||||
|
to the current number of runners used to determine how many
|
||||||
|
pods should be removed.
|
||||||
|
type: string
|
||||||
|
scaleDownThreshold:
|
||||||
|
description: ScaleDownThreshold is the percentage of busy runners
|
||||||
|
less than which will trigger the hpa to scale the runners
|
||||||
|
down.
|
||||||
|
type: string
|
||||||
|
scaleUpAdjustment:
|
||||||
|
description: ScaleUpAdjustment is the number of runners added
|
||||||
|
on scale-up. You can only specify either ScaleUpFactor or
|
||||||
|
ScaleUpAdjustment.
|
||||||
|
type: integer
|
||||||
|
scaleUpFactor:
|
||||||
|
description: ScaleUpFactor is the multiplicative factor applied
|
||||||
|
to the current number of runners used to determine how many
|
||||||
|
pods should be added.
|
||||||
|
type: string
|
||||||
|
scaleUpThreshold:
|
||||||
|
description: ScaleUpThreshold is the percentage of busy runners
|
||||||
|
greater than which will trigger the hpa to scale runners up.
|
||||||
|
type: string
|
||||||
|
type:
|
||||||
|
description: Type is the type of metric to be used for autoscaling.
|
||||||
|
The only supported Type is TotalNumberOfQueuedAndInProgressWorkflowRuns
|
||||||
|
type: string
|
||||||
|
type: object
|
||||||
|
type: array
|
||||||
|
minReplicas:
|
||||||
|
description: MinReplicas is the minimum number of replicas the deployment
|
||||||
|
is allowed to scale
|
||||||
|
type: integer
|
||||||
|
scaleDownDelaySecondsAfterScaleOut:
|
||||||
|
description: ScaleDownDelaySecondsAfterScaleUp is the approximate
|
||||||
|
delay for a scale down followed by a scale up Used to prevent flapping
|
||||||
|
(down->up->down->... loop)
|
||||||
|
type: integer
|
||||||
|
scaleTargetRef:
|
||||||
|
description: ScaleTargetRef sis the reference to scaled resource like
|
||||||
|
RunnerDeployment
|
||||||
properties:
|
properties:
|
||||||
expirationTime:
|
kind:
|
||||||
format: date-time
|
description: Kind is the type of resource being referenced
|
||||||
|
enum:
|
||||||
|
- RunnerDeployment
|
||||||
|
- RunnerSet
|
||||||
type: string
|
type: string
|
||||||
name:
|
name:
|
||||||
|
description: Name is the name of resource being referenced
|
||||||
type: string
|
type: string
|
||||||
replicas:
|
|
||||||
type: integer
|
|
||||||
type: object
|
type: object
|
||||||
type: array
|
scaleUpTriggers:
|
||||||
maxReplicas:
|
description: "ScaleUpTriggers is an experimental feature to increase
|
||||||
description: MinReplicas is the maximum number of replicas the deployment
|
the desired replicas by 1 on each webhook requested received by
|
||||||
is allowed to scale
|
the webhookBasedAutoscaler. \n This feature requires you to also
|
||||||
type: integer
|
enable and deploy the webhookBasedAutoscaler onto your cluster.
|
||||||
metrics:
|
\n Note that the added runners remain until the next sync period
|
||||||
description: Metrics is the collection of various metric targets to
|
at least, and they may or may not be used by GitHub Actions depending
|
||||||
calculate desired number of runners
|
on the timing. They are intended to be used to gain \"resource slack\"
|
||||||
items:
|
immediately after you receive a webhook from GitHub, so that you
|
||||||
properties:
|
can loosely expect MinReplicas runners to be always available."
|
||||||
repositoryNames:
|
items:
|
||||||
description: RepositoryNames is the list of repository names to
|
properties:
|
||||||
be used for calculating the metric. For example, a repository
|
amount:
|
||||||
name is the REPO part of `github.com/USER/REPO`.
|
type: integer
|
||||||
items:
|
duration:
|
||||||
type: string
|
type: string
|
||||||
type: array
|
githubEvent:
|
||||||
scaleDownAdjustment:
|
properties:
|
||||||
description: ScaleDownAdjustment is the number of runners removed
|
checkRun:
|
||||||
on scale-down. You can only specify either ScaleDownFactor or
|
description: https://docs.github.com/en/actions/reference/events-that-trigger-workflows#check_run
|
||||||
ScaleDownAdjustment.
|
properties:
|
||||||
type: integer
|
names:
|
||||||
scaleDownFactor:
|
description: Names is a list of GitHub Actions glob
|
||||||
description: ScaleDownFactor is the multiplicative factor applied
|
patterns. Any check_run event whose name matches one
|
||||||
to the current number of runners used to determine how many
|
of patterns in the list can trigger autoscaling. Note
|
||||||
pods should be removed.
|
that check_run name seem to equal to the job name
|
||||||
type: string
|
you've defined in your actions workflow yaml file.
|
||||||
scaleDownThreshold:
|
So it is very likely that you can utilize this to
|
||||||
description: ScaleDownThreshold is the percentage of busy runners
|
trigger depending on the job.
|
||||||
less than which will trigger the hpa to scale the runners down.
|
items:
|
||||||
type: string
|
type: string
|
||||||
scaleUpAdjustment:
|
type: array
|
||||||
description: ScaleUpAdjustment is the number of runners added
|
repositories:
|
||||||
on scale-up. You can only specify either ScaleUpFactor or ScaleUpAdjustment.
|
description: Repositories is a list of GitHub repositories.
|
||||||
type: integer
|
Any check_run event whose repository matches one of
|
||||||
scaleUpFactor:
|
repositories in the list can trigger autoscaling.
|
||||||
description: ScaleUpFactor is the multiplicative factor applied
|
items:
|
||||||
to the current number of runners used to determine how many
|
type: string
|
||||||
pods should be added.
|
type: array
|
||||||
type: string
|
status:
|
||||||
scaleUpThreshold:
|
|
||||||
description: ScaleUpThreshold is the percentage of busy runners
|
|
||||||
greater than which will trigger the hpa to scale runners up.
|
|
||||||
type: string
|
|
||||||
type:
|
|
||||||
description: Type is the type of metric to be used for autoscaling.
|
|
||||||
The only supported Type is TotalNumberOfQueuedAndInProgressWorkflowRuns
|
|
||||||
type: string
|
|
||||||
type: object
|
|
||||||
type: array
|
|
||||||
minReplicas:
|
|
||||||
description: MinReplicas is the minimum number of replicas the deployment
|
|
||||||
is allowed to scale
|
|
||||||
type: integer
|
|
||||||
scaleDownDelaySecondsAfterScaleOut:
|
|
||||||
description: ScaleDownDelaySecondsAfterScaleUp is the approximate delay
|
|
||||||
for a scale down followed by a scale up Used to prevent flapping (down->up->down->...
|
|
||||||
loop)
|
|
||||||
type: integer
|
|
||||||
scaleTargetRef:
|
|
||||||
description: ScaleTargetRef sis the reference to scaled resource like
|
|
||||||
RunnerDeployment
|
|
||||||
properties:
|
|
||||||
name:
|
|
||||||
type: string
|
|
||||||
type: object
|
|
||||||
scaleUpTriggers:
|
|
||||||
description: "ScaleUpTriggers is an experimental feature to increase
|
|
||||||
the desired replicas by 1 on each webhook requested received by the
|
|
||||||
webhookBasedAutoscaler. \n This feature requires you to also enable
|
|
||||||
and deploy the webhookBasedAutoscaler onto your cluster. \n Note that
|
|
||||||
the added runners remain until the next sync period at least, and
|
|
||||||
they may or may not be used by GitHub Actions depending on the timing.
|
|
||||||
They are intended to be used to gain \"resource slack\" immediately
|
|
||||||
after you receive a webhook from GitHub, so that you can loosely expect
|
|
||||||
MinReplicas runners to be always available."
|
|
||||||
items:
|
|
||||||
properties:
|
|
||||||
amount:
|
|
||||||
type: integer
|
|
||||||
duration:
|
|
||||||
type: string
|
|
||||||
githubEvent:
|
|
||||||
properties:
|
|
||||||
checkRun:
|
|
||||||
description: https://docs.github.com/en/actions/reference/events-that-trigger-workflows#check_run
|
|
||||||
properties:
|
|
||||||
names:
|
|
||||||
description: Names is a list of GitHub Actions glob patterns.
|
|
||||||
Any check_run event whose name matches one of patterns
|
|
||||||
in the list can trigger autoscaling. Note that check_run
|
|
||||||
name seem to equal to the job name you've defined in
|
|
||||||
your actions workflow yaml file. So it is very likely
|
|
||||||
that you can utilize this to trigger depending on the
|
|
||||||
job.
|
|
||||||
items:
|
|
||||||
type: string
|
type: string
|
||||||
type: array
|
types:
|
||||||
status:
|
items:
|
||||||
type: string
|
type: string
|
||||||
types:
|
type: array
|
||||||
items:
|
type: object
|
||||||
type: string
|
pullRequest:
|
||||||
type: array
|
description: https://docs.github.com/en/actions/reference/events-that-trigger-workflows#pull_request
|
||||||
type: object
|
properties:
|
||||||
pullRequest:
|
branches:
|
||||||
description: https://docs.github.com/en/actions/reference/events-that-trigger-workflows#pull_request
|
items:
|
||||||
properties:
|
type: string
|
||||||
branches:
|
type: array
|
||||||
items:
|
types:
|
||||||
type: string
|
items:
|
||||||
type: array
|
type: string
|
||||||
types:
|
type: array
|
||||||
items:
|
type: object
|
||||||
type: string
|
push:
|
||||||
type: array
|
description: PushSpec is the condition for triggering scale-up
|
||||||
type: object
|
on push event Also see https://docs.github.com/en/actions/reference/events-that-trigger-workflows#push
|
||||||
push:
|
type: object
|
||||||
description: PushSpec is the condition for triggering scale-up
|
type: object
|
||||||
on push event Also see https://docs.github.com/en/actions/reference/events-that-trigger-workflows#push
|
type: object
|
||||||
type: object
|
type: array
|
||||||
type: object
|
scheduledOverrides:
|
||||||
type: object
|
description: ScheduledOverrides is the list of ScheduledOverride.
|
||||||
type: array
|
It can be used to override a few fields of HorizontalRunnerAutoscalerSpec
|
||||||
type: object
|
on schedule. The earlier a scheduled override is, the higher it
|
||||||
status:
|
is prioritized.
|
||||||
properties:
|
items:
|
||||||
cacheEntries:
|
description: ScheduledOverride can be used to override a few fields
|
||||||
items:
|
of HorizontalRunnerAutoscalerSpec on schedule. A schedule can
|
||||||
properties:
|
optionally be recurring, so that the correspoding override happens
|
||||||
expirationTime:
|
every day, week, month, or year.
|
||||||
format: date-time
|
properties:
|
||||||
type: string
|
endTime:
|
||||||
key:
|
description: EndTime is the time at which the first override
|
||||||
type: string
|
ends.
|
||||||
value:
|
format: date-time
|
||||||
type: integer
|
type: string
|
||||||
type: object
|
minReplicas:
|
||||||
type: array
|
description: MinReplicas is the number of runners while overriding.
|
||||||
desiredReplicas:
|
If omitted, it doesn't override minReplicas.
|
||||||
description: DesiredReplicas is the total number of desired, non-terminated
|
minimum: 0
|
||||||
and latest pods to be set for the primary RunnerSet This doesn't include
|
nullable: true
|
||||||
outdated pods while upgrading the deployment and replacing the runnerset.
|
type: integer
|
||||||
type: integer
|
recurrenceRule:
|
||||||
lastSuccessfulScaleOutTime:
|
properties:
|
||||||
format: date-time
|
frequency:
|
||||||
nullable: true
|
description: Frequency is the name of a predefined interval
|
||||||
type: string
|
of each recurrence. The valid values are "Daily", "Weekly",
|
||||||
observedGeneration:
|
"Monthly", and "Yearly". If empty, the corresponding override
|
||||||
description: ObservedGeneration is the most recent generation observed
|
happens only once.
|
||||||
for the target. It corresponds to e.g. RunnerDeployment's generation,
|
enum:
|
||||||
which is updated on mutation by the API Server.
|
- Daily
|
||||||
format: int64
|
- Weekly
|
||||||
type: integer
|
- Monthly
|
||||||
type: object
|
- Yearly
|
||||||
type: object
|
type: string
|
||||||
version: v1alpha1
|
untilTime:
|
||||||
versions:
|
description: UntilTime is the time of the final recurrence.
|
||||||
- name: v1alpha1
|
If empty, the schedule recurs forever.
|
||||||
|
format: date-time
|
||||||
|
type: string
|
||||||
|
type: object
|
||||||
|
startTime:
|
||||||
|
description: StartTime is the time at which the first override
|
||||||
|
starts.
|
||||||
|
format: date-time
|
||||||
|
type: string
|
||||||
|
required:
|
||||||
|
- endTime
|
||||||
|
- startTime
|
||||||
|
type: object
|
||||||
|
type: array
|
||||||
|
type: object
|
||||||
|
status:
|
||||||
|
properties:
|
||||||
|
cacheEntries:
|
||||||
|
items:
|
||||||
|
properties:
|
||||||
|
expirationTime:
|
||||||
|
format: date-time
|
||||||
|
type: string
|
||||||
|
key:
|
||||||
|
type: string
|
||||||
|
value:
|
||||||
|
type: integer
|
||||||
|
type: object
|
||||||
|
type: array
|
||||||
|
desiredReplicas:
|
||||||
|
description: DesiredReplicas is the total number of desired, non-terminated
|
||||||
|
and latest pods to be set for the primary RunnerSet This doesn't
|
||||||
|
include outdated pods while upgrading the deployment and replacing
|
||||||
|
the runnerset.
|
||||||
|
type: integer
|
||||||
|
lastSuccessfulScaleOutTime:
|
||||||
|
format: date-time
|
||||||
|
nullable: true
|
||||||
|
type: string
|
||||||
|
observedGeneration:
|
||||||
|
description: ObservedGeneration is the most recent generation observed
|
||||||
|
for the target. It corresponds to e.g. RunnerDeployment's generation,
|
||||||
|
which is updated on mutation by the API Server.
|
||||||
|
format: int64
|
||||||
|
type: integer
|
||||||
|
scheduledOverridesSummary:
|
||||||
|
description: ScheduledOverridesSummary is the summary of active and
|
||||||
|
upcoming scheduled overrides to be shown in e.g. a column of a `kubectl
|
||||||
|
get hra` output for observability.
|
||||||
|
type: string
|
||||||
|
type: object
|
||||||
|
type: object
|
||||||
served: true
|
served: true
|
||||||
storage: true
|
storage: true
|
||||||
|
subresources:
|
||||||
|
status: {}
|
||||||
status:
|
status:
|
||||||
acceptedNames:
|
acceptedNames:
|
||||||
kind: ""
|
kind: ""
|
||||||
|
|||||||
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
7165
config/crd/bases/actions.summerwind.dev_runnersets.yaml
Normal file
7165
config/crd/bases/actions.summerwind.dev_runnersets.yaml
Normal file
File diff suppressed because it is too large
Load Diff
@@ -1,6 +1,6 @@
|
|||||||
# The following patch adds a directive for certmanager to inject CA into the CRD
|
# The following patch adds a directive for certmanager to inject CA into the CRD
|
||||||
# CRD conversion requires k8s 1.13 or later.
|
# CRD conversion requires k8s 1.13 or later.
|
||||||
apiVersion: apiextensions.k8s.io/v1beta1
|
apiVersion: apiextensions.k8s.io/v1
|
||||||
kind: CustomResourceDefinition
|
kind: CustomResourceDefinition
|
||||||
metadata:
|
metadata:
|
||||||
annotations:
|
annotations:
|
||||||
|
|||||||
@@ -1,17 +1,18 @@
|
|||||||
# The following patch enables conversion webhook for CRD
|
# The following patch enables conversion webhook for CRD
|
||||||
# CRD conversion requires k8s 1.13 or later.
|
# CRD conversion requires k8s 1.13 or later.
|
||||||
apiVersion: apiextensions.k8s.io/v1beta1
|
apiVersion: apiextensions.k8s.io/v1
|
||||||
kind: CustomResourceDefinition
|
kind: CustomResourceDefinition
|
||||||
metadata:
|
metadata:
|
||||||
name: runners.actions.summerwind.dev
|
name: runners.actions.summerwind.dev
|
||||||
spec:
|
spec:
|
||||||
conversion:
|
conversion:
|
||||||
strategy: Webhook
|
strategy: Webhook
|
||||||
webhookClientConfig:
|
webhook:
|
||||||
# this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank,
|
clientConfig:
|
||||||
# but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager)
|
# this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank,
|
||||||
caBundle: Cg==
|
# but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager)
|
||||||
service:
|
caBundle: Cg==
|
||||||
namespace: system
|
service:
|
||||||
name: webhook-service
|
namespace: system
|
||||||
path: /convert
|
name: webhook-service
|
||||||
|
path: /convert
|
||||||
|
|||||||
@@ -10,7 +10,7 @@ spec:
|
|||||||
spec:
|
spec:
|
||||||
containers:
|
containers:
|
||||||
- name: kube-rbac-proxy
|
- name: kube-rbac-proxy
|
||||||
image: quay.io/brancz/kube-rbac-proxy:v0.8.0
|
image: quay.io/brancz/kube-rbac-proxy:v0.10.0
|
||||||
args:
|
args:
|
||||||
- "--secure-listen-address=0.0.0.0:8443"
|
- "--secure-listen-address=0.0.0.0:8443"
|
||||||
- "--upstream=http://127.0.0.1:8080/"
|
- "--upstream=http://127.0.0.1:8080/"
|
||||||
|
|||||||
@@ -1,13 +1,13 @@
|
|||||||
# This patch add annotation to admission webhook config and
|
# This patch add annotation to admission webhook config and
|
||||||
# the variables $(CERTIFICATE_NAMESPACE) and $(CERTIFICATE_NAME) will be substituted by kustomize.
|
# the variables $(CERTIFICATE_NAMESPACE) and $(CERTIFICATE_NAME) will be substituted by kustomize.
|
||||||
apiVersion: admissionregistration.k8s.io/v1beta1
|
apiVersion: admissionregistration.k8s.io/v1
|
||||||
kind: MutatingWebhookConfiguration
|
kind: MutatingWebhookConfiguration
|
||||||
metadata:
|
metadata:
|
||||||
name: mutating-webhook-configuration
|
name: mutating-webhook-configuration
|
||||||
annotations:
|
annotations:
|
||||||
cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME)
|
cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME)
|
||||||
---
|
---
|
||||||
apiVersion: admissionregistration.k8s.io/v1beta1
|
apiVersion: admissionregistration.k8s.io/v1
|
||||||
kind: ValidatingWebhookConfiguration
|
kind: ValidatingWebhookConfiguration
|
||||||
metadata:
|
metadata:
|
||||||
name: validating-webhook-configuration
|
name: validating-webhook-configuration
|
||||||
|
|||||||
@@ -134,6 +134,67 @@ rules:
|
|||||||
- get
|
- get
|
||||||
- patch
|
- patch
|
||||||
- update
|
- update
|
||||||
|
- apiGroups:
|
||||||
|
- actions.summerwind.dev
|
||||||
|
resources:
|
||||||
|
- runnersets
|
||||||
|
verbs:
|
||||||
|
- create
|
||||||
|
- delete
|
||||||
|
- get
|
||||||
|
- list
|
||||||
|
- patch
|
||||||
|
- update
|
||||||
|
- watch
|
||||||
|
- apiGroups:
|
||||||
|
- actions.summerwind.dev
|
||||||
|
resources:
|
||||||
|
- runnersets/finalizers
|
||||||
|
verbs:
|
||||||
|
- create
|
||||||
|
- delete
|
||||||
|
- get
|
||||||
|
- list
|
||||||
|
- patch
|
||||||
|
- update
|
||||||
|
- watch
|
||||||
|
- apiGroups:
|
||||||
|
- actions.summerwind.dev
|
||||||
|
resources:
|
||||||
|
- runnersets/status
|
||||||
|
verbs:
|
||||||
|
- get
|
||||||
|
- patch
|
||||||
|
- update
|
||||||
|
- apiGroups:
|
||||||
|
- apps
|
||||||
|
resources:
|
||||||
|
- statefulsets
|
||||||
|
verbs:
|
||||||
|
- create
|
||||||
|
- delete
|
||||||
|
- get
|
||||||
|
- list
|
||||||
|
- patch
|
||||||
|
- update
|
||||||
|
- watch
|
||||||
|
- apiGroups:
|
||||||
|
- apps
|
||||||
|
resources:
|
||||||
|
- statefulsets/status
|
||||||
|
verbs:
|
||||||
|
- get
|
||||||
|
- patch
|
||||||
|
- update
|
||||||
|
- apiGroups:
|
||||||
|
- coordination.k8s.io
|
||||||
|
resources:
|
||||||
|
- leases
|
||||||
|
verbs:
|
||||||
|
- create
|
||||||
|
- get
|
||||||
|
- list
|
||||||
|
- update
|
||||||
- apiGroups:
|
- apiGroups:
|
||||||
- ""
|
- ""
|
||||||
resources:
|
resources:
|
||||||
|
|||||||
@@ -3,4 +3,4 @@ kind: Runner
|
|||||||
metadata:
|
metadata:
|
||||||
name: summerwind-actions-runner-controller
|
name: summerwind-actions-runner-controller
|
||||||
spec:
|
spec:
|
||||||
repository: summerwind/actions-runner-controller
|
repository: actions-runner-controller/actions-runner-controller
|
||||||
|
|||||||
@@ -6,4 +6,4 @@ spec:
|
|||||||
replicas: 2
|
replicas: 2
|
||||||
template:
|
template:
|
||||||
spec:
|
spec:
|
||||||
repository: summerwind/actions-runner-controller
|
repository: actions-runner-controller/actions-runner-controller
|
||||||
|
|||||||
@@ -6,4 +6,4 @@ spec:
|
|||||||
replicas: 2
|
replicas: 2
|
||||||
template:
|
template:
|
||||||
spec:
|
spec:
|
||||||
repository: summerwind/actions-runner-controller
|
repository: actions-runner-controller/actions-runner-controller
|
||||||
|
|||||||
@@ -1,13 +1,14 @@
|
|||||||
|
|
||||||
---
|
---
|
||||||
apiVersion: admissionregistration.k8s.io/v1beta1
|
apiVersion: admissionregistration.k8s.io/v1
|
||||||
kind: MutatingWebhookConfiguration
|
kind: MutatingWebhookConfiguration
|
||||||
metadata:
|
metadata:
|
||||||
creationTimestamp: null
|
creationTimestamp: null
|
||||||
name: mutating-webhook-configuration
|
name: mutating-webhook-configuration
|
||||||
webhooks:
|
webhooks:
|
||||||
- clientConfig:
|
- admissionReviewVersions:
|
||||||
caBundle: Cg==
|
- v1beta1
|
||||||
|
clientConfig:
|
||||||
service:
|
service:
|
||||||
name: webhook-service
|
name: webhook-service
|
||||||
namespace: system
|
namespace: system
|
||||||
@@ -24,8 +25,10 @@ webhooks:
|
|||||||
- UPDATE
|
- UPDATE
|
||||||
resources:
|
resources:
|
||||||
- runners
|
- runners
|
||||||
- clientConfig:
|
sideEffects: None
|
||||||
caBundle: Cg==
|
- admissionReviewVersions:
|
||||||
|
- v1beta1
|
||||||
|
clientConfig:
|
||||||
service:
|
service:
|
||||||
name: webhook-service
|
name: webhook-service
|
||||||
namespace: system
|
namespace: system
|
||||||
@@ -42,8 +45,10 @@ webhooks:
|
|||||||
- UPDATE
|
- UPDATE
|
||||||
resources:
|
resources:
|
||||||
- runnerdeployments
|
- runnerdeployments
|
||||||
- clientConfig:
|
sideEffects: None
|
||||||
caBundle: Cg==
|
- admissionReviewVersions:
|
||||||
|
- v1beta1
|
||||||
|
clientConfig:
|
||||||
service:
|
service:
|
||||||
name: webhook-service
|
name: webhook-service
|
||||||
namespace: system
|
namespace: system
|
||||||
@@ -60,16 +65,37 @@ webhooks:
|
|||||||
- UPDATE
|
- UPDATE
|
||||||
resources:
|
resources:
|
||||||
- runnerreplicasets
|
- runnerreplicasets
|
||||||
|
sideEffects: None
|
||||||
|
- admissionReviewVersions:
|
||||||
|
- v1beta1
|
||||||
|
clientConfig:
|
||||||
|
service:
|
||||||
|
name: webhook-service
|
||||||
|
namespace: system
|
||||||
|
path: /mutate-runner-set-pod
|
||||||
|
failurePolicy: Ignore
|
||||||
|
name: mutate-runner-pod.webhook.actions.summerwind.dev
|
||||||
|
rules:
|
||||||
|
- apiGroups:
|
||||||
|
- ""
|
||||||
|
apiVersions:
|
||||||
|
- v1
|
||||||
|
operations:
|
||||||
|
- CREATE
|
||||||
|
resources:
|
||||||
|
- pods
|
||||||
|
sideEffects: None
|
||||||
|
|
||||||
---
|
---
|
||||||
apiVersion: admissionregistration.k8s.io/v1beta1
|
apiVersion: admissionregistration.k8s.io/v1
|
||||||
kind: ValidatingWebhookConfiguration
|
kind: ValidatingWebhookConfiguration
|
||||||
metadata:
|
metadata:
|
||||||
creationTimestamp: null
|
creationTimestamp: null
|
||||||
name: validating-webhook-configuration
|
name: validating-webhook-configuration
|
||||||
webhooks:
|
webhooks:
|
||||||
- clientConfig:
|
- admissionReviewVersions:
|
||||||
caBundle: Cg==
|
- v1beta1
|
||||||
|
clientConfig:
|
||||||
service:
|
service:
|
||||||
name: webhook-service
|
name: webhook-service
|
||||||
namespace: system
|
namespace: system
|
||||||
@@ -86,8 +112,10 @@ webhooks:
|
|||||||
- UPDATE
|
- UPDATE
|
||||||
resources:
|
resources:
|
||||||
- runners
|
- runners
|
||||||
- clientConfig:
|
sideEffects: None
|
||||||
caBundle: Cg==
|
- admissionReviewVersions:
|
||||||
|
- v1beta1
|
||||||
|
clientConfig:
|
||||||
service:
|
service:
|
||||||
name: webhook-service
|
name: webhook-service
|
||||||
namespace: system
|
namespace: system
|
||||||
@@ -104,8 +132,10 @@ webhooks:
|
|||||||
- UPDATE
|
- UPDATE
|
||||||
resources:
|
resources:
|
||||||
- runnerdeployments
|
- runnerdeployments
|
||||||
- clientConfig:
|
sideEffects: None
|
||||||
caBundle: Cg==
|
- admissionReviewVersions:
|
||||||
|
- v1beta1
|
||||||
|
clientConfig:
|
||||||
service:
|
service:
|
||||||
name: webhook-service
|
name: webhook-service
|
||||||
namespace: system
|
namespace: system
|
||||||
@@ -122,3 +152,4 @@ webhooks:
|
|||||||
- UPDATE
|
- UPDATE
|
||||||
resources:
|
resources:
|
||||||
- runnerreplicasets
|
- runnerreplicasets
|
||||||
|
sideEffects: None
|
||||||
|
|||||||
@@ -9,10 +9,7 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/summerwind/actions-runner-controller/api/v1alpha1"
|
"github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
|
||||||
kerrors "k8s.io/apimachinery/pkg/api/errors"
|
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
|
||||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
@@ -63,7 +60,7 @@ func (r *HorizontalRunnerAutoscalerReconciler) fetchSuggestedReplicasFromCache(h
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *HorizontalRunnerAutoscalerReconciler) suggestDesiredReplicas(rd v1alpha1.RunnerDeployment, hra v1alpha1.HorizontalRunnerAutoscaler) (*int, error) {
|
func (r *HorizontalRunnerAutoscalerReconciler) suggestDesiredReplicas(st scaleTarget, hra v1alpha1.HorizontalRunnerAutoscaler) (*int, error) {
|
||||||
if hra.Spec.MinReplicas == nil {
|
if hra.Spec.MinReplicas == nil {
|
||||||
return nil, fmt.Errorf("horizontalrunnerautoscaler %s/%s is missing minReplicas", hra.Namespace, hra.Name)
|
return nil, fmt.Errorf("horizontalrunnerautoscaler %s/%s is missing minReplicas", hra.Namespace, hra.Name)
|
||||||
} else if hra.Spec.MaxReplicas == nil {
|
} else if hra.Spec.MaxReplicas == nil {
|
||||||
@@ -71,44 +68,87 @@ func (r *HorizontalRunnerAutoscalerReconciler) suggestDesiredReplicas(rd v1alpha
|
|||||||
}
|
}
|
||||||
|
|
||||||
metrics := hra.Spec.Metrics
|
metrics := hra.Spec.Metrics
|
||||||
if len(metrics) == 0 {
|
numMetrics := len(metrics)
|
||||||
|
if numMetrics == 0 {
|
||||||
if len(hra.Spec.ScaleUpTriggers) == 0 {
|
if len(hra.Spec.ScaleUpTriggers) == 0 {
|
||||||
return r.suggestReplicasByQueuedAndInProgressWorkflowRuns(rd, hra)
|
return r.suggestReplicasByQueuedAndInProgressWorkflowRuns(st, hra, nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil, nil
|
return nil, nil
|
||||||
} else if metrics[0].Type == v1alpha1.AutoscalingMetricTypeTotalNumberOfQueuedAndInProgressWorkflowRuns {
|
} else if numMetrics > 2 {
|
||||||
return r.suggestReplicasByQueuedAndInProgressWorkflowRuns(rd, hra)
|
return nil, fmt.Errorf("Too many autoscaling metrics configured: It must be 0 to 2, but got %d", numMetrics)
|
||||||
} else if metrics[0].Type == v1alpha1.AutoscalingMetricTypePercentageRunnersBusy {
|
|
||||||
return r.suggestReplicasByPercentageRunnersBusy(rd, hra)
|
|
||||||
} else {
|
|
||||||
return nil, fmt.Errorf("validting autoscaling metrics: unsupported metric type %q", metrics[0].Type)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
primaryMetric := metrics[0]
|
||||||
|
primaryMetricType := primaryMetric.Type
|
||||||
|
|
||||||
|
var (
|
||||||
|
suggested *int
|
||||||
|
err error
|
||||||
|
)
|
||||||
|
|
||||||
|
switch primaryMetricType {
|
||||||
|
case v1alpha1.AutoscalingMetricTypeTotalNumberOfQueuedAndInProgressWorkflowRuns:
|
||||||
|
suggested, err = r.suggestReplicasByQueuedAndInProgressWorkflowRuns(st, hra, &primaryMetric)
|
||||||
|
case v1alpha1.AutoscalingMetricTypePercentageRunnersBusy:
|
||||||
|
suggested, err = r.suggestReplicasByPercentageRunnersBusy(st, hra, primaryMetric)
|
||||||
|
default:
|
||||||
|
return nil, fmt.Errorf("validting autoscaling metrics: unsupported metric type %q", primaryMetric)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if suggested != nil && *suggested > 0 {
|
||||||
|
return suggested, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(metrics) == 1 {
|
||||||
|
// This is never supposed to happen but anyway-
|
||||||
|
// Fall-back to `minReplicas + capacityReservedThroughWebhook`.
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// At this point, we are sure that there are exactly 2 Metrics entries.
|
||||||
|
|
||||||
|
fallbackMetric := metrics[1]
|
||||||
|
fallbackMetricType := fallbackMetric.Type
|
||||||
|
|
||||||
|
if primaryMetricType != v1alpha1.AutoscalingMetricTypePercentageRunnersBusy ||
|
||||||
|
fallbackMetricType != v1alpha1.AutoscalingMetricTypeTotalNumberOfQueuedAndInProgressWorkflowRuns {
|
||||||
|
|
||||||
|
return nil, fmt.Errorf(
|
||||||
|
"invalid HRA Spec: Metrics[0] of %s cannot be combined with Metrics[1] of %s: The only allowed combination is 0=PercentageRunnersBusy and 1=TotalNumberOfQueuedAndInProgressWorkflowRuns",
|
||||||
|
primaryMetricType, fallbackMetricType,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
return r.suggestReplicasByQueuedAndInProgressWorkflowRuns(st, hra, &fallbackMetric)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *HorizontalRunnerAutoscalerReconciler) suggestReplicasByQueuedAndInProgressWorkflowRuns(rd v1alpha1.RunnerDeployment, hra v1alpha1.HorizontalRunnerAutoscaler) (*int, error) {
|
func (r *HorizontalRunnerAutoscalerReconciler) suggestReplicasByQueuedAndInProgressWorkflowRuns(st scaleTarget, hra v1alpha1.HorizontalRunnerAutoscaler, metrics *v1alpha1.MetricSpec) (*int, error) {
|
||||||
|
|
||||||
var repos [][]string
|
var repos [][]string
|
||||||
metrics := hra.Spec.Metrics
|
repoID := st.repo
|
||||||
repoID := rd.Spec.Template.Spec.Repository
|
|
||||||
if repoID == "" {
|
if repoID == "" {
|
||||||
orgName := rd.Spec.Template.Spec.Organization
|
orgName := st.org
|
||||||
if orgName == "" {
|
if orgName == "" {
|
||||||
return nil, fmt.Errorf("asserting runner deployment spec to detect bug: spec.template.organization should not be empty on this code path")
|
return nil, fmt.Errorf("asserting runner deployment spec to detect bug: spec.template.organization should not be empty on this code path")
|
||||||
}
|
}
|
||||||
|
|
||||||
// In case it's an organizational runners deployment without any scaling metrics defined,
|
// In case it's an organizational runners deployment without any scaling metrics defined,
|
||||||
// we assume that the desired replicas should always be `minReplicas + capacityReservedThroughWebhook`.
|
// we assume that the desired replicas should always be `minReplicas + capacityReservedThroughWebhook`.
|
||||||
// See https://github.com/summerwind/actions-runner-controller/issues/377#issuecomment-793372693
|
// See https://github.com/actions-runner-controller/actions-runner-controller/issues/377#issuecomment-793372693
|
||||||
if len(metrics) == 0 {
|
if metrics == nil {
|
||||||
return nil, nil
|
return nil, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(metrics[0].RepositoryNames) == 0 {
|
if len(metrics.RepositoryNames) == 0 {
|
||||||
return nil, errors.New("validating autoscaling metrics: spec.autoscaling.metrics[].repositoryNames is required and must have one more more entries for organizational runner deployment")
|
return nil, errors.New("validating autoscaling metrics: spec.autoscaling.metrics[].repositoryNames is required and must have one more more entries for organizational runner deployment")
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, repoName := range metrics[0].RepositoryNames {
|
for _, repoName := range metrics.RepositoryNames {
|
||||||
repos = append(repos, []string{orgName, repoName})
|
repos = append(repos, []string{orgName, repoName})
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
@@ -187,16 +227,16 @@ func (r *HorizontalRunnerAutoscalerReconciler) suggestReplicasByQueuedAndInProgr
|
|||||||
"workflow_runs_queued", queued,
|
"workflow_runs_queued", queued,
|
||||||
"workflow_runs_unknown", unknown,
|
"workflow_runs_unknown", unknown,
|
||||||
"namespace", hra.Namespace,
|
"namespace", hra.Namespace,
|
||||||
"runner_deployment", rd.Name,
|
"kind", st.kind,
|
||||||
|
"name", st.st,
|
||||||
"horizontal_runner_autoscaler", hra.Name,
|
"horizontal_runner_autoscaler", hra.Name,
|
||||||
)
|
)
|
||||||
|
|
||||||
return &necessaryReplicas, nil
|
return &necessaryReplicas, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *HorizontalRunnerAutoscalerReconciler) suggestReplicasByPercentageRunnersBusy(rd v1alpha1.RunnerDeployment, hra v1alpha1.HorizontalRunnerAutoscaler) (*int, error) {
|
func (r *HorizontalRunnerAutoscalerReconciler) suggestReplicasByPercentageRunnersBusy(st scaleTarget, hra v1alpha1.HorizontalRunnerAutoscaler, metrics v1alpha1.MetricSpec) (*int, error) {
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
metrics := hra.Spec.Metrics[0]
|
|
||||||
scaleUpThreshold := defaultScaleUpThreshold
|
scaleUpThreshold := defaultScaleUpThreshold
|
||||||
scaleDownThreshold := defaultScaleDownThreshold
|
scaleDownThreshold := defaultScaleDownThreshold
|
||||||
scaleUpFactor := defaultScaleUpFactor
|
scaleUpFactor := defaultScaleUpFactor
|
||||||
@@ -252,41 +292,15 @@ func (r *HorizontalRunnerAutoscalerReconciler) suggestReplicasByPercentageRunner
|
|||||||
scaleDownFactor = sdf
|
scaleDownFactor = sdf
|
||||||
}
|
}
|
||||||
|
|
||||||
// return the list of runners in namespace. Horizontal Runner Autoscaler should only be responsible for scaling resources in its own ns.
|
runnerMap, err := st.getRunnerMap()
|
||||||
var runnerList v1alpha1.RunnerList
|
|
||||||
|
|
||||||
var opts []client.ListOption
|
|
||||||
|
|
||||||
opts = append(opts, client.InNamespace(rd.Namespace))
|
|
||||||
|
|
||||||
selector, err := metav1.LabelSelectorAsSelector(getSelector(&rd))
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
opts = append(opts, client.MatchingLabelsSelector{Selector: selector})
|
|
||||||
|
|
||||||
r.Log.V(2).Info("Finding runners with selector", "ns", rd.Namespace)
|
|
||||||
|
|
||||||
if err := r.List(
|
|
||||||
ctx,
|
|
||||||
&runnerList,
|
|
||||||
opts...,
|
|
||||||
); err != nil {
|
|
||||||
if !kerrors.IsNotFound(err) {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
runnerMap := make(map[string]struct{})
|
|
||||||
for _, items := range runnerList.Items {
|
|
||||||
runnerMap[items.Name] = struct{}{}
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
var (
|
||||||
enterprise = rd.Spec.Template.Spec.Enterprise
|
enterprise = st.enterprise
|
||||||
organization = rd.Spec.Template.Spec.Organization
|
organization = st.org
|
||||||
repository = rd.Spec.Template.Spec.Repository
|
repository = st.repo
|
||||||
)
|
)
|
||||||
|
|
||||||
// ListRunners will return all runners managed by GitHub - not restricted to ns
|
// ListRunners will return all runners managed by GitHub - not restricted to ns
|
||||||
@@ -301,7 +315,7 @@ func (r *HorizontalRunnerAutoscalerReconciler) suggestReplicasByPercentageRunner
|
|||||||
|
|
||||||
var desiredReplicasBefore int
|
var desiredReplicasBefore int
|
||||||
|
|
||||||
if v := rd.Spec.Replicas; v == nil {
|
if v := st.replicas; v == nil {
|
||||||
desiredReplicasBefore = 1
|
desiredReplicasBefore = 1
|
||||||
} else {
|
} else {
|
||||||
desiredReplicasBefore = *v
|
desiredReplicasBefore = *v
|
||||||
@@ -313,7 +327,7 @@ func (r *HorizontalRunnerAutoscalerReconciler) suggestReplicasByPercentageRunner
|
|||||||
numRunnersBusy int
|
numRunnersBusy int
|
||||||
)
|
)
|
||||||
|
|
||||||
numRunners = len(runnerList.Items)
|
numRunners = len(runnerMap)
|
||||||
|
|
||||||
for _, runner := range runners {
|
for _, runner := range runners {
|
||||||
if _, ok := runnerMap[*runner.Name]; ok {
|
if _, ok := runnerMap[*runner.Name]; ok {
|
||||||
@@ -340,7 +354,7 @@ func (r *HorizontalRunnerAutoscalerReconciler) suggestReplicasByPercentageRunner
|
|||||||
desiredReplicas = int(float64(desiredReplicasBefore) * scaleDownFactor)
|
desiredReplicas = int(float64(desiredReplicasBefore) * scaleDownFactor)
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
desiredReplicas = *rd.Spec.Replicas
|
desiredReplicas = *st.replicas
|
||||||
}
|
}
|
||||||
|
|
||||||
// NOTES for operators:
|
// NOTES for operators:
|
||||||
@@ -356,7 +370,8 @@ func (r *HorizontalRunnerAutoscalerReconciler) suggestReplicasByPercentageRunner
|
|||||||
"num_runners_registered", numRunnersRegistered,
|
"num_runners_registered", numRunnersRegistered,
|
||||||
"num_runners_busy", numRunnersBusy,
|
"num_runners_busy", numRunnersBusy,
|
||||||
"namespace", hra.Namespace,
|
"namespace", hra.Namespace,
|
||||||
"runner_deployment", rd.Name,
|
"kind", st.kind,
|
||||||
|
"name", st.st,
|
||||||
"horizontal_runner_autoscaler", hra.Name,
|
"horizontal_runner_autoscaler", hra.Name,
|
||||||
"enterprise", enterprise,
|
"enterprise", enterprise,
|
||||||
"organization", organization,
|
"organization", organization,
|
||||||
|
|||||||
@@ -1,14 +1,15 @@
|
|||||||
package controllers
|
package controllers
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
"net/http/httptest"
|
"net/http/httptest"
|
||||||
"net/url"
|
"net/url"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/summerwind/actions-runner-controller/api/v1alpha1"
|
"github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
|
||||||
"github.com/summerwind/actions-runner-controller/github"
|
"github.com/actions-runner-controller/actions-runner-controller/github"
|
||||||
"github.com/summerwind/actions-runner-controller/github/fake"
|
"github.com/actions-runner-controller/actions-runner-controller/github/fake"
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
"k8s.io/apimachinery/pkg/runtime"
|
"k8s.io/apimachinery/pkg/runtime"
|
||||||
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
|
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
|
||||||
@@ -203,13 +204,15 @@ func TestDetermineDesiredReplicas_RepositoryRunner(t *testing.T) {
|
|||||||
Spec: v1alpha1.RunnerDeploymentSpec{
|
Spec: v1alpha1.RunnerDeploymentSpec{
|
||||||
Template: v1alpha1.RunnerTemplate{
|
Template: v1alpha1.RunnerTemplate{
|
||||||
Spec: v1alpha1.RunnerSpec{
|
Spec: v1alpha1.RunnerSpec{
|
||||||
Repository: tc.repo,
|
RunnerConfig: v1alpha1.RunnerConfig{
|
||||||
|
Repository: tc.repo,
|
||||||
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
Replicas: tc.fixed,
|
Replicas: tc.fixed,
|
||||||
},
|
},
|
||||||
Status: v1alpha1.RunnerDeploymentStatus{
|
Status: v1alpha1.RunnerDeploymentStatus{
|
||||||
Replicas: tc.sReplicas,
|
DesiredReplicas: tc.sReplicas,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -224,7 +227,14 @@ func TestDetermineDesiredReplicas_RepositoryRunner(t *testing.T) {
|
|||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
got, _, _, err := h.computeReplicasWithCache(log, metav1Now.Time, rd, hra)
|
minReplicas, _, _, err := h.getMinReplicas(log, metav1Now.Time, hra)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("unexpected error: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
st := h.scaleTargetFromRD(context.Background(), rd)
|
||||||
|
|
||||||
|
got, _, _, err := h.computeReplicasWithCache(log, metav1Now.Time, st, hra, minReplicas)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if tc.err == "" {
|
if tc.err == "" {
|
||||||
t.Fatalf("unexpected error: expected none, got %v", err)
|
t.Fatalf("unexpected error: expected none, got %v", err)
|
||||||
@@ -453,13 +463,15 @@ func TestDetermineDesiredReplicas_OrganizationalRunner(t *testing.T) {
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
Spec: v1alpha1.RunnerSpec{
|
Spec: v1alpha1.RunnerSpec{
|
||||||
Organization: tc.org,
|
RunnerConfig: v1alpha1.RunnerConfig{
|
||||||
|
Organization: tc.org,
|
||||||
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
Replicas: tc.fixed,
|
Replicas: tc.fixed,
|
||||||
},
|
},
|
||||||
Status: v1alpha1.RunnerDeploymentStatus{
|
Status: v1alpha1.RunnerDeploymentStatus{
|
||||||
Replicas: tc.sReplicas,
|
DesiredReplicas: tc.sReplicas,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -483,7 +495,14 @@ func TestDetermineDesiredReplicas_OrganizationalRunner(t *testing.T) {
|
|||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
got, _, _, err := h.computeReplicasWithCache(log, metav1Now.Time, rd, hra)
|
minReplicas, _, _, err := h.getMinReplicas(log, metav1Now.Time, hra)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("unexpected error: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
st := h.scaleTargetFromRD(context.Background(), rd)
|
||||||
|
|
||||||
|
got, _, _, err := h.computeReplicasWithCache(log, metav1Now.Time, st, hra, minReplicas)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if tc.err == "" {
|
if tc.err == "" {
|
||||||
t.Fatalf("unexpected error: expected none, got %v", err)
|
t.Fatalf("unexpected error: expected none, got %v", err)
|
||||||
|
|||||||
@@ -20,21 +20,22 @@ import (
|
|||||||
"context"
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
|
||||||
"k8s.io/apimachinery/pkg/types"
|
|
||||||
"net/http"
|
"net/http"
|
||||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
|
"k8s.io/apimachinery/pkg/types"
|
||||||
|
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||||
|
|
||||||
"github.com/go-logr/logr"
|
"github.com/go-logr/logr"
|
||||||
gogithub "github.com/google/go-github/v33/github"
|
gogithub "github.com/google/go-github/v37/github"
|
||||||
"k8s.io/apimachinery/pkg/runtime"
|
"k8s.io/apimachinery/pkg/runtime"
|
||||||
"k8s.io/client-go/tools/record"
|
"k8s.io/client-go/tools/record"
|
||||||
ctrl "sigs.k8s.io/controller-runtime"
|
ctrl "sigs.k8s.io/controller-runtime"
|
||||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||||
|
|
||||||
"github.com/summerwind/actions-runner-controller/api/v1alpha1"
|
"github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
@@ -60,7 +61,7 @@ type HorizontalRunnerAutoscalerGitHubWebhook struct {
|
|||||||
Name string
|
Name string
|
||||||
}
|
}
|
||||||
|
|
||||||
func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) Reconcile(request reconcile.Request) (reconcile.Result, error) {
|
func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) Reconcile(_ context.Context, request reconcile.Request) (reconcile.Result, error) {
|
||||||
return ctrl.Result{}, nil
|
return ctrl.Result{}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -182,6 +183,45 @@ func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) Handle(w http.Respons
|
|||||||
"action", e.GetAction(),
|
"action", e.GetAction(),
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
case *gogithub.WorkflowJobEvent:
|
||||||
|
if workflowJob := e.GetWorkflowJob(); workflowJob != nil {
|
||||||
|
log = log.WithValues(
|
||||||
|
"workflowJob.status", workflowJob.GetStatus(),
|
||||||
|
"workflowJob.labels", workflowJob.Labels,
|
||||||
|
"repository.name", e.Repo.GetName(),
|
||||||
|
"repository.owner.login", e.Repo.Owner.GetLogin(),
|
||||||
|
"repository.owner.type", e.Repo.Owner.GetType(),
|
||||||
|
"action", e.GetAction(),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
labels := e.WorkflowJob.Labels
|
||||||
|
|
||||||
|
switch e.GetAction() {
|
||||||
|
case "queued", "completed":
|
||||||
|
target, err = autoscaler.getJobScaleUpTargetForRepoOrOrg(
|
||||||
|
context.TODO(),
|
||||||
|
log,
|
||||||
|
e.Repo.GetName(),
|
||||||
|
e.Repo.Owner.GetLogin(),
|
||||||
|
e.Repo.Owner.GetType(),
|
||||||
|
labels,
|
||||||
|
)
|
||||||
|
|
||||||
|
if target != nil {
|
||||||
|
if e.GetAction() == "queued" {
|
||||||
|
target.Amount = 1
|
||||||
|
} else if e.GetAction() == "completed" {
|
||||||
|
// A nagative amount is processed in the tryScale func as a scale-down request,
|
||||||
|
// that erasese the oldest CapacityReservation with the same amount.
|
||||||
|
// If the first CapacityReservation was with Replicas=1, this negative scale target erases that,
|
||||||
|
// so that the resulting desired replicas decreases by 1.
|
||||||
|
target.Amount = -1
|
||||||
|
}
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
|
||||||
|
}
|
||||||
case *gogithub.PingEvent:
|
case *gogithub.PingEvent:
|
||||||
ok = true
|
ok = true
|
||||||
|
|
||||||
@@ -226,7 +266,7 @@ func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) Handle(w http.Respons
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := autoscaler.tryScaleUp(context.TODO(), target); err != nil {
|
if err := autoscaler.tryScale(context.TODO(), target); err != nil {
|
||||||
log.Error(err, "could not scale up")
|
log.Error(err, "could not scale up")
|
||||||
|
|
||||||
return
|
return
|
||||||
@@ -236,7 +276,7 @@ func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) Handle(w http.Respons
|
|||||||
|
|
||||||
w.WriteHeader(http.StatusOK)
|
w.WriteHeader(http.StatusOK)
|
||||||
|
|
||||||
msg := fmt.Sprintf("scaled %s by 1", target.Name)
|
msg := fmt.Sprintf("scaled %s by %d", target.Name, target.Amount)
|
||||||
|
|
||||||
autoscaler.Log.Info(msg)
|
autoscaler.Log.Info(msg)
|
||||||
|
|
||||||
@@ -330,6 +370,8 @@ func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) getScaleTarget(ctx co
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
autoscaler.Log.V(1).Info(fmt.Sprintf("Found %d HRAs by key", len(hras)), "key", name)
|
||||||
|
|
||||||
targets := autoscaler.searchScaleTargets(hras, f)
|
targets := autoscaler.searchScaleTargets(hras, f)
|
||||||
|
|
||||||
n := len(targets)
|
n := len(targets)
|
||||||
@@ -362,14 +404,16 @@ func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) getScaleUpTarget(ctx
|
|||||||
repositoryRunnerKey := owner + "/" + repo
|
repositoryRunnerKey := owner + "/" + repo
|
||||||
|
|
||||||
if target, err := autoscaler.getScaleTarget(ctx, repositoryRunnerKey, f); err != nil {
|
if target, err := autoscaler.getScaleTarget(ctx, repositoryRunnerKey, f); err != nil {
|
||||||
autoscaler.Log.Info("finding repository-wide runner", "repository", repositoryRunnerKey)
|
log.Info("finding repository-wide runner", "repository", repositoryRunnerKey)
|
||||||
return nil, err
|
return nil, err
|
||||||
} else if target != nil {
|
} else if target != nil {
|
||||||
autoscaler.Log.Info("scale up target is repository-wide runners", "repository", repo)
|
log.Info("scale up target is repository-wide runners", "repository", repo)
|
||||||
return target, nil
|
return target, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
if ownerType == "User" {
|
if ownerType == "User" {
|
||||||
|
log.V(1).Info("no repository runner found", "organization", owner)
|
||||||
|
|
||||||
return nil, nil
|
return nil, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -379,12 +423,147 @@ func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) getScaleUpTarget(ctx
|
|||||||
} else if target != nil {
|
} else if target != nil {
|
||||||
log.Info("scale up target is organizational runners", "organization", owner)
|
log.Info("scale up target is organizational runners", "organization", owner)
|
||||||
return target, nil
|
return target, nil
|
||||||
|
} else {
|
||||||
|
log.V(1).Info("no repository runner or organizational runner found",
|
||||||
|
"repository", repositoryRunnerKey,
|
||||||
|
"organization", owner,
|
||||||
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil, nil
|
return nil, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) tryScaleUp(ctx context.Context, target *ScaleTarget) error {
|
func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) getJobScaleUpTargetForRepoOrOrg(ctx context.Context, log logr.Logger, repo, owner, ownerType string, labels []string) (*ScaleTarget, error) {
|
||||||
|
repositoryRunnerKey := owner + "/" + repo
|
||||||
|
|
||||||
|
if target, err := autoscaler.getJobScaleTarget(ctx, repositoryRunnerKey, labels); err != nil {
|
||||||
|
log.Info("finding repository-wide runner", "repository", repositoryRunnerKey)
|
||||||
|
return nil, err
|
||||||
|
} else if target != nil {
|
||||||
|
log.Info("job scale up target is repository-wide runners", "repository", repo)
|
||||||
|
return target, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if ownerType == "User" {
|
||||||
|
log.V(1).Info("no repository runner found", "organization", owner)
|
||||||
|
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if target, err := autoscaler.getJobScaleTarget(ctx, owner, labels); err != nil {
|
||||||
|
log.Info("finding organizational runner", "organization", owner)
|
||||||
|
return nil, err
|
||||||
|
} else if target != nil {
|
||||||
|
log.Info("job scale up target is organizational runners", "organization", owner)
|
||||||
|
return target, nil
|
||||||
|
} else {
|
||||||
|
log.V(1).Info("no repository runner or organizational runner found",
|
||||||
|
"repository", repositoryRunnerKey,
|
||||||
|
"organization", owner,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) getJobScaleTarget(ctx context.Context, name string, labels []string) (*ScaleTarget, error) {
|
||||||
|
hras, err := autoscaler.findHRAsByKey(ctx, name)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
autoscaler.Log.V(1).Info(fmt.Sprintf("Found %d HRAs by key", len(hras)), "key", name)
|
||||||
|
|
||||||
|
HRA:
|
||||||
|
for _, hra := range hras {
|
||||||
|
if !hra.ObjectMeta.DeletionTimestamp.IsZero() {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(hra.Spec.ScaleUpTriggers) > 1 {
|
||||||
|
autoscaler.Log.V(1).Info("Skipping this HRA as it has too many ScaleUpTriggers to be used in workflow_job based scaling", "hra", hra.Name)
|
||||||
|
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
var duration metav1.Duration
|
||||||
|
|
||||||
|
if len(hra.Spec.ScaleUpTriggers) > 0 {
|
||||||
|
duration = hra.Spec.ScaleUpTriggers[0].Duration
|
||||||
|
}
|
||||||
|
|
||||||
|
if duration.Duration <= 0 {
|
||||||
|
// Try to release the reserved capacity after at least 10 minutes by default,
|
||||||
|
// we won't end up in the reserved capacity remained forever in case GitHub somehow stopped sending us "completed" workflow_job events.
|
||||||
|
// GitHub usually send us those but nothing is 100% guaranteed, e.g. in case of something went wrong on GitHub :)
|
||||||
|
// Probably we'd better make this configurable via custom resources in the future?
|
||||||
|
duration.Duration = 10 * time.Minute
|
||||||
|
}
|
||||||
|
|
||||||
|
switch hra.Spec.ScaleTargetRef.Kind {
|
||||||
|
case "RunnerSet":
|
||||||
|
var rs v1alpha1.RunnerSet
|
||||||
|
|
||||||
|
if err := autoscaler.Client.Get(context.Background(), types.NamespacedName{Namespace: hra.Namespace, Name: hra.Spec.ScaleTargetRef.Name}, &rs); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(labels) == 1 && labels[0] == "self-hosted" {
|
||||||
|
return &ScaleTarget{HorizontalRunnerAutoscaler: hra, ScaleUpTrigger: v1alpha1.ScaleUpTrigger{Duration: duration}}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Ensure that the RunnerSet-managed runners have all the labels requested by the workflow_job.
|
||||||
|
for _, l := range labels {
|
||||||
|
var matched bool
|
||||||
|
for _, l2 := range rs.Spec.Labels {
|
||||||
|
if l == l2 {
|
||||||
|
matched = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if !matched {
|
||||||
|
continue HRA
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return &ScaleTarget{HorizontalRunnerAutoscaler: hra, ScaleUpTrigger: v1alpha1.ScaleUpTrigger{Duration: duration}}, nil
|
||||||
|
case "RunnerDeployment", "":
|
||||||
|
var rd v1alpha1.RunnerDeployment
|
||||||
|
|
||||||
|
if err := autoscaler.Client.Get(context.Background(), types.NamespacedName{Namespace: hra.Namespace, Name: hra.Spec.ScaleTargetRef.Name}, &rd); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(labels) == 1 && labels[0] == "self-hosted" {
|
||||||
|
return &ScaleTarget{HorizontalRunnerAutoscaler: hra, ScaleUpTrigger: v1alpha1.ScaleUpTrigger{Duration: duration}}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Ensure that the RunnerDeployment-managed runners have all the labels requested by the workflow_job.
|
||||||
|
for _, l := range labels {
|
||||||
|
var matched bool
|
||||||
|
for _, l2 := range rd.Spec.Template.Labels {
|
||||||
|
if l == l2 {
|
||||||
|
matched = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if !matched {
|
||||||
|
continue HRA
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return &ScaleTarget{HorizontalRunnerAutoscaler: hra, ScaleUpTrigger: v1alpha1.ScaleUpTrigger{Duration: duration}}, nil
|
||||||
|
default:
|
||||||
|
return nil, fmt.Errorf("unsupported scaleTargetRef.kind: %v", hra.Spec.ScaleTargetRef.Kind)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) tryScale(ctx context.Context, target *ScaleTarget) error {
|
||||||
if target == nil {
|
if target == nil {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@@ -393,16 +572,38 @@ func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) tryScaleUp(ctx contex
|
|||||||
|
|
||||||
amount := 1
|
amount := 1
|
||||||
|
|
||||||
if target.ScaleUpTrigger.Amount > 0 {
|
if target.ScaleUpTrigger.Amount != 0 {
|
||||||
amount = target.ScaleUpTrigger.Amount
|
amount = target.ScaleUpTrigger.Amount
|
||||||
}
|
}
|
||||||
|
|
||||||
capacityReservations := getValidCapacityReservations(copy)
|
capacityReservations := getValidCapacityReservations(copy)
|
||||||
|
|
||||||
copy.Spec.CapacityReservations = append(capacityReservations, v1alpha1.CapacityReservation{
|
if amount > 0 {
|
||||||
ExpirationTime: metav1.Time{Time: time.Now().Add(target.ScaleUpTrigger.Duration.Duration)},
|
copy.Spec.CapacityReservations = append(capacityReservations, v1alpha1.CapacityReservation{
|
||||||
Replicas: amount,
|
ExpirationTime: metav1.Time{Time: time.Now().Add(target.ScaleUpTrigger.Duration.Duration)},
|
||||||
})
|
Replicas: amount,
|
||||||
|
})
|
||||||
|
} else if amount < 0 {
|
||||||
|
var reservations []v1alpha1.CapacityReservation
|
||||||
|
|
||||||
|
var found bool
|
||||||
|
|
||||||
|
for _, r := range capacityReservations {
|
||||||
|
if !found && r.Replicas+amount == 0 {
|
||||||
|
found = true
|
||||||
|
} else {
|
||||||
|
reservations = append(reservations, r)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
copy.Spec.CapacityReservations = reservations
|
||||||
|
}
|
||||||
|
|
||||||
|
autoscaler.Log.Info(
|
||||||
|
"Patching hra for capacityReservations update",
|
||||||
|
"before", target.HorizontalRunnerAutoscaler.Spec.CapacityReservations,
|
||||||
|
"after", copy.Spec.CapacityReservations,
|
||||||
|
)
|
||||||
|
|
||||||
if err := autoscaler.Client.Patch(ctx, copy, client.MergeFrom(&target.HorizontalRunnerAutoscaler)); err != nil {
|
if err := autoscaler.Client.Patch(ctx, copy, client.MergeFrom(&target.HorizontalRunnerAutoscaler)); err != nil {
|
||||||
return fmt.Errorf("patching horizontalrunnerautoscaler to add capacity reservation: %w", err)
|
return fmt.Errorf("patching horizontalrunnerautoscaler to add capacity reservation: %w", err)
|
||||||
@@ -433,20 +634,33 @@ func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) SetupWithManager(mgr
|
|||||||
|
|
||||||
autoscaler.Recorder = mgr.GetEventRecorderFor(name)
|
autoscaler.Recorder = mgr.GetEventRecorderFor(name)
|
||||||
|
|
||||||
if err := mgr.GetFieldIndexer().IndexField(&v1alpha1.HorizontalRunnerAutoscaler{}, scaleTargetKey, func(rawObj runtime.Object) []string {
|
if err := mgr.GetFieldIndexer().IndexField(context.TODO(), &v1alpha1.HorizontalRunnerAutoscaler{}, scaleTargetKey, func(rawObj client.Object) []string {
|
||||||
hra := rawObj.(*v1alpha1.HorizontalRunnerAutoscaler)
|
hra := rawObj.(*v1alpha1.HorizontalRunnerAutoscaler)
|
||||||
|
|
||||||
if hra.Spec.ScaleTargetRef.Name == "" {
|
if hra.Spec.ScaleTargetRef.Name == "" {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
var rd v1alpha1.RunnerDeployment
|
switch hra.Spec.ScaleTargetRef.Kind {
|
||||||
|
case "", "RunnerDeployment":
|
||||||
|
var rd v1alpha1.RunnerDeployment
|
||||||
|
|
||||||
if err := autoscaler.Client.Get(context.Background(), types.NamespacedName{Namespace: hra.Namespace, Name: hra.Spec.ScaleTargetRef.Name}, &rd); err != nil {
|
if err := autoscaler.Client.Get(context.Background(), types.NamespacedName{Namespace: hra.Namespace, Name: hra.Spec.ScaleTargetRef.Name}, &rd); err != nil {
|
||||||
return nil
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return []string{rd.Spec.Template.Spec.Repository, rd.Spec.Template.Spec.Organization}
|
||||||
|
case "RunnerSet":
|
||||||
|
var rs v1alpha1.RunnerSet
|
||||||
|
|
||||||
|
if err := autoscaler.Client.Get(context.Background(), types.NamespacedName{Namespace: hra.Namespace, Name: hra.Spec.ScaleTargetRef.Name}, &rs); err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return []string{rs.Spec.Repository, rs.Spec.Organization}
|
||||||
}
|
}
|
||||||
|
|
||||||
return []string{rd.Spec.Template.Spec.Repository, rd.Spec.Template.Spec.Organization}
|
return nil
|
||||||
}); err != nil {
|
}); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,9 +1,9 @@
|
|||||||
package controllers
|
package controllers
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"github.com/google/go-github/v33/github"
|
"github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
|
||||||
"github.com/summerwind/actions-runner-controller/api/v1alpha1"
|
"github.com/actions-runner-controller/actions-runner-controller/pkg/actionsglob"
|
||||||
"github.com/summerwind/actions-runner-controller/pkg/actionsglob"
|
"github.com/google/go-github/v37/github"
|
||||||
)
|
)
|
||||||
|
|
||||||
func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) MatchCheckRunEvent(event *github.CheckRunEvent) func(scaleUpTrigger v1alpha1.ScaleUpTrigger) bool {
|
func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) MatchCheckRunEvent(event *github.CheckRunEvent) func(scaleUpTrigger v1alpha1.ScaleUpTrigger) bool {
|
||||||
@@ -38,6 +38,16 @@ func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) MatchCheckRunEvent(ev
|
|||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if len(scaleUpTrigger.GitHubEvent.CheckRun.Repositories) > 0 {
|
||||||
|
for _, repository := range scaleUpTrigger.GitHubEvent.CheckRun.Repositories {
|
||||||
|
if repository == *event.Repo.Name {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,8 +1,8 @@
|
|||||||
package controllers
|
package controllers
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"github.com/google/go-github/v33/github"
|
"github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
|
||||||
"github.com/summerwind/actions-runner-controller/api/v1alpha1"
|
"github.com/google/go-github/v37/github"
|
||||||
)
|
)
|
||||||
|
|
||||||
func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) MatchPullRequestEvent(event *github.PullRequestEvent) func(scaleUpTrigger v1alpha1.ScaleUpTrigger) bool {
|
func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) MatchPullRequestEvent(event *github.PullRequestEvent) func(scaleUpTrigger v1alpha1.ScaleUpTrigger) bool {
|
||||||
|
|||||||
@@ -1,8 +1,8 @@
|
|||||||
package controllers
|
package controllers
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"github.com/google/go-github/v33/github"
|
"github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
|
||||||
"github.com/summerwind/actions-runner-controller/api/v1alpha1"
|
"github.com/google/go-github/v37/github"
|
||||||
)
|
)
|
||||||
|
|
||||||
func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) MatchPushEvent(event *github.PushEvent) func(scaleUpTrigger v1alpha1.ScaleUpTrigger) bool {
|
func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) MatchPushEvent(event *github.PushEvent) func(scaleUpTrigger v1alpha1.ScaleUpTrigger) bool {
|
||||||
|
|||||||
@@ -4,21 +4,22 @@ import (
|
|||||||
"bytes"
|
"bytes"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"github.com/go-logr/logr"
|
|
||||||
"github.com/google/go-github/v33/github"
|
|
||||||
actionsv1alpha1 "github.com/summerwind/actions-runner-controller/api/v1alpha1"
|
|
||||||
"io"
|
"io"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
|
||||||
"k8s.io/apimachinery/pkg/runtime"
|
|
||||||
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
|
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/http/httptest"
|
"net/http/httptest"
|
||||||
"net/url"
|
"net/url"
|
||||||
"os"
|
"os"
|
||||||
"sigs.k8s.io/controller-runtime/pkg/client/fake"
|
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
actionsv1alpha1 "github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
|
||||||
|
"github.com/go-logr/logr"
|
||||||
|
"github.com/google/go-github/v37/github"
|
||||||
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
|
"k8s.io/apimachinery/pkg/runtime"
|
||||||
|
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
|
||||||
|
"sigs.k8s.io/controller-runtime/pkg/client/fake"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
|
|||||||
@@ -19,10 +19,13 @@ package controllers
|
|||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
corev1 "k8s.io/api/core/v1"
|
"reflect"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/summerwind/actions-runner-controller/github"
|
corev1 "k8s.io/api/core/v1"
|
||||||
|
|
||||||
|
"github.com/actions-runner-controller/actions-runner-controller/github"
|
||||||
|
kerrors "k8s.io/apimachinery/pkg/api/errors"
|
||||||
"k8s.io/apimachinery/pkg/types"
|
"k8s.io/apimachinery/pkg/types"
|
||||||
|
|
||||||
"github.com/go-logr/logr"
|
"github.com/go-logr/logr"
|
||||||
@@ -33,8 +36,8 @@ import (
|
|||||||
|
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
|
|
||||||
"github.com/summerwind/actions-runner-controller/api/v1alpha1"
|
"github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
|
||||||
"github.com/summerwind/actions-runner-controller/controllers/metrics"
|
"github.com/actions-runner-controller/actions-runner-controller/controllers/metrics"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
@@ -61,8 +64,7 @@ const defaultReplicas = 1
|
|||||||
// +kubebuilder:rbac:groups=actions.summerwind.dev,resources=horizontalrunnerautoscalers/status,verbs=get;update;patch
|
// +kubebuilder:rbac:groups=actions.summerwind.dev,resources=horizontalrunnerautoscalers/status,verbs=get;update;patch
|
||||||
// +kubebuilder:rbac:groups=core,resources=events,verbs=create;patch
|
// +kubebuilder:rbac:groups=core,resources=events,verbs=create;patch
|
||||||
|
|
||||||
func (r *HorizontalRunnerAutoscalerReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
|
func (r *HorizontalRunnerAutoscalerReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
|
||||||
ctx := context.Background()
|
|
||||||
log := r.Log.WithValues("horizontalrunnerautoscaler", req.NamespacedName)
|
log := r.Log.WithValues("horizontalrunnerautoscaler", req.NamespacedName)
|
||||||
|
|
||||||
var hra v1alpha1.HorizontalRunnerAutoscaler
|
var hra v1alpha1.HorizontalRunnerAutoscaler
|
||||||
@@ -76,21 +78,191 @@ func (r *HorizontalRunnerAutoscalerReconciler) Reconcile(req ctrl.Request) (ctrl
|
|||||||
|
|
||||||
metrics.SetHorizontalRunnerAutoscalerSpec(hra.ObjectMeta, hra.Spec)
|
metrics.SetHorizontalRunnerAutoscalerSpec(hra.ObjectMeta, hra.Spec)
|
||||||
|
|
||||||
var rd v1alpha1.RunnerDeployment
|
kind := hra.Spec.ScaleTargetRef.Kind
|
||||||
if err := r.Get(ctx, types.NamespacedName{
|
|
||||||
Namespace: req.Namespace,
|
switch kind {
|
||||||
Name: hra.Spec.ScaleTargetRef.Name,
|
case "", "RunnerDeployment":
|
||||||
}, &rd); err != nil {
|
var rd v1alpha1.RunnerDeployment
|
||||||
return ctrl.Result{}, client.IgnoreNotFound(err)
|
if err := r.Get(ctx, types.NamespacedName{
|
||||||
|
Namespace: req.Namespace,
|
||||||
|
Name: hra.Spec.ScaleTargetRef.Name,
|
||||||
|
}, &rd); err != nil {
|
||||||
|
return ctrl.Result{}, client.IgnoreNotFound(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if !rd.ObjectMeta.DeletionTimestamp.IsZero() {
|
||||||
|
return ctrl.Result{}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
st := r.scaleTargetFromRD(ctx, rd)
|
||||||
|
|
||||||
|
return r.reconcile(ctx, req, log, hra, st, func(newDesiredReplicas int) error {
|
||||||
|
currentDesiredReplicas := getIntOrDefault(rd.Spec.Replicas, defaultReplicas)
|
||||||
|
|
||||||
|
// Please add more conditions that we can in-place update the newest runnerreplicaset without disruption
|
||||||
|
if currentDesiredReplicas != newDesiredReplicas {
|
||||||
|
copy := rd.DeepCopy()
|
||||||
|
copy.Spec.Replicas = &newDesiredReplicas
|
||||||
|
|
||||||
|
if err := r.Client.Patch(ctx, copy, client.MergeFrom(&rd)); err != nil {
|
||||||
|
return fmt.Errorf("patching runnerdeployment to have %d replicas: %w", newDesiredReplicas, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
case "RunnerSet":
|
||||||
|
var rs v1alpha1.RunnerSet
|
||||||
|
if err := r.Get(ctx, types.NamespacedName{
|
||||||
|
Namespace: req.Namespace,
|
||||||
|
Name: hra.Spec.ScaleTargetRef.Name,
|
||||||
|
}, &rs); err != nil {
|
||||||
|
return ctrl.Result{}, client.IgnoreNotFound(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if !rs.ObjectMeta.DeletionTimestamp.IsZero() {
|
||||||
|
return ctrl.Result{}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var replicas *int
|
||||||
|
|
||||||
|
if rs.Spec.Replicas != nil {
|
||||||
|
v := int(*rs.Spec.Replicas)
|
||||||
|
replicas = &v
|
||||||
|
}
|
||||||
|
|
||||||
|
st := scaleTarget{
|
||||||
|
st: rs.Name,
|
||||||
|
kind: "runnerset",
|
||||||
|
enterprise: rs.Spec.Enterprise,
|
||||||
|
org: rs.Spec.Organization,
|
||||||
|
repo: rs.Spec.Repository,
|
||||||
|
replicas: replicas,
|
||||||
|
getRunnerMap: func() (map[string]struct{}, error) {
|
||||||
|
// return the list of runners in namespace. Horizontal Runner Autoscaler should only be responsible for scaling resources in its own ns.
|
||||||
|
var runnerPodList corev1.PodList
|
||||||
|
|
||||||
|
var opts []client.ListOption
|
||||||
|
|
||||||
|
opts = append(opts, client.InNamespace(rs.Namespace))
|
||||||
|
|
||||||
|
selector, err := metav1.LabelSelectorAsSelector(rs.Spec.Selector)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
opts = append(opts, client.MatchingLabelsSelector{Selector: selector})
|
||||||
|
|
||||||
|
r.Log.V(2).Info("Finding runnerset's runner pods with selector", "ns", rs.Namespace)
|
||||||
|
|
||||||
|
if err := r.List(
|
||||||
|
ctx,
|
||||||
|
&runnerPodList,
|
||||||
|
opts...,
|
||||||
|
); err != nil {
|
||||||
|
if !kerrors.IsNotFound(err) {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
runnerMap := make(map[string]struct{})
|
||||||
|
for _, items := range runnerPodList.Items {
|
||||||
|
runnerMap[items.Name] = struct{}{}
|
||||||
|
}
|
||||||
|
|
||||||
|
return runnerMap, nil
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
return r.reconcile(ctx, req, log, hra, st, func(newDesiredReplicas int) error {
|
||||||
|
var replicas *int
|
||||||
|
if rs.Spec.Replicas != nil {
|
||||||
|
v := int(*rs.Spec.Replicas)
|
||||||
|
replicas = &v
|
||||||
|
}
|
||||||
|
currentDesiredReplicas := getIntOrDefault(replicas, defaultReplicas)
|
||||||
|
|
||||||
|
if currentDesiredReplicas != newDesiredReplicas {
|
||||||
|
copy := rs.DeepCopy()
|
||||||
|
v := int32(newDesiredReplicas)
|
||||||
|
copy.Spec.Replicas = &v
|
||||||
|
|
||||||
|
if err := r.Client.Patch(ctx, copy, client.MergeFrom(&rs)); err != nil {
|
||||||
|
return fmt.Errorf("patching runnerset to have %d replicas: %w", newDesiredReplicas, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
if !rd.ObjectMeta.DeletionTimestamp.IsZero() {
|
log.Info(fmt.Sprintf("Unsupported scale target %s %s: kind %s is not supported. valid kinds are %s and %s", kind, hra.Spec.ScaleTargetRef.Name, kind, "RunnerDeployment", "RunnerSet"))
|
||||||
return ctrl.Result{}, nil
|
|
||||||
|
return ctrl.Result{}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *HorizontalRunnerAutoscalerReconciler) scaleTargetFromRD(ctx context.Context, rd v1alpha1.RunnerDeployment) scaleTarget {
|
||||||
|
st := scaleTarget{
|
||||||
|
st: rd.Name,
|
||||||
|
kind: "runnerdeployment",
|
||||||
|
enterprise: rd.Spec.Template.Spec.Enterprise,
|
||||||
|
org: rd.Spec.Template.Spec.Organization,
|
||||||
|
repo: rd.Spec.Template.Spec.Repository,
|
||||||
|
replicas: rd.Spec.Replicas,
|
||||||
|
getRunnerMap: func() (map[string]struct{}, error) {
|
||||||
|
// return the list of runners in namespace. Horizontal Runner Autoscaler should only be responsible for scaling resources in its own ns.
|
||||||
|
var runnerList v1alpha1.RunnerList
|
||||||
|
|
||||||
|
var opts []client.ListOption
|
||||||
|
|
||||||
|
opts = append(opts, client.InNamespace(rd.Namespace))
|
||||||
|
|
||||||
|
selector, err := metav1.LabelSelectorAsSelector(getSelector(&rd))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
opts = append(opts, client.MatchingLabelsSelector{Selector: selector})
|
||||||
|
|
||||||
|
r.Log.V(2).Info("Finding runners with selector", "ns", rd.Namespace)
|
||||||
|
|
||||||
|
if err := r.List(
|
||||||
|
ctx,
|
||||||
|
&runnerList,
|
||||||
|
opts...,
|
||||||
|
); err != nil {
|
||||||
|
if !kerrors.IsNotFound(err) {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
runnerMap := make(map[string]struct{})
|
||||||
|
for _, items := range runnerList.Items {
|
||||||
|
runnerMap[items.Name] = struct{}{}
|
||||||
|
}
|
||||||
|
|
||||||
|
return runnerMap, nil
|
||||||
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
return st
|
||||||
|
}
|
||||||
|
|
||||||
|
type scaleTarget struct {
|
||||||
|
st, kind string
|
||||||
|
enterprise, repo, org string
|
||||||
|
replicas *int
|
||||||
|
|
||||||
|
getRunnerMap func() (map[string]struct{}, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *HorizontalRunnerAutoscalerReconciler) reconcile(ctx context.Context, req ctrl.Request, log logr.Logger, hra v1alpha1.HorizontalRunnerAutoscaler, st scaleTarget, updatedDesiredReplicas func(int) error) (ctrl.Result, error) {
|
||||||
now := time.Now()
|
now := time.Now()
|
||||||
|
|
||||||
newDesiredReplicas, computedReplicas, computedReplicasFromCache, err := r.computeReplicasWithCache(log, now, rd, hra)
|
minReplicas, active, upcoming, err := r.getMinReplicas(log, now, hra)
|
||||||
|
if err != nil {
|
||||||
|
log.Error(err, "Could not compute min replicas")
|
||||||
|
|
||||||
|
return ctrl.Result{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
newDesiredReplicas, computedReplicas, computedReplicasFromCache, err := r.computeReplicasWithCache(log, now, st, hra, minReplicas)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
r.Recorder.Event(&hra, corev1.EventTypeNormal, "RunnerAutoscalingFailure", err.Error())
|
r.Recorder.Event(&hra, corev1.EventTypeNormal, "RunnerAutoscalingFailure", err.Error())
|
||||||
|
|
||||||
@@ -99,23 +271,13 @@ func (r *HorizontalRunnerAutoscalerReconciler) Reconcile(req ctrl.Request) (ctrl
|
|||||||
return ctrl.Result{}, err
|
return ctrl.Result{}, err
|
||||||
}
|
}
|
||||||
|
|
||||||
currentDesiredReplicas := getIntOrDefault(rd.Spec.Replicas, defaultReplicas)
|
if err := updatedDesiredReplicas(newDesiredReplicas); err != nil {
|
||||||
|
return ctrl.Result{}, err
|
||||||
// Please add more conditions that we can in-place update the newest runnerreplicaset without disruption
|
|
||||||
if currentDesiredReplicas != newDesiredReplicas {
|
|
||||||
copy := rd.DeepCopy()
|
|
||||||
copy.Spec.Replicas = &newDesiredReplicas
|
|
||||||
|
|
||||||
if err := r.Client.Patch(ctx, copy, client.MergeFrom(&rd)); err != nil {
|
|
||||||
return ctrl.Result{}, fmt.Errorf("patching runnerdeployment to have %d replicas: %w", newDesiredReplicas, err)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
var updated *v1alpha1.HorizontalRunnerAutoscaler
|
updated := hra.DeepCopy()
|
||||||
|
|
||||||
if hra.Status.DesiredReplicas == nil || *hra.Status.DesiredReplicas != newDesiredReplicas {
|
if hra.Status.DesiredReplicas == nil || *hra.Status.DesiredReplicas != newDesiredReplicas {
|
||||||
updated = hra.DeepCopy()
|
|
||||||
|
|
||||||
if (hra.Status.DesiredReplicas == nil && newDesiredReplicas > 1) ||
|
if (hra.Status.DesiredReplicas == nil && newDesiredReplicas > 1) ||
|
||||||
(hra.Status.DesiredReplicas != nil && newDesiredReplicas > *hra.Status.DesiredReplicas) {
|
(hra.Status.DesiredReplicas != nil && newDesiredReplicas > *hra.Status.DesiredReplicas) {
|
||||||
|
|
||||||
@@ -126,10 +288,6 @@ func (r *HorizontalRunnerAutoscalerReconciler) Reconcile(req ctrl.Request) (ctrl
|
|||||||
}
|
}
|
||||||
|
|
||||||
if computedReplicasFromCache == nil {
|
if computedReplicasFromCache == nil {
|
||||||
if updated == nil {
|
|
||||||
updated = hra.DeepCopy()
|
|
||||||
}
|
|
||||||
|
|
||||||
cacheEntries := getValidCacheEntries(updated, now)
|
cacheEntries := getValidCacheEntries(updated, now)
|
||||||
|
|
||||||
var cacheDuration time.Duration
|
var cacheDuration time.Duration
|
||||||
@@ -147,11 +305,34 @@ func (r *HorizontalRunnerAutoscalerReconciler) Reconcile(req ctrl.Request) (ctrl
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
if updated != nil {
|
var overridesSummary string
|
||||||
|
|
||||||
|
if (active != nil && upcoming == nil) || (active != nil && upcoming != nil && active.Period.EndTime.Before(upcoming.Period.StartTime)) {
|
||||||
|
after := defaultReplicas
|
||||||
|
if hra.Spec.MinReplicas != nil && *hra.Spec.MinReplicas >= 0 {
|
||||||
|
after = *hra.Spec.MinReplicas
|
||||||
|
}
|
||||||
|
|
||||||
|
overridesSummary = fmt.Sprintf("min=%d time=%s", after, active.Period.EndTime)
|
||||||
|
}
|
||||||
|
|
||||||
|
if active == nil && upcoming != nil || (active != nil && upcoming != nil && active.Period.EndTime.After(upcoming.Period.StartTime)) {
|
||||||
|
if upcoming.ScheduledOverride.MinReplicas != nil {
|
||||||
|
overridesSummary = fmt.Sprintf("min=%d time=%s", *upcoming.ScheduledOverride.MinReplicas, upcoming.Period.StartTime)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if overridesSummary != "" {
|
||||||
|
updated.Status.ScheduledOverridesSummary = &overridesSummary
|
||||||
|
} else {
|
||||||
|
updated.Status.ScheduledOverridesSummary = nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if !reflect.DeepEqual(hra.Status, updated.Status) {
|
||||||
metrics.SetHorizontalRunnerAutoscalerStatus(updated.ObjectMeta, updated.Status)
|
metrics.SetHorizontalRunnerAutoscalerStatus(updated.ObjectMeta, updated.Status)
|
||||||
|
|
||||||
if err := r.Status().Patch(ctx, updated, client.MergeFrom(&hra)); err != nil {
|
if err := r.Status().Patch(ctx, updated, client.MergeFrom(&hra)); err != nil {
|
||||||
return ctrl.Result{}, fmt.Errorf("patching horizontalrunnerautoscaler status to add cache entry: %w", err)
|
return ctrl.Result{}, fmt.Errorf("patching horizontalrunnerautoscaler status: %w", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -184,12 +365,85 @@ func (r *HorizontalRunnerAutoscalerReconciler) SetupWithManager(mgr ctrl.Manager
|
|||||||
Complete(r)
|
Complete(r)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *HorizontalRunnerAutoscalerReconciler) computeReplicasWithCache(log logr.Logger, now time.Time, rd v1alpha1.RunnerDeployment, hra v1alpha1.HorizontalRunnerAutoscaler) (int, int, *int, error) {
|
type Override struct {
|
||||||
|
ScheduledOverride v1alpha1.ScheduledOverride
|
||||||
|
Period Period
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *HorizontalRunnerAutoscalerReconciler) matchScheduledOverrides(log logr.Logger, now time.Time, hra v1alpha1.HorizontalRunnerAutoscaler) (*int, *Override, *Override, error) {
|
||||||
|
var minReplicas *int
|
||||||
|
var active, upcoming *Override
|
||||||
|
|
||||||
|
for _, o := range hra.Spec.ScheduledOverrides {
|
||||||
|
log.V(1).Info(
|
||||||
|
"Checking scheduled override",
|
||||||
|
"now", now,
|
||||||
|
"startTime", o.StartTime,
|
||||||
|
"endTime", o.EndTime,
|
||||||
|
"frequency", o.RecurrenceRule.Frequency,
|
||||||
|
"untilTime", o.RecurrenceRule.UntilTime,
|
||||||
|
)
|
||||||
|
|
||||||
|
a, u, err := MatchSchedule(
|
||||||
|
now, o.StartTime.Time, o.EndTime.Time,
|
||||||
|
RecurrenceRule{
|
||||||
|
Frequency: o.RecurrenceRule.Frequency,
|
||||||
|
UntilTime: o.RecurrenceRule.UntilTime.Time,
|
||||||
|
},
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
return minReplicas, nil, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Use the first when there are two or more active scheduled overrides,
|
||||||
|
// as the spec defines that the earlier scheduled override is prioritized higher than later ones.
|
||||||
|
if a != nil && active == nil {
|
||||||
|
active = &Override{Period: *a, ScheduledOverride: o}
|
||||||
|
|
||||||
|
if o.MinReplicas != nil {
|
||||||
|
minReplicas = o.MinReplicas
|
||||||
|
|
||||||
|
log.V(1).Info(
|
||||||
|
"Found active scheduled override",
|
||||||
|
"activeStartTime", a.StartTime,
|
||||||
|
"activeEndTime", a.EndTime,
|
||||||
|
"activeMinReplicas", minReplicas,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if u != nil && (upcoming == nil || u.StartTime.Before(upcoming.Period.StartTime)) {
|
||||||
|
upcoming = &Override{Period: *u, ScheduledOverride: o}
|
||||||
|
|
||||||
|
log.V(1).Info(
|
||||||
|
"Found upcoming scheduled override",
|
||||||
|
"upcomingStartTime", u.StartTime,
|
||||||
|
"upcomingEndTime", u.EndTime,
|
||||||
|
"upcomingMinReplicas", o.MinReplicas,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return minReplicas, active, upcoming, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *HorizontalRunnerAutoscalerReconciler) getMinReplicas(log logr.Logger, now time.Time, hra v1alpha1.HorizontalRunnerAutoscaler) (int, *Override, *Override, error) {
|
||||||
minReplicas := defaultReplicas
|
minReplicas := defaultReplicas
|
||||||
if hra.Spec.MinReplicas != nil && *hra.Spec.MinReplicas > 0 {
|
if hra.Spec.MinReplicas != nil && *hra.Spec.MinReplicas >= 0 {
|
||||||
minReplicas = *hra.Spec.MinReplicas
|
minReplicas = *hra.Spec.MinReplicas
|
||||||
}
|
}
|
||||||
|
|
||||||
|
m, active, upcoming, err := r.matchScheduledOverrides(log, now, hra)
|
||||||
|
if err != nil {
|
||||||
|
return 0, nil, nil, err
|
||||||
|
} else if m != nil {
|
||||||
|
minReplicas = *m
|
||||||
|
}
|
||||||
|
|
||||||
|
return minReplicas, active, upcoming, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *HorizontalRunnerAutoscalerReconciler) computeReplicasWithCache(log logr.Logger, now time.Time, st scaleTarget, hra v1alpha1.HorizontalRunnerAutoscaler, minReplicas int) (int, int, *int, error) {
|
||||||
var suggestedReplicas int
|
var suggestedReplicas int
|
||||||
|
|
||||||
suggestedReplicasFromCache := r.fetchSuggestedReplicasFromCache(hra)
|
suggestedReplicasFromCache := r.fetchSuggestedReplicasFromCache(hra)
|
||||||
@@ -205,7 +459,7 @@ func (r *HorizontalRunnerAutoscalerReconciler) computeReplicasWithCache(log logr
|
|||||||
suggestedReplicas = *cached
|
suggestedReplicas = *cached
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
v, err := r.suggestDesiredReplicas(rd, hra)
|
v, err := r.suggestDesiredReplicas(st, hra)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, 0, nil, err
|
return 0, 0, nil, err
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,11 +1,12 @@
|
|||||||
package controllers
|
package controllers
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"github.com/google/go-cmp/cmp"
|
|
||||||
actionsv1alpha1 "github.com/summerwind/actions-runner-controller/api/v1alpha1"
|
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
actionsv1alpha1 "github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
|
||||||
|
"github.com/google/go-cmp/cmp"
|
||||||
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestGetValidCacheEntries(t *testing.T) {
|
func TestGetValidCacheEntries(t *testing.T) {
|
||||||
|
|||||||
@@ -7,11 +7,10 @@ import (
|
|||||||
"net/http/httptest"
|
"net/http/httptest"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/google/go-github/v33/github"
|
github2 "github.com/actions-runner-controller/actions-runner-controller/github"
|
||||||
github2 "github.com/summerwind/actions-runner-controller/github"
|
"github.com/google/go-github/v37/github"
|
||||||
"k8s.io/apimachinery/pkg/runtime"
|
|
||||||
|
|
||||||
"github.com/summerwind/actions-runner-controller/github/fake"
|
"github.com/actions-runner-controller/actions-runner-controller/github/fake"
|
||||||
|
|
||||||
corev1 "k8s.io/api/core/v1"
|
corev1 "k8s.io/api/core/v1"
|
||||||
"k8s.io/apimachinery/pkg/types"
|
"k8s.io/apimachinery/pkg/types"
|
||||||
@@ -24,7 +23,7 @@ import (
|
|||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||||
|
|
||||||
actionsv1alpha1 "github.com/summerwind/actions-runner-controller/api/v1alpha1"
|
actionsv1alpha1 "github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
|
||||||
)
|
)
|
||||||
|
|
||||||
type testEnvironment struct {
|
type testEnvironment struct {
|
||||||
@@ -52,8 +51,9 @@ var (
|
|||||||
// * starting all the reconcilers
|
// * starting all the reconcilers
|
||||||
// * stopping all the reconcilers after the test ends
|
// * stopping all the reconcilers after the test ends
|
||||||
// Call this function at the start of each of your tests.
|
// Call this function at the start of each of your tests.
|
||||||
func SetupIntegrationTest(ctx context.Context) *testEnvironment {
|
func SetupIntegrationTest(ctx2 context.Context) *testEnvironment {
|
||||||
var stopCh chan struct{}
|
var ctx context.Context
|
||||||
|
var cancel func()
|
||||||
ns := &corev1.Namespace{}
|
ns := &corev1.Namespace{}
|
||||||
|
|
||||||
env := &testEnvironment{
|
env := &testEnvironment{
|
||||||
@@ -63,7 +63,7 @@ func SetupIntegrationTest(ctx context.Context) *testEnvironment {
|
|||||||
}
|
}
|
||||||
|
|
||||||
BeforeEach(func() {
|
BeforeEach(func() {
|
||||||
stopCh = make(chan struct{})
|
ctx, cancel = context.WithCancel(ctx2)
|
||||||
*ns = corev1.Namespace{
|
*ns = corev1.Namespace{
|
||||||
ObjectMeta: metav1.ObjectMeta{Name: "testns-" + randStringRunes(5)},
|
ObjectMeta: metav1.ObjectMeta{Name: "testns-" + randStringRunes(5)},
|
||||||
}
|
}
|
||||||
@@ -166,13 +166,13 @@ func SetupIntegrationTest(ctx context.Context) *testEnvironment {
|
|||||||
go func() {
|
go func() {
|
||||||
defer GinkgoRecover()
|
defer GinkgoRecover()
|
||||||
|
|
||||||
err := mgr.Start(stopCh)
|
err := mgr.Start(ctx)
|
||||||
Expect(err).NotTo(HaveOccurred(), "failed to start manager")
|
Expect(err).NotTo(HaveOccurred(), "failed to start manager")
|
||||||
}()
|
}()
|
||||||
})
|
})
|
||||||
|
|
||||||
AfterEach(func() {
|
AfterEach(func() {
|
||||||
close(stopCh)
|
defer cancel()
|
||||||
|
|
||||||
env.fakeGithubServer.Close()
|
env.fakeGithubServer.Close()
|
||||||
env.webhookServer.Close()
|
env.webhookServer.Close()
|
||||||
@@ -214,11 +214,15 @@ var _ = Context("INTEGRATION: Inside of a new namespace", func() {
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
Spec: actionsv1alpha1.RunnerSpec{
|
Spec: actionsv1alpha1.RunnerSpec{
|
||||||
Organization: "test",
|
RunnerConfig: actionsv1alpha1.RunnerConfig{
|
||||||
Image: "bar",
|
Organization: "test",
|
||||||
Group: "baz",
|
Image: "bar",
|
||||||
Env: []corev1.EnvVar{
|
Group: "baz",
|
||||||
{Name: "FOO", Value: "FOOVALUE"},
|
},
|
||||||
|
RunnerPodSpec: actionsv1alpha1.RunnerPodSpec{
|
||||||
|
Env: []corev1.EnvVar{
|
||||||
|
{Name: "FOO", Value: "FOOVALUE"},
|
||||||
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
@@ -301,11 +305,15 @@ var _ = Context("INTEGRATION: Inside of a new namespace", func() {
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
Spec: actionsv1alpha1.RunnerSpec{
|
Spec: actionsv1alpha1.RunnerSpec{
|
||||||
Repository: "test/valid",
|
RunnerConfig: actionsv1alpha1.RunnerConfig{
|
||||||
Image: "bar",
|
Repository: "test/valid",
|
||||||
Group: "baz",
|
Image: "bar",
|
||||||
Env: []corev1.EnvVar{
|
Group: "baz",
|
||||||
{Name: "FOO", Value: "FOOVALUE"},
|
},
|
||||||
|
RunnerPodSpec: actionsv1alpha1.RunnerPodSpec{
|
||||||
|
Env: []corev1.EnvVar{
|
||||||
|
{Name: "FOO", Value: "FOOVALUE"},
|
||||||
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
@@ -432,11 +440,15 @@ var _ = Context("INTEGRATION: Inside of a new namespace", func() {
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
Spec: actionsv1alpha1.RunnerSpec{
|
Spec: actionsv1alpha1.RunnerSpec{
|
||||||
Repository: "test/valid",
|
RunnerConfig: actionsv1alpha1.RunnerConfig{
|
||||||
Image: "bar",
|
Repository: "test/valid",
|
||||||
Group: "baz",
|
Image: "bar",
|
||||||
Env: []corev1.EnvVar{
|
Group: "baz",
|
||||||
{Name: "FOO", Value: "FOOVALUE"},
|
},
|
||||||
|
RunnerPodSpec: actionsv1alpha1.RunnerPodSpec{
|
||||||
|
Env: []corev1.EnvVar{
|
||||||
|
{Name: "FOO", Value: "FOOVALUE"},
|
||||||
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
@@ -446,9 +458,6 @@ var _ = Context("INTEGRATION: Inside of a new namespace", func() {
|
|||||||
ExpectCreate(ctx, rd, "test RunnerDeployment")
|
ExpectCreate(ctx, rd, "test RunnerDeployment")
|
||||||
ExpectRunnerSetsCountEventuallyEquals(ctx, ns.Name, 1)
|
ExpectRunnerSetsCountEventuallyEquals(ctx, ns.Name, 1)
|
||||||
ExpectRunnerSetsManagedReplicasCountEventuallyEquals(ctx, ns.Name, 1)
|
ExpectRunnerSetsManagedReplicasCountEventuallyEquals(ctx, ns.Name, 1)
|
||||||
}
|
|
||||||
|
|
||||||
{
|
|
||||||
env.ExpectRegisteredNumberCountEventuallyEquals(1, "count of fake list runners")
|
env.ExpectRegisteredNumberCountEventuallyEquals(1, "count of fake list runners")
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -477,8 +486,9 @@ var _ = Context("INTEGRATION: Inside of a new namespace", func() {
|
|||||||
{
|
{
|
||||||
GitHubEvent: &actionsv1alpha1.GitHubEventScaleUpTriggerSpec{
|
GitHubEvent: &actionsv1alpha1.GitHubEventScaleUpTriggerSpec{
|
||||||
CheckRun: &actionsv1alpha1.CheckRunSpec{
|
CheckRun: &actionsv1alpha1.CheckRunSpec{
|
||||||
Types: []string{"created"},
|
Types: []string{"created"},
|
||||||
Status: "pending",
|
Status: "pending",
|
||||||
|
Repositories: []string{"valid", "foo", "bar"},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
Amount: 1,
|
Amount: 1,
|
||||||
@@ -508,12 +518,23 @@ var _ = Context("INTEGRATION: Inside of a new namespace", func() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Scale-up to 5 replicas on second check_run create webhook event
|
// Scale-up to 5 replicas on second check_run create webhook event
|
||||||
|
replicasAfterSecondWebhook := 5
|
||||||
{
|
{
|
||||||
env.SendOrgCheckRunEvent("test", "valid", "pending", "created")
|
env.SendOrgCheckRunEvent("test", "valid", "pending", "created")
|
||||||
ExpectRunnerSetsManagedReplicasCountEventuallyEquals(ctx, ns.Name, 5, "runners after second webhook event")
|
ExpectRunnerSetsManagedReplicasCountEventuallyEquals(ctx, ns.Name, replicasAfterSecondWebhook, "runners after second webhook event")
|
||||||
env.ExpectRegisteredNumberCountEventuallyEquals(5, "count of fake list runners")
|
env.ExpectRegisteredNumberCountEventuallyEquals(replicasAfterSecondWebhook, "count of fake list runners")
|
||||||
env.SyncRunnerRegistrations()
|
env.SyncRunnerRegistrations()
|
||||||
ExpectRunnerCountEventuallyEquals(ctx, ns.Name, 5)
|
ExpectRunnerCountEventuallyEquals(ctx, ns.Name, replicasAfterSecondWebhook)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Do not scale-up on third check_run create webhook event
|
||||||
|
// example repo is not in specified in actionsv1alpha1.CheckRunSpec.Repositories
|
||||||
|
{
|
||||||
|
env.SendOrgCheckRunEvent("test", "example", "pending", "created")
|
||||||
|
ExpectRunnerSetsManagedReplicasCountEventuallyEquals(ctx, ns.Name, replicasAfterSecondWebhook, "runners after third webhook event")
|
||||||
|
env.ExpectRegisteredNumberCountEventuallyEquals(replicasAfterSecondWebhook, "count of fake list runners")
|
||||||
|
env.SyncRunnerRegistrations()
|
||||||
|
ExpectRunnerCountEventuallyEquals(ctx, ns.Name, replicasAfterSecondWebhook)
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
|
||||||
@@ -540,11 +561,15 @@ var _ = Context("INTEGRATION: Inside of a new namespace", func() {
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
Spec: actionsv1alpha1.RunnerSpec{
|
Spec: actionsv1alpha1.RunnerSpec{
|
||||||
Repository: "test/valid",
|
RunnerConfig: actionsv1alpha1.RunnerConfig{
|
||||||
Image: "bar",
|
Repository: "test/valid",
|
||||||
Group: "baz",
|
Image: "bar",
|
||||||
Env: []corev1.EnvVar{
|
Group: "baz",
|
||||||
{Name: "FOO", Value: "FOOVALUE"},
|
},
|
||||||
|
RunnerPodSpec: actionsv1alpha1.RunnerPodSpec{
|
||||||
|
Env: []corev1.EnvVar{
|
||||||
|
{Name: "FOO", Value: "FOOVALUE"},
|
||||||
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
@@ -554,9 +579,6 @@ var _ = Context("INTEGRATION: Inside of a new namespace", func() {
|
|||||||
ExpectCreate(ctx, rd, "test RunnerDeployment")
|
ExpectCreate(ctx, rd, "test RunnerDeployment")
|
||||||
ExpectRunnerSetsCountEventuallyEquals(ctx, ns.Name, 1)
|
ExpectRunnerSetsCountEventuallyEquals(ctx, ns.Name, 1)
|
||||||
ExpectRunnerSetsManagedReplicasCountEventuallyEquals(ctx, ns.Name, 1)
|
ExpectRunnerSetsManagedReplicasCountEventuallyEquals(ctx, ns.Name, 1)
|
||||||
}
|
|
||||||
|
|
||||||
{
|
|
||||||
env.ExpectRegisteredNumberCountEventuallyEquals(1, "count of fake list runners")
|
env.ExpectRegisteredNumberCountEventuallyEquals(1, "count of fake list runners")
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -595,9 +617,6 @@ var _ = Context("INTEGRATION: Inside of a new namespace", func() {
|
|||||||
|
|
||||||
ExpectRunnerSetsCountEventuallyEquals(ctx, ns.Name, 1)
|
ExpectRunnerSetsCountEventuallyEquals(ctx, ns.Name, 1)
|
||||||
ExpectRunnerSetsManagedReplicasCountEventuallyEquals(ctx, ns.Name, 1)
|
ExpectRunnerSetsManagedReplicasCountEventuallyEquals(ctx, ns.Name, 1)
|
||||||
}
|
|
||||||
|
|
||||||
{
|
|
||||||
env.ExpectRegisteredNumberCountEventuallyEquals(1, "count of fake list runners")
|
env.ExpectRegisteredNumberCountEventuallyEquals(1, "count of fake list runners")
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -606,9 +625,6 @@ var _ = Context("INTEGRATION: Inside of a new namespace", func() {
|
|||||||
env.SendOrgCheckRunEvent("test", "valid", "pending", "created")
|
env.SendOrgCheckRunEvent("test", "valid", "pending", "created")
|
||||||
ExpectRunnerSetsCountEventuallyEquals(ctx, ns.Name, 1, "runner sets after webhook")
|
ExpectRunnerSetsCountEventuallyEquals(ctx, ns.Name, 1, "runner sets after webhook")
|
||||||
ExpectRunnerSetsManagedReplicasCountEventuallyEquals(ctx, ns.Name, 2, "runners after first webhook event")
|
ExpectRunnerSetsManagedReplicasCountEventuallyEquals(ctx, ns.Name, 2, "runners after first webhook event")
|
||||||
}
|
|
||||||
|
|
||||||
{
|
|
||||||
env.ExpectRegisteredNumberCountEventuallyEquals(2, "count of fake list runners")
|
env.ExpectRegisteredNumberCountEventuallyEquals(2, "count of fake list runners")
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -616,9 +632,8 @@ var _ = Context("INTEGRATION: Inside of a new namespace", func() {
|
|||||||
{
|
{
|
||||||
env.SendOrgCheckRunEvent("test", "valid", "pending", "created")
|
env.SendOrgCheckRunEvent("test", "valid", "pending", "created")
|
||||||
ExpectRunnerSetsManagedReplicasCountEventuallyEquals(ctx, ns.Name, 3, "runners after second webhook event")
|
ExpectRunnerSetsManagedReplicasCountEventuallyEquals(ctx, ns.Name, 3, "runners after second webhook event")
|
||||||
|
env.ExpectRegisteredNumberCountEventuallyEquals(3, "count of fake list runners")
|
||||||
}
|
}
|
||||||
|
|
||||||
env.ExpectRegisteredNumberCountEventuallyEquals(3, "count of fake list runners")
|
|
||||||
})
|
})
|
||||||
|
|
||||||
It("should create and scale user's repository runners on pull_request event", func() {
|
It("should create and scale user's repository runners on pull_request event", func() {
|
||||||
@@ -644,11 +659,15 @@ var _ = Context("INTEGRATION: Inside of a new namespace", func() {
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
Spec: actionsv1alpha1.RunnerSpec{
|
Spec: actionsv1alpha1.RunnerSpec{
|
||||||
Repository: "test/valid",
|
RunnerConfig: actionsv1alpha1.RunnerConfig{
|
||||||
Image: "bar",
|
Repository: "test/valid",
|
||||||
Group: "baz",
|
Image: "bar",
|
||||||
Env: []corev1.EnvVar{
|
Group: "baz",
|
||||||
{Name: "FOO", Value: "FOOVALUE"},
|
},
|
||||||
|
RunnerPodSpec: actionsv1alpha1.RunnerPodSpec{
|
||||||
|
Env: []corev1.EnvVar{
|
||||||
|
{Name: "FOO", Value: "FOOVALUE"},
|
||||||
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
@@ -777,11 +796,15 @@ var _ = Context("INTEGRATION: Inside of a new namespace", func() {
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
Spec: actionsv1alpha1.RunnerSpec{
|
Spec: actionsv1alpha1.RunnerSpec{
|
||||||
Repository: "test/valid",
|
RunnerConfig: actionsv1alpha1.RunnerConfig{
|
||||||
Image: "bar",
|
Repository: "test/valid",
|
||||||
Group: "baz",
|
Image: "bar",
|
||||||
Env: []corev1.EnvVar{
|
Group: "baz",
|
||||||
{Name: "FOO", Value: "FOOVALUE"},
|
},
|
||||||
|
RunnerPodSpec: actionsv1alpha1.RunnerPodSpec{
|
||||||
|
Env: []corev1.EnvVar{
|
||||||
|
{Name: "FOO", Value: "FOOVALUE"},
|
||||||
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
@@ -870,11 +893,15 @@ var _ = Context("INTEGRATION: Inside of a new namespace", func() {
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
Spec: actionsv1alpha1.RunnerSpec{
|
Spec: actionsv1alpha1.RunnerSpec{
|
||||||
Repository: "test/valid",
|
RunnerConfig: actionsv1alpha1.RunnerConfig{
|
||||||
Image: "bar",
|
Repository: "test/valid",
|
||||||
Group: "baz",
|
Image: "bar",
|
||||||
Env: []corev1.EnvVar{
|
Group: "baz",
|
||||||
{Name: "FOO", Value: "FOOVALUE"},
|
},
|
||||||
|
RunnerPodSpec: actionsv1alpha1.RunnerPodSpec{
|
||||||
|
Env: []corev1.EnvVar{
|
||||||
|
{Name: "FOO", Value: "FOOVALUE"},
|
||||||
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
@@ -884,9 +911,6 @@ var _ = Context("INTEGRATION: Inside of a new namespace", func() {
|
|||||||
ExpectCreate(ctx, rd, "test RunnerDeployment")
|
ExpectCreate(ctx, rd, "test RunnerDeployment")
|
||||||
ExpectRunnerSetsCountEventuallyEquals(ctx, ns.Name, 1)
|
ExpectRunnerSetsCountEventuallyEquals(ctx, ns.Name, 1)
|
||||||
ExpectRunnerSetsManagedReplicasCountEventuallyEquals(ctx, ns.Name, 1)
|
ExpectRunnerSetsManagedReplicasCountEventuallyEquals(ctx, ns.Name, 1)
|
||||||
}
|
|
||||||
|
|
||||||
{
|
|
||||||
env.ExpectRegisteredNumberCountEventuallyEquals(1, "count of fake list runners")
|
env.ExpectRegisteredNumberCountEventuallyEquals(1, "count of fake list runners")
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -930,9 +954,6 @@ var _ = Context("INTEGRATION: Inside of a new namespace", func() {
|
|||||||
|
|
||||||
ExpectRunnerSetsCountEventuallyEquals(ctx, ns.Name, 1)
|
ExpectRunnerSetsCountEventuallyEquals(ctx, ns.Name, 1)
|
||||||
ExpectRunnerSetsManagedReplicasCountEventuallyEquals(ctx, ns.Name, 3)
|
ExpectRunnerSetsManagedReplicasCountEventuallyEquals(ctx, ns.Name, 3)
|
||||||
}
|
|
||||||
|
|
||||||
{
|
|
||||||
env.ExpectRegisteredNumberCountEventuallyEquals(3, "count of fake list runners")
|
env.ExpectRegisteredNumberCountEventuallyEquals(3, "count of fake list runners")
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -941,9 +962,6 @@ var _ = Context("INTEGRATION: Inside of a new namespace", func() {
|
|||||||
env.SendUserCheckRunEvent("test", "valid", "pending", "created")
|
env.SendUserCheckRunEvent("test", "valid", "pending", "created")
|
||||||
ExpectRunnerSetsCountEventuallyEquals(ctx, ns.Name, 1, "runner sets after webhook")
|
ExpectRunnerSetsCountEventuallyEquals(ctx, ns.Name, 1, "runner sets after webhook")
|
||||||
ExpectRunnerSetsManagedReplicasCountEventuallyEquals(ctx, ns.Name, 4, "runners after first webhook event")
|
ExpectRunnerSetsManagedReplicasCountEventuallyEquals(ctx, ns.Name, 4, "runners after first webhook event")
|
||||||
}
|
|
||||||
|
|
||||||
{
|
|
||||||
env.ExpectRegisteredNumberCountEventuallyEquals(4, "count of fake list runners")
|
env.ExpectRegisteredNumberCountEventuallyEquals(4, "count of fake list runners")
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -951,9 +969,8 @@ var _ = Context("INTEGRATION: Inside of a new namespace", func() {
|
|||||||
{
|
{
|
||||||
env.SendUserCheckRunEvent("test", "valid", "pending", "created")
|
env.SendUserCheckRunEvent("test", "valid", "pending", "created")
|
||||||
ExpectRunnerSetsManagedReplicasCountEventuallyEquals(ctx, ns.Name, 5, "runners after second webhook event")
|
ExpectRunnerSetsManagedReplicasCountEventuallyEquals(ctx, ns.Name, 5, "runners after second webhook event")
|
||||||
|
env.ExpectRegisteredNumberCountEventuallyEquals(5, "count of fake list runners")
|
||||||
}
|
}
|
||||||
|
|
||||||
env.ExpectRegisteredNumberCountEventuallyEquals(5, "count of fake list runners")
|
|
||||||
})
|
})
|
||||||
|
|
||||||
It("should create and scale user's repository runners only on check_run event", func() {
|
It("should create and scale user's repository runners only on check_run event", func() {
|
||||||
@@ -979,11 +996,15 @@ var _ = Context("INTEGRATION: Inside of a new namespace", func() {
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
Spec: actionsv1alpha1.RunnerSpec{
|
Spec: actionsv1alpha1.RunnerSpec{
|
||||||
Repository: "test/valid",
|
RunnerConfig: actionsv1alpha1.RunnerConfig{
|
||||||
Image: "bar",
|
Repository: "test/valid",
|
||||||
Group: "baz",
|
Image: "bar",
|
||||||
Env: []corev1.EnvVar{
|
Group: "baz",
|
||||||
{Name: "FOO", Value: "FOOVALUE"},
|
},
|
||||||
|
RunnerPodSpec: actionsv1alpha1.RunnerPodSpec{
|
||||||
|
Env: []corev1.EnvVar{
|
||||||
|
{Name: "FOO", Value: "FOOVALUE"},
|
||||||
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
@@ -1045,9 +1066,6 @@ var _ = Context("INTEGRATION: Inside of a new namespace", func() {
|
|||||||
env.SendUserCheckRunEvent("test", "valid", "pending", "created")
|
env.SendUserCheckRunEvent("test", "valid", "pending", "created")
|
||||||
ExpectRunnerSetsCountEventuallyEquals(ctx, ns.Name, 1, "runner sets after webhook")
|
ExpectRunnerSetsCountEventuallyEquals(ctx, ns.Name, 1, "runner sets after webhook")
|
||||||
ExpectRunnerSetsManagedReplicasCountEventuallyEquals(ctx, ns.Name, 2, "runners after first webhook event")
|
ExpectRunnerSetsManagedReplicasCountEventuallyEquals(ctx, ns.Name, 2, "runners after first webhook event")
|
||||||
}
|
|
||||||
|
|
||||||
{
|
|
||||||
env.ExpectRegisteredNumberCountEventuallyEquals(2, "count of fake list runners")
|
env.ExpectRegisteredNumberCountEventuallyEquals(2, "count of fake list runners")
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1055,9 +1073,8 @@ var _ = Context("INTEGRATION: Inside of a new namespace", func() {
|
|||||||
{
|
{
|
||||||
env.SendUserCheckRunEvent("test", "valid", "pending", "created")
|
env.SendUserCheckRunEvent("test", "valid", "pending", "created")
|
||||||
ExpectRunnerSetsManagedReplicasCountEventuallyEquals(ctx, ns.Name, 3, "runners after second webhook event")
|
ExpectRunnerSetsManagedReplicasCountEventuallyEquals(ctx, ns.Name, 3, "runners after second webhook event")
|
||||||
|
env.ExpectRegisteredNumberCountEventuallyEquals(3, "count of fake list runners")
|
||||||
}
|
}
|
||||||
|
|
||||||
env.ExpectRegisteredNumberCountEventuallyEquals(3, "count of fake list runners")
|
|
||||||
})
|
})
|
||||||
|
|
||||||
})
|
})
|
||||||
@@ -1202,7 +1219,7 @@ func (env *testEnvironment) SyncRunnerRegistrations() {
|
|||||||
env.fakeRunnerList.Sync(runnerList.Items)
|
env.fakeRunnerList.Sync(runnerList.Items)
|
||||||
}
|
}
|
||||||
|
|
||||||
func ExpectCreate(ctx context.Context, rd runtime.Object, s string) {
|
func ExpectCreate(ctx context.Context, rd client.Object, s string) {
|
||||||
err := k8sClient.Create(ctx, rd)
|
err := k8sClient.Create(ctx, rd)
|
||||||
|
|
||||||
ExpectWithOffset(1, err).NotTo(HaveOccurred(), fmt.Sprintf("failed to create %s resource", s))
|
ExpectWithOffset(1, err).NotTo(HaveOccurred(), fmt.Sprintf("failed to create %s resource", s))
|
||||||
|
|||||||
@@ -1,8 +1,8 @@
|
|||||||
package metrics
|
package metrics
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
|
||||||
"github.com/prometheus/client_golang/prometheus"
|
"github.com/prometheus/client_golang/prometheus"
|
||||||
"github.com/summerwind/actions-runner-controller/api/v1alpha1"
|
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|||||||
@@ -1,8 +1,8 @@
|
|||||||
package metrics
|
package metrics
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
|
||||||
"github.com/prometheus/client_golang/prometheus"
|
"github.com/prometheus/client_golang/prometheus"
|
||||||
"github.com/summerwind/actions-runner-controller/api/v1alpha1"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
|
|||||||
37
controllers/metrics/runnerset.go
Normal file
37
controllers/metrics/runnerset.go
Normal file
@@ -0,0 +1,37 @@
|
|||||||
|
package metrics
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
|
||||||
|
"github.com/prometheus/client_golang/prometheus"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
rsName = "runnerset"
|
||||||
|
rsNamespace = "namespace"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
runnerSetMetrics = []prometheus.Collector{
|
||||||
|
runnerSetReplicas,
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
runnerSetReplicas = prometheus.NewGaugeVec(
|
||||||
|
prometheus.GaugeOpts{
|
||||||
|
Name: "runnerset_spec_replicas",
|
||||||
|
Help: "replicas of RunnerSet",
|
||||||
|
},
|
||||||
|
[]string{rsName, rsNamespace},
|
||||||
|
)
|
||||||
|
)
|
||||||
|
|
||||||
|
func SetRunnerSet(rd v1alpha1.RunnerSet) {
|
||||||
|
labels := prometheus.Labels{
|
||||||
|
rsName: rd.Name,
|
||||||
|
rsNamespace: rd.Namespace,
|
||||||
|
}
|
||||||
|
if rd.Spec.Replicas != nil {
|
||||||
|
runnerSetReplicas.With(labels).Set(float64(*rd.Spec.Replicas))
|
||||||
|
}
|
||||||
|
}
|
||||||
132
controllers/pod_runner_token_injector.go
Normal file
132
controllers/pod_runner_token_injector.go
Normal file
@@ -0,0 +1,132 @@
|
|||||||
|
package controllers
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"net/http"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/actions-runner-controller/actions-runner-controller/github"
|
||||||
|
"github.com/go-logr/logr"
|
||||||
|
"gomodules.xyz/jsonpatch/v2"
|
||||||
|
admissionv1 "k8s.io/api/admission/v1"
|
||||||
|
corev1 "k8s.io/api/core/v1"
|
||||||
|
"k8s.io/client-go/tools/record"
|
||||||
|
ctrl "sigs.k8s.io/controller-runtime"
|
||||||
|
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||||
|
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
AnnotationKeyTokenExpirationDate = "actions-runner-controller/token-expires-at"
|
||||||
|
)
|
||||||
|
|
||||||
|
// +kubebuilder:webhook:path=/mutate-runner-set-pod,mutating=true,failurePolicy=ignore,groups="",resources=pods,verbs=create,versions=v1,name=mutate-runner-pod.webhook.actions.summerwind.dev,sideEffects=None,admissionReviewVersions=v1beta1
|
||||||
|
|
||||||
|
type PodRunnerTokenInjector struct {
|
||||||
|
client.Client
|
||||||
|
|
||||||
|
Name string
|
||||||
|
Log logr.Logger
|
||||||
|
Recorder record.EventRecorder
|
||||||
|
GitHubClient *github.Client
|
||||||
|
decoder *admission.Decoder
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *PodRunnerTokenInjector) Handle(ctx context.Context, req admission.Request) admission.Response {
|
||||||
|
var pod corev1.Pod
|
||||||
|
err := t.decoder.Decode(req, &pod)
|
||||||
|
if err != nil {
|
||||||
|
t.Log.Error(err, "Failed to decode request object")
|
||||||
|
return admission.Errored(http.StatusBadRequest, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if pod.Annotations == nil {
|
||||||
|
pod.Annotations = map[string]string{}
|
||||||
|
}
|
||||||
|
|
||||||
|
var runnerContainer *corev1.Container
|
||||||
|
|
||||||
|
for i := range pod.Spec.Containers {
|
||||||
|
c := pod.Spec.Containers[i]
|
||||||
|
|
||||||
|
if c.Name == "runner" {
|
||||||
|
runnerContainer = &c
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if runnerContainer == nil {
|
||||||
|
return newEmptyResponse()
|
||||||
|
}
|
||||||
|
|
||||||
|
enterprise, okEnterprise := getEnv(runnerContainer, "RUNNER_ENTERPRISE")
|
||||||
|
repo, okRepo := getEnv(runnerContainer, "RUNNER_REPO")
|
||||||
|
org, okOrg := getEnv(runnerContainer, "RUNNER_ORG")
|
||||||
|
if !okRepo || !okOrg || !okEnterprise {
|
||||||
|
return newEmptyResponse()
|
||||||
|
}
|
||||||
|
|
||||||
|
rt, err := t.GitHubClient.GetRegistrationToken(context.Background(), enterprise, org, repo, pod.Name)
|
||||||
|
if err != nil {
|
||||||
|
t.Log.Error(err, "Failed to get new registration token")
|
||||||
|
return admission.Errored(http.StatusInternalServerError, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
ts := rt.GetExpiresAt().Format(time.RFC3339)
|
||||||
|
|
||||||
|
updated := mutatePod(&pod, *rt.Token)
|
||||||
|
|
||||||
|
updated.Annotations[AnnotationKeyTokenExpirationDate] = ts
|
||||||
|
|
||||||
|
if pod.Spec.RestartPolicy != corev1.RestartPolicyOnFailure {
|
||||||
|
updated.Spec.RestartPolicy = corev1.RestartPolicyOnFailure
|
||||||
|
}
|
||||||
|
|
||||||
|
buf, err := json.Marshal(updated)
|
||||||
|
if err != nil {
|
||||||
|
t.Log.Error(err, "Failed to encode new object")
|
||||||
|
return admission.Errored(http.StatusInternalServerError, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
res := admission.PatchResponseFromRaw(req.Object.Raw, buf)
|
||||||
|
return res
|
||||||
|
}
|
||||||
|
|
||||||
|
func getEnv(container *corev1.Container, key string) (string, bool) {
|
||||||
|
for _, env := range container.Env {
|
||||||
|
if env.Name == key {
|
||||||
|
return env.Value, true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return "", false
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *PodRunnerTokenInjector) InjectDecoder(d *admission.Decoder) error {
|
||||||
|
t.decoder = d
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func newEmptyResponse() admission.Response {
|
||||||
|
pt := admissionv1.PatchTypeJSONPatch
|
||||||
|
return admission.Response{
|
||||||
|
Patches: []jsonpatch.Operation{},
|
||||||
|
AdmissionResponse: admissionv1.AdmissionResponse{
|
||||||
|
Allowed: true,
|
||||||
|
PatchType: &pt,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *PodRunnerTokenInjector) SetupWithManager(mgr ctrl.Manager) error {
|
||||||
|
name := "pod-runner-token-injector"
|
||||||
|
if r.Name != "" {
|
||||||
|
name = r.Name
|
||||||
|
}
|
||||||
|
|
||||||
|
r.Recorder = mgr.GetEventRecorderFor(name)
|
||||||
|
|
||||||
|
mgr.GetWebhookServer().Register("/mutate-runner-set-pod", &admission.Webhook{Handler: r})
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
@@ -23,8 +23,8 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
gogithub "github.com/google/go-github/v33/github"
|
"github.com/actions-runner-controller/actions-runner-controller/hash"
|
||||||
"github.com/summerwind/actions-runner-controller/hash"
|
gogithub "github.com/google/go-github/v37/github"
|
||||||
"k8s.io/apimachinery/pkg/util/wait"
|
"k8s.io/apimachinery/pkg/util/wait"
|
||||||
|
|
||||||
"github.com/go-logr/logr"
|
"github.com/go-logr/logr"
|
||||||
@@ -37,8 +37,8 @@ import (
|
|||||||
corev1 "k8s.io/api/core/v1"
|
corev1 "k8s.io/api/core/v1"
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
|
|
||||||
"github.com/summerwind/actions-runner-controller/api/v1alpha1"
|
"github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
|
||||||
"github.com/summerwind/actions-runner-controller/github"
|
"github.com/actions-runner-controller/actions-runner-controller/github"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
@@ -48,6 +48,13 @@ const (
|
|||||||
LabelKeyPodTemplateHash = "pod-template-hash"
|
LabelKeyPodTemplateHash = "pod-template-hash"
|
||||||
|
|
||||||
retryDelayOnGitHubAPIRateLimitError = 30 * time.Second
|
retryDelayOnGitHubAPIRateLimitError = 30 * time.Second
|
||||||
|
|
||||||
|
// This is an annotation internal to actions-runner-controller and can change in backward-incompatible ways
|
||||||
|
annotationKeyRegistrationOnly = "actions-runner-controller/registration-only"
|
||||||
|
|
||||||
|
EnvVarOrg = "RUNNER_ORG"
|
||||||
|
EnvVarRepo = "RUNNER_REPO"
|
||||||
|
EnvVarEnterprise = "RUNNER_ENTERPRISE"
|
||||||
)
|
)
|
||||||
|
|
||||||
// RunnerReconciler reconciles a Runner object
|
// RunnerReconciler reconciles a Runner object
|
||||||
@@ -59,6 +66,7 @@ type RunnerReconciler struct {
|
|||||||
GitHubClient *github.Client
|
GitHubClient *github.Client
|
||||||
RunnerImage string
|
RunnerImage string
|
||||||
DockerImage string
|
DockerImage string
|
||||||
|
DockerRegistryMirror string
|
||||||
Name string
|
Name string
|
||||||
RegistrationRecheckInterval time.Duration
|
RegistrationRecheckInterval time.Duration
|
||||||
RegistrationRecheckJitter time.Duration
|
RegistrationRecheckJitter time.Duration
|
||||||
@@ -71,8 +79,7 @@ type RunnerReconciler struct {
|
|||||||
// +kubebuilder:rbac:groups=core,resources=pods/finalizers,verbs=get;list;watch;create;update;patch;delete
|
// +kubebuilder:rbac:groups=core,resources=pods/finalizers,verbs=get;list;watch;create;update;patch;delete
|
||||||
// +kubebuilder:rbac:groups=core,resources=events,verbs=create;patch
|
// +kubebuilder:rbac:groups=core,resources=events,verbs=create;patch
|
||||||
|
|
||||||
func (r *RunnerReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
|
func (r *RunnerReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
|
||||||
ctx := context.Background()
|
|
||||||
log := r.Log.WithValues("runner", req.NamespacedName)
|
log := r.Log.WithValues("runner", req.NamespacedName)
|
||||||
|
|
||||||
var runner v1alpha1.Runner
|
var runner v1alpha1.Runner
|
||||||
@@ -87,7 +94,7 @@ func (r *RunnerReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if runner.ObjectMeta.DeletionTimestamp.IsZero() {
|
if runner.ObjectMeta.DeletionTimestamp.IsZero() {
|
||||||
finalizers, added := addFinalizer(runner.ObjectMeta.Finalizers)
|
finalizers, added := addFinalizer(runner.ObjectMeta.Finalizers, finalizerName)
|
||||||
|
|
||||||
if added {
|
if added {
|
||||||
newRunner := runner.DeepCopy()
|
newRunner := runner.DeepCopy()
|
||||||
@@ -101,7 +108,7 @@ func (r *RunnerReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
|
|||||||
return ctrl.Result{}, nil
|
return ctrl.Result{}, nil
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
finalizers, removed := removeFinalizer(runner.ObjectMeta.Finalizers)
|
finalizers, removed := removeFinalizer(runner.ObjectMeta.Finalizers, finalizerName)
|
||||||
|
|
||||||
if removed {
|
if removed {
|
||||||
if len(runner.Status.Registration.Token) > 0 {
|
if len(runner.Status.Registration.Token) > 0 {
|
||||||
@@ -145,6 +152,34 @@ func (r *RunnerReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
|
|||||||
return ctrl.Result{}, nil
|
return ctrl.Result{}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
registrationOnly := metav1.HasAnnotation(runner.ObjectMeta, annotationKeyRegistrationOnly)
|
||||||
|
if registrationOnly && runner.Status.Phase != "" {
|
||||||
|
// At this point we are sure that the registration-only runner has successfully configured and
|
||||||
|
// is of `offline` status, because we set runner.Status.Phase to that of the runner pod only after
|
||||||
|
// successful registration.
|
||||||
|
|
||||||
|
var pod corev1.Pod
|
||||||
|
if err := r.Get(ctx, req.NamespacedName, &pod); err != nil {
|
||||||
|
if !kerrors.IsNotFound(err) {
|
||||||
|
log.Info(fmt.Sprintf("Retrying soon as we failed to get registration-only runner pod: %v", err))
|
||||||
|
|
||||||
|
return ctrl.Result{Requeue: true}, nil
|
||||||
|
}
|
||||||
|
} else if err := r.Delete(ctx, &pod); err != nil {
|
||||||
|
if !kerrors.IsNotFound(err) {
|
||||||
|
log.Info(fmt.Sprintf("Retrying soon as we failed to delete registration-only runner pod: %v", err))
|
||||||
|
|
||||||
|
return ctrl.Result{Requeue: true}, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Info("Successfully deleted egistration-only runner pod to free node and cluster resource")
|
||||||
|
|
||||||
|
// Return here to not recreate the deleted pod, because recreating it is the waste of cluster and node resource,
|
||||||
|
// and also defeats the original purpose of scale-from/to-zero we're trying to implement by using the registration-only runner.
|
||||||
|
return ctrl.Result{}, nil
|
||||||
|
}
|
||||||
|
|
||||||
var pod corev1.Pod
|
var pod corev1.Pod
|
||||||
if err := r.Get(ctx, req.NamespacedName, &pod); err != nil {
|
if err := r.Get(ctx, req.NamespacedName, &pod); err != nil {
|
||||||
if !kerrors.IsNotFound(err) {
|
if !kerrors.IsNotFound(err) {
|
||||||
@@ -221,20 +256,33 @@ func (r *RunnerReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
|
|||||||
|
|
||||||
// If pod has ended up succeeded we need to restart it
|
// If pod has ended up succeeded we need to restart it
|
||||||
// Happens e.g. when dind is in runner and run completes
|
// Happens e.g. when dind is in runner and run completes
|
||||||
restart := pod.Status.Phase == corev1.PodSucceeded
|
stopped := pod.Status.Phase == corev1.PodSucceeded
|
||||||
|
|
||||||
if pod.Status.Phase == corev1.PodRunning {
|
if !stopped {
|
||||||
for _, status := range pod.Status.ContainerStatuses {
|
if pod.Status.Phase == corev1.PodRunning {
|
||||||
if status.Name != containerName {
|
for _, status := range pod.Status.ContainerStatuses {
|
||||||
continue
|
if status.Name != containerName {
|
||||||
}
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
if status.State.Terminated != nil && status.State.Terminated.ExitCode == 0 {
|
if status.State.Terminated != nil && status.State.Terminated.ExitCode == 0 {
|
||||||
restart = true
|
stopped = true
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
restart := stopped
|
||||||
|
|
||||||
|
if registrationOnly && stopped {
|
||||||
|
restart = false
|
||||||
|
|
||||||
|
log.Info(
|
||||||
|
"Observed that registration-only runner for scaling-from-zero has successfully stopped. " +
|
||||||
|
"Unlike other pods, this one will be recreated only when runner spec changes.",
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
if updated, err := r.updateRegistrationToken(ctx, runner); err != nil {
|
if updated, err := r.updateRegistrationToken(ctx, runner); err != nil {
|
||||||
return ctrl.Result{}, err
|
return ctrl.Result{}, err
|
||||||
} else if updated {
|
} else if updated {
|
||||||
@@ -247,11 +295,21 @@ func (r *RunnerReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
|
|||||||
return ctrl.Result{}, err
|
return ctrl.Result{}, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if registrationOnly {
|
||||||
|
newPod.Spec.Containers[0].Env = append(
|
||||||
|
newPod.Spec.Containers[0].Env,
|
||||||
|
corev1.EnvVar{
|
||||||
|
Name: "RUNNER_REGISTRATION_ONLY",
|
||||||
|
Value: "true",
|
||||||
|
},
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
var registrationRecheckDelay time.Duration
|
var registrationRecheckDelay time.Duration
|
||||||
|
|
||||||
// all checks done below only decide whether a restart is needed
|
// all checks done below only decide whether a restart is needed
|
||||||
// if a restart was already decided before, there is no need for the checks
|
// if a restart was already decided before, there is no need for the checks
|
||||||
// saving API calls and scary{ log messages
|
// saving API calls and scary log messages
|
||||||
if !restart {
|
if !restart {
|
||||||
registrationCheckInterval := time.Minute
|
registrationCheckInterval := time.Minute
|
||||||
if r.RegistrationRecheckInterval > 0 {
|
if r.RegistrationRecheckInterval > 0 {
|
||||||
@@ -342,7 +400,7 @@ func (r *RunnerReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
|
|||||||
"Runner failed to register itself to GitHub in timely manner. "+
|
"Runner failed to register itself to GitHub in timely manner. "+
|
||||||
"Recreating the pod to see if it resolves the issue. "+
|
"Recreating the pod to see if it resolves the issue. "+
|
||||||
"CAUTION: If you see this a lot, you should investigate the root cause. "+
|
"CAUTION: If you see this a lot, you should investigate the root cause. "+
|
||||||
"See https://github.com/summerwind/actions-runner-controller/issues/288",
|
"See https://github.com/actions-runner-controller/actions-runner-controller/issues/288",
|
||||||
"podCreationTimestamp", pod.CreationTimestamp,
|
"podCreationTimestamp", pod.CreationTimestamp,
|
||||||
"currentTime", currentTime,
|
"currentTime", currentTime,
|
||||||
"configuredRegistrationTimeout", registrationTimeout,
|
"configuredRegistrationTimeout", registrationTimeout,
|
||||||
@@ -356,7 +414,14 @@ func (r *RunnerReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
|
|||||||
)
|
)
|
||||||
}
|
}
|
||||||
} else if offline {
|
} else if offline {
|
||||||
if registrationDidTimeout {
|
if registrationOnly {
|
||||||
|
log.Info(
|
||||||
|
"Observed that registration-only runner for scaling-from-zero has successfully been registered.",
|
||||||
|
"podCreationTimestamp", pod.CreationTimestamp,
|
||||||
|
"currentTime", currentTime,
|
||||||
|
"configuredRegistrationTimeout", registrationTimeout,
|
||||||
|
)
|
||||||
|
} else if registrationDidTimeout {
|
||||||
log.Info(
|
log.Info(
|
||||||
"Already existing GitHub runner still appears offline . "+
|
"Already existing GitHub runner still appears offline . "+
|
||||||
"Recreating the pod to see if it resolves the issue. "+
|
"Recreating the pod to see if it resolves the issue. "+
|
||||||
@@ -375,7 +440,7 @@ func (r *RunnerReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if (notFound || offline) && !registrationDidTimeout {
|
if (notFound || (offline && !registrationOnly)) && !registrationDidTimeout {
|
||||||
registrationRecheckJitter := 10 * time.Second
|
registrationRecheckJitter := 10 * time.Second
|
||||||
if r.RegistrationRecheckJitter > 0 {
|
if r.RegistrationRecheckJitter > 0 {
|
||||||
registrationRecheckJitter = r.RegistrationRecheckJitter
|
registrationRecheckJitter = r.RegistrationRecheckJitter
|
||||||
@@ -502,75 +567,11 @@ func (r *RunnerReconciler) updateRegistrationToken(ctx context.Context, runner v
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) {
|
func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) {
|
||||||
var (
|
var template corev1.Pod
|
||||||
privileged bool = true
|
|
||||||
dockerdInRunner bool = runner.Spec.DockerdWithinRunnerContainer != nil && *runner.Spec.DockerdWithinRunnerContainer
|
|
||||||
dockerEnabled bool = runner.Spec.DockerEnabled == nil || *runner.Spec.DockerEnabled
|
|
||||||
)
|
|
||||||
|
|
||||||
runnerImage := runner.Spec.Image
|
|
||||||
if runnerImage == "" {
|
|
||||||
runnerImage = r.RunnerImage
|
|
||||||
}
|
|
||||||
|
|
||||||
workDir := runner.Spec.WorkDir
|
|
||||||
if workDir == "" {
|
|
||||||
workDir = "/runner/_work"
|
|
||||||
}
|
|
||||||
|
|
||||||
runnerImagePullPolicy := runner.Spec.ImagePullPolicy
|
|
||||||
if runnerImagePullPolicy == "" {
|
|
||||||
runnerImagePullPolicy = corev1.PullAlways
|
|
||||||
}
|
|
||||||
|
|
||||||
env := []corev1.EnvVar{
|
|
||||||
{
|
|
||||||
Name: "RUNNER_NAME",
|
|
||||||
Value: runner.Name,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "RUNNER_ORG",
|
|
||||||
Value: runner.Spec.Organization,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "RUNNER_REPO",
|
|
||||||
Value: runner.Spec.Repository,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "RUNNER_ENTERPRISE",
|
|
||||||
Value: runner.Spec.Enterprise,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "RUNNER_LABELS",
|
|
||||||
Value: strings.Join(runner.Spec.Labels, ","),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "RUNNER_GROUP",
|
|
||||||
Value: runner.Spec.Group,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "RUNNER_TOKEN",
|
|
||||||
Value: runner.Status.Registration.Token,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "DOCKERD_IN_RUNNER",
|
|
||||||
Value: fmt.Sprintf("%v", dockerdInRunner),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "GITHUB_URL",
|
|
||||||
Value: r.GitHubClient.GithubBaseURL,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "RUNNER_WORKDIR",
|
|
||||||
Value: workDir,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
env = append(env, runner.Spec.Env...)
|
|
||||||
|
|
||||||
labels := map[string]string{}
|
labels := map[string]string{}
|
||||||
|
|
||||||
for k, v := range runner.Labels {
|
for k, v := range runner.ObjectMeta.Labels {
|
||||||
labels[k] = v
|
labels[k] = v
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -592,45 +593,289 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) {
|
|||||||
// A registered runner's session and the a registration token seem to have two different and independent
|
// A registered runner's session and the a registration token seem to have two different and independent
|
||||||
// lifecycles.
|
// lifecycles.
|
||||||
//
|
//
|
||||||
// See https://github.com/summerwind/actions-runner-controller/issues/143 for more context.
|
// See https://github.com/actions-runner-controller/actions-runner-controller/issues/143 for more context.
|
||||||
labels[LabelKeyPodTemplateHash] = hash.FNVHashStringObjects(
|
labels[LabelKeyPodTemplateHash] = hash.FNVHashStringObjects(
|
||||||
filterLabels(runner.Labels, LabelKeyRunnerTemplateHash),
|
filterLabels(runner.ObjectMeta.Labels, LabelKeyRunnerTemplateHash),
|
||||||
runner.Annotations,
|
runner.ObjectMeta.Annotations,
|
||||||
runner.Spec,
|
runner.Spec,
|
||||||
r.GitHubClient.GithubBaseURL,
|
r.GitHubClient.GithubBaseURL,
|
||||||
)
|
)
|
||||||
|
|
||||||
pod := corev1.Pod{
|
objectMeta := metav1.ObjectMeta{
|
||||||
ObjectMeta: metav1.ObjectMeta{
|
Name: runner.ObjectMeta.Name,
|
||||||
Name: runner.Name,
|
Namespace: runner.ObjectMeta.Namespace,
|
||||||
Namespace: runner.Namespace,
|
Labels: labels,
|
||||||
Labels: labels,
|
Annotations: runner.ObjectMeta.Annotations,
|
||||||
Annotations: runner.Annotations,
|
}
|
||||||
},
|
|
||||||
Spec: corev1.PodSpec{
|
template.ObjectMeta = objectMeta
|
||||||
RestartPolicy: "OnFailure",
|
|
||||||
Containers: []corev1.Container{
|
if len(runner.Spec.Containers) == 0 {
|
||||||
{
|
template.Spec.Containers = append(template.Spec.Containers, corev1.Container{
|
||||||
Name: containerName,
|
Name: "runner",
|
||||||
Image: runnerImage,
|
ImagePullPolicy: runner.Spec.ImagePullPolicy,
|
||||||
ImagePullPolicy: runnerImagePullPolicy,
|
EnvFrom: runner.Spec.EnvFrom,
|
||||||
Env: env,
|
Env: runner.Spec.Env,
|
||||||
EnvFrom: runner.Spec.EnvFrom,
|
Resources: runner.Spec.Resources,
|
||||||
SecurityContext: &corev1.SecurityContext{
|
})
|
||||||
// Runner need to run privileged if it contains DinD
|
|
||||||
Privileged: runner.Spec.DockerdWithinRunnerContainer,
|
if runner.Spec.DockerdWithinRunnerContainer == nil || !*runner.Spec.DockerdWithinRunnerContainer {
|
||||||
},
|
template.Spec.Containers = append(template.Spec.Containers, corev1.Container{
|
||||||
Resources: runner.Spec.Resources,
|
Name: "docker",
|
||||||
|
VolumeMounts: runner.Spec.DockerVolumeMounts,
|
||||||
|
Resources: runner.Spec.DockerdContainerResources,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
template.Spec.Containers = runner.Spec.Containers
|
||||||
|
}
|
||||||
|
|
||||||
|
template.Spec.SecurityContext = runner.Spec.SecurityContext
|
||||||
|
template.Spec.EnableServiceLinks = runner.Spec.EnableServiceLinks
|
||||||
|
|
||||||
|
registrationOnly := metav1.HasAnnotation(runner.ObjectMeta, annotationKeyRegistrationOnly)
|
||||||
|
|
||||||
|
pod, err := newRunnerPod(template, runner.Spec.RunnerConfig, r.RunnerImage, r.DockerImage, r.DockerRegistryMirror, r.GitHubClient.GithubBaseURL, registrationOnly)
|
||||||
|
if err != nil {
|
||||||
|
return pod, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Customize the pod spec according to the runner spec
|
||||||
|
runnerSpec := runner.Spec
|
||||||
|
|
||||||
|
if len(runnerSpec.VolumeMounts) != 0 {
|
||||||
|
pod.Spec.Containers[0].VolumeMounts = append(pod.Spec.Containers[0].VolumeMounts, runnerSpec.VolumeMounts...)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(runnerSpec.Volumes) != 0 {
|
||||||
|
pod.Spec.Volumes = append(pod.Spec.Volumes, runnerSpec.Volumes...)
|
||||||
|
}
|
||||||
|
if len(runnerSpec.InitContainers) != 0 {
|
||||||
|
pod.Spec.InitContainers = append(pod.Spec.InitContainers, runnerSpec.InitContainers...)
|
||||||
|
}
|
||||||
|
|
||||||
|
if runnerSpec.NodeSelector != nil {
|
||||||
|
pod.Spec.NodeSelector = runnerSpec.NodeSelector
|
||||||
|
}
|
||||||
|
if runnerSpec.ServiceAccountName != "" {
|
||||||
|
pod.Spec.ServiceAccountName = runnerSpec.ServiceAccountName
|
||||||
|
}
|
||||||
|
if runnerSpec.AutomountServiceAccountToken != nil {
|
||||||
|
pod.Spec.AutomountServiceAccountToken = runnerSpec.AutomountServiceAccountToken
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(runnerSpec.SidecarContainers) != 0 {
|
||||||
|
pod.Spec.Containers = append(pod.Spec.Containers, runnerSpec.SidecarContainers...)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(runnerSpec.ImagePullSecrets) != 0 {
|
||||||
|
pod.Spec.ImagePullSecrets = runnerSpec.ImagePullSecrets
|
||||||
|
}
|
||||||
|
|
||||||
|
if runnerSpec.Affinity != nil {
|
||||||
|
pod.Spec.Affinity = runnerSpec.Affinity
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(runnerSpec.Tolerations) != 0 {
|
||||||
|
pod.Spec.Tolerations = runnerSpec.Tolerations
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(runnerSpec.EphemeralContainers) != 0 {
|
||||||
|
pod.Spec.EphemeralContainers = runnerSpec.EphemeralContainers
|
||||||
|
}
|
||||||
|
|
||||||
|
if runnerSpec.TerminationGracePeriodSeconds != nil {
|
||||||
|
pod.Spec.TerminationGracePeriodSeconds = runnerSpec.TerminationGracePeriodSeconds
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(runnerSpec.HostAliases) != 0 {
|
||||||
|
pod.Spec.HostAliases = runnerSpec.HostAliases
|
||||||
|
}
|
||||||
|
|
||||||
|
if runnerSpec.RuntimeClassName != nil {
|
||||||
|
pod.Spec.RuntimeClassName = runnerSpec.RuntimeClassName
|
||||||
|
}
|
||||||
|
|
||||||
|
pod.ObjectMeta.Name = runner.ObjectMeta.Name
|
||||||
|
|
||||||
|
// Inject the registration token and the runner name
|
||||||
|
updated := mutatePod(&pod, runner.Status.Registration.Token)
|
||||||
|
|
||||||
|
if err := ctrl.SetControllerReference(&runner, updated, r.Scheme); err != nil {
|
||||||
|
return pod, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return *updated, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func mutatePod(pod *corev1.Pod, token string) *corev1.Pod {
|
||||||
|
updated := pod.DeepCopy()
|
||||||
|
|
||||||
|
for i := range pod.Spec.Containers {
|
||||||
|
if pod.Spec.Containers[i].Name == "runner" {
|
||||||
|
updated.Spec.Containers[i].Env = append(updated.Spec.Containers[i].Env,
|
||||||
|
corev1.EnvVar{
|
||||||
|
Name: "RUNNER_NAME",
|
||||||
|
Value: pod.ObjectMeta.Name,
|
||||||
},
|
},
|
||||||
},
|
corev1.EnvVar{
|
||||||
|
Name: "RUNNER_TOKEN",
|
||||||
|
Value: token,
|
||||||
|
},
|
||||||
|
)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return updated
|
||||||
|
}
|
||||||
|
|
||||||
|
func newRunnerPod(template corev1.Pod, runnerSpec v1alpha1.RunnerConfig, defaultRunnerImage, defaultDockerImage, defaultDockerRegistryMirror string, githubBaseURL string, registrationOnly bool) (corev1.Pod, error) {
|
||||||
|
var (
|
||||||
|
privileged bool = true
|
||||||
|
dockerdInRunner bool = runnerSpec.DockerdWithinRunnerContainer != nil && *runnerSpec.DockerdWithinRunnerContainer
|
||||||
|
dockerEnabled bool = runnerSpec.DockerEnabled == nil || *runnerSpec.DockerEnabled
|
||||||
|
ephemeral bool = runnerSpec.Ephemeral == nil || *runnerSpec.Ephemeral
|
||||||
|
dockerdInRunnerPrivileged bool = dockerdInRunner
|
||||||
|
)
|
||||||
|
|
||||||
|
runnerImage := runnerSpec.Image
|
||||||
|
if runnerImage == "" {
|
||||||
|
runnerImage = defaultRunnerImage
|
||||||
|
}
|
||||||
|
|
||||||
|
workDir := runnerSpec.WorkDir
|
||||||
|
if workDir == "" {
|
||||||
|
workDir = "/runner/_work"
|
||||||
|
}
|
||||||
|
|
||||||
|
var dockerRegistryMirror string
|
||||||
|
if runnerSpec.DockerRegistryMirror == nil {
|
||||||
|
dockerRegistryMirror = defaultDockerRegistryMirror
|
||||||
|
} else {
|
||||||
|
dockerRegistryMirror = *runnerSpec.DockerRegistryMirror
|
||||||
|
}
|
||||||
|
|
||||||
|
env := []corev1.EnvVar{
|
||||||
|
{
|
||||||
|
Name: EnvVarOrg,
|
||||||
|
Value: runnerSpec.Organization,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: EnvVarRepo,
|
||||||
|
Value: runnerSpec.Repository,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: EnvVarEnterprise,
|
||||||
|
Value: runnerSpec.Enterprise,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "RUNNER_LABELS",
|
||||||
|
Value: strings.Join(runnerSpec.Labels, ","),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "RUNNER_GROUP",
|
||||||
|
Value: runnerSpec.Group,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "DOCKERD_IN_RUNNER",
|
||||||
|
Value: fmt.Sprintf("%v", dockerdInRunner),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GITHUB_URL",
|
||||||
|
Value: githubBaseURL,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "RUNNER_WORKDIR",
|
||||||
|
Value: workDir,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "RUNNER_EPHEMERAL",
|
||||||
|
Value: fmt.Sprintf("%v", ephemeral),
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
if mtu := runner.Spec.DockerMTU; mtu != nil && dockerdInRunner {
|
if registrationOnly {
|
||||||
pod.Spec.Containers[0].Env = append(pod.Spec.Containers[0].Env, []corev1.EnvVar{
|
env = append(env, corev1.EnvVar{
|
||||||
|
Name: "RUNNER_REGISTRATION_ONLY",
|
||||||
|
Value: "true",
|
||||||
|
},
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
var seLinuxOptions *corev1.SELinuxOptions
|
||||||
|
if template.Spec.SecurityContext != nil {
|
||||||
|
seLinuxOptions = template.Spec.SecurityContext.SELinuxOptions
|
||||||
|
if seLinuxOptions != nil {
|
||||||
|
privileged = false
|
||||||
|
dockerdInRunnerPrivileged = false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var runnerContainerIndex, dockerdContainerIndex int
|
||||||
|
var runnerContainer, dockerdContainer *corev1.Container
|
||||||
|
|
||||||
|
for i := range template.Spec.Containers {
|
||||||
|
c := template.Spec.Containers[i]
|
||||||
|
if c.Name == containerName {
|
||||||
|
runnerContainerIndex = i
|
||||||
|
runnerContainer = &c
|
||||||
|
} else if c.Name == "docker" {
|
||||||
|
dockerdContainerIndex = i
|
||||||
|
dockerdContainer = &c
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if runnerContainer == nil {
|
||||||
|
runnerContainerIndex = -1
|
||||||
|
runnerContainer = &corev1.Container{
|
||||||
|
Name: containerName,
|
||||||
|
SecurityContext: &corev1.SecurityContext{
|
||||||
|
// Runner need to run privileged if it contains DinD
|
||||||
|
Privileged: &dockerdInRunnerPrivileged,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if dockerdContainer == nil {
|
||||||
|
dockerdContainerIndex = -1
|
||||||
|
dockerdContainer = &corev1.Container{
|
||||||
|
Name: "docker",
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
runnerContainer.Image = runnerImage
|
||||||
|
if runnerContainer.ImagePullPolicy == "" {
|
||||||
|
runnerContainer.ImagePullPolicy = corev1.PullAlways
|
||||||
|
}
|
||||||
|
|
||||||
|
runnerContainer.Env = append(runnerContainer.Env, env...)
|
||||||
|
|
||||||
|
if runnerContainer.SecurityContext == nil {
|
||||||
|
runnerContainer.SecurityContext = &corev1.SecurityContext{}
|
||||||
|
}
|
||||||
|
// Runner need to run privileged if it contains DinD
|
||||||
|
runnerContainer.SecurityContext.Privileged = &dockerdInRunnerPrivileged
|
||||||
|
|
||||||
|
pod := template.DeepCopy()
|
||||||
|
|
||||||
|
if pod.Spec.RestartPolicy == "" {
|
||||||
|
pod.Spec.RestartPolicy = "OnFailure"
|
||||||
|
}
|
||||||
|
|
||||||
|
if mtu := runnerSpec.DockerMTU; mtu != nil && dockerdInRunner {
|
||||||
|
runnerContainer.Env = append(runnerContainer.Env, []corev1.EnvVar{
|
||||||
{
|
{
|
||||||
Name: "MTU",
|
Name: "MTU",
|
||||||
Value: fmt.Sprintf("%d", *runner.Spec.DockerMTU),
|
Value: fmt.Sprintf("%d", *runnerSpec.DockerMTU),
|
||||||
|
},
|
||||||
|
}...)
|
||||||
|
}
|
||||||
|
|
||||||
|
if dockerRegistryMirror != "" && dockerdInRunner {
|
||||||
|
runnerContainer.Env = append(runnerContainer.Env, []corev1.EnvVar{
|
||||||
|
{
|
||||||
|
Name: "DOCKER_REGISTRY_MIRROR",
|
||||||
|
Value: dockerRegistryMirror,
|
||||||
},
|
},
|
||||||
}...)
|
}...)
|
||||||
}
|
}
|
||||||
@@ -641,27 +886,49 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) {
|
|||||||
// When you're NOT using dindWithinRunner=true,
|
// When you're NOT using dindWithinRunner=true,
|
||||||
// it must also be shared with the dind container as it seems like required to run docker steps.
|
// it must also be shared with the dind container as it seems like required to run docker steps.
|
||||||
//
|
//
|
||||||
|
// Setting VolumeSizeLimit to zero will disable /runner emptydir mount
|
||||||
|
//
|
||||||
|
// VolumeStorageMedium defines ways that storage can be allocated to a volume: "", "Memory", "HugePages", "HugePages-<size>"
|
||||||
|
//
|
||||||
|
|
||||||
runnerVolumeName := "runner"
|
runnerVolumeName := "runner"
|
||||||
runnerVolumeMountPath := "/runner"
|
runnerVolumeMountPath := "/runner"
|
||||||
|
runnerVolumeEmptyDir := &corev1.EmptyDirVolumeSource{}
|
||||||
|
|
||||||
pod.Spec.Volumes = append(pod.Spec.Volumes,
|
if runnerSpec.VolumeStorageMedium != nil {
|
||||||
corev1.Volume{
|
runnerVolumeEmptyDir.Medium = corev1.StorageMedium(*runnerSpec.VolumeStorageMedium)
|
||||||
Name: runnerVolumeName,
|
}
|
||||||
VolumeSource: corev1.VolumeSource{
|
|
||||||
EmptyDir: &corev1.EmptyDirVolumeSource{},
|
if runnerSpec.VolumeSizeLimit != nil {
|
||||||
|
runnerVolumeEmptyDir.SizeLimit = runnerSpec.VolumeSizeLimit
|
||||||
|
}
|
||||||
|
|
||||||
|
if runnerSpec.VolumeSizeLimit == nil || !runnerSpec.VolumeSizeLimit.IsZero() {
|
||||||
|
pod.Spec.Volumes = append(pod.Spec.Volumes,
|
||||||
|
corev1.Volume{
|
||||||
|
Name: runnerVolumeName,
|
||||||
|
VolumeSource: corev1.VolumeSource{
|
||||||
|
EmptyDir: runnerVolumeEmptyDir,
|
||||||
|
},
|
||||||
},
|
},
|
||||||
},
|
)
|
||||||
)
|
|
||||||
|
|
||||||
pod.Spec.Containers[0].VolumeMounts = append(pod.Spec.Containers[0].VolumeMounts,
|
runnerContainer.VolumeMounts = append(runnerContainer.VolumeMounts,
|
||||||
corev1.VolumeMount{
|
corev1.VolumeMount{
|
||||||
Name: runnerVolumeName,
|
Name: runnerVolumeName,
|
||||||
MountPath: runnerVolumeMountPath,
|
MountPath: runnerVolumeMountPath,
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
|
}
|
||||||
|
|
||||||
if !dockerdInRunner && dockerEnabled {
|
if !dockerdInRunner && dockerEnabled {
|
||||||
|
if runnerSpec.VolumeSizeLimit != nil && runnerSpec.VolumeSizeLimit.IsZero() {
|
||||||
|
return *pod, fmt.Errorf(
|
||||||
|
"%s volume can't be disabled because it is required to share the working directory between the runner and the dockerd containers",
|
||||||
|
runnerVolumeName,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
pod.Spec.Volumes = append(pod.Spec.Volumes,
|
pod.Spec.Volumes = append(pod.Spec.Volumes,
|
||||||
corev1.Volume{
|
corev1.Volume{
|
||||||
Name: "work",
|
Name: "work",
|
||||||
@@ -676,7 +943,7 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) {
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
pod.Spec.Containers[0].VolumeMounts = append(pod.Spec.Containers[0].VolumeMounts,
|
runnerContainer.VolumeMounts = append(runnerContainer.VolumeMounts,
|
||||||
corev1.VolumeMount{
|
corev1.VolumeMount{
|
||||||
Name: "work",
|
Name: "work",
|
||||||
MountPath: workDir,
|
MountPath: workDir,
|
||||||
@@ -687,7 +954,7 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) {
|
|||||||
ReadOnly: true,
|
ReadOnly: true,
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
pod.Spec.Containers[0].Env = append(pod.Spec.Containers[0].Env, []corev1.EnvVar{
|
runnerContainer.Env = append(runnerContainer.Env, []corev1.EnvVar{
|
||||||
{
|
{
|
||||||
Name: "DOCKER_HOST",
|
Name: "DOCKER_HOST",
|
||||||
Value: "tcp://localhost:2376",
|
Value: "tcp://localhost:2376",
|
||||||
@@ -703,7 +970,7 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) {
|
|||||||
}...)
|
}...)
|
||||||
|
|
||||||
// Determine the volume mounts assigned to the docker sidecar. In case extra mounts are included in the RunnerSpec, append them to the standard
|
// Determine the volume mounts assigned to the docker sidecar. In case extra mounts are included in the RunnerSpec, append them to the standard
|
||||||
// set of mounts. See https://github.com/summerwind/actions-runner-controller/issues/435 for context.
|
// set of mounts. See https://github.com/actions-runner-controller/actions-runner-controller/issues/435 for context.
|
||||||
dockerVolumeMounts := []corev1.VolumeMount{
|
dockerVolumeMounts := []corev1.VolumeMount{
|
||||||
{
|
{
|
||||||
Name: "work",
|
Name: "work",
|
||||||
@@ -718,106 +985,66 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) {
|
|||||||
MountPath: "/certs/client",
|
MountPath: "/certs/client",
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
if extraDockerVolumeMounts := runner.Spec.DockerVolumeMounts; extraDockerVolumeMounts != nil {
|
|
||||||
dockerVolumeMounts = append(dockerVolumeMounts, extraDockerVolumeMounts...)
|
if dockerdContainer.Image == "" {
|
||||||
|
dockerdContainer.Image = defaultDockerImage
|
||||||
}
|
}
|
||||||
|
|
||||||
pod.Spec.Containers = append(pod.Spec.Containers, corev1.Container{
|
dockerdContainer.Env = append(dockerdContainer.Env, corev1.EnvVar{
|
||||||
Name: "docker",
|
Name: "DOCKER_TLS_CERTDIR",
|
||||||
Image: r.DockerImage,
|
Value: "/certs",
|
||||||
VolumeMounts: dockerVolumeMounts,
|
|
||||||
Env: []corev1.EnvVar{
|
|
||||||
{
|
|
||||||
Name: "DOCKER_TLS_CERTDIR",
|
|
||||||
Value: "/certs",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
SecurityContext: &corev1.SecurityContext{
|
|
||||||
Privileged: &privileged,
|
|
||||||
},
|
|
||||||
Resources: runner.Spec.DockerdContainerResources,
|
|
||||||
})
|
})
|
||||||
|
|
||||||
if mtu := runner.Spec.DockerMTU; mtu != nil {
|
if dockerdContainer.SecurityContext == nil {
|
||||||
pod.Spec.Containers[1].Env = append(pod.Spec.Containers[1].Env, []corev1.EnvVar{
|
dockerdContainer.SecurityContext = &corev1.SecurityContext{
|
||||||
|
Privileged: &privileged,
|
||||||
|
SELinuxOptions: seLinuxOptions,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
dockerdContainer.VolumeMounts = append(dockerdContainer.VolumeMounts, dockerVolumeMounts...)
|
||||||
|
|
||||||
|
if mtu := runnerSpec.DockerMTU; mtu != nil {
|
||||||
|
dockerdContainer.Env = append(dockerdContainer.Env, []corev1.EnvVar{
|
||||||
// See https://docs.docker.com/engine/security/rootless/
|
// See https://docs.docker.com/engine/security/rootless/
|
||||||
{
|
{
|
||||||
Name: "DOCKERD_ROOTLESS_ROOTLESSKIT_MTU",
|
Name: "DOCKERD_ROOTLESS_ROOTLESSKIT_MTU",
|
||||||
Value: fmt.Sprintf("%d", *runner.Spec.DockerMTU),
|
Value: fmt.Sprintf("%d", *runnerSpec.DockerMTU),
|
||||||
},
|
},
|
||||||
}...)
|
}...)
|
||||||
|
|
||||||
pod.Spec.Containers[1].Args = append(pod.Spec.Containers[1].Args,
|
dockerdContainer.Args = append(dockerdContainer.Args,
|
||||||
"--mtu",
|
"--mtu",
|
||||||
fmt.Sprintf("%d", *runner.Spec.DockerMTU),
|
fmt.Sprintf("%d", *runnerSpec.DockerMTU),
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
if dockerRegistryMirror != "" {
|
||||||
|
dockerdContainer.Args = append(dockerdContainer.Args,
|
||||||
if len(runner.Spec.Containers) != 0 {
|
fmt.Sprintf("--registry-mirror=%s", dockerRegistryMirror),
|
||||||
pod.Spec.Containers = runner.Spec.Containers
|
)
|
||||||
for i := 0; i < len(pod.Spec.Containers); i++ {
|
|
||||||
if pod.Spec.Containers[i].Name == containerName {
|
|
||||||
pod.Spec.Containers[i].Env = append(pod.Spec.Containers[i].Env, env...)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(runner.Spec.VolumeMounts) != 0 {
|
if runnerContainerIndex == -1 {
|
||||||
pod.Spec.Containers[0].VolumeMounts = append(pod.Spec.Containers[0].VolumeMounts, runner.Spec.VolumeMounts...)
|
pod.Spec.Containers = append([]corev1.Container{*runnerContainer}, pod.Spec.Containers...)
|
||||||
|
|
||||||
|
if dockerdContainerIndex != -1 {
|
||||||
|
dockerdContainerIndex++
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
pod.Spec.Containers[runnerContainerIndex] = *runnerContainer
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(runner.Spec.Volumes) != 0 {
|
if !dockerdInRunner && dockerEnabled {
|
||||||
pod.Spec.Volumes = append(pod.Spec.Volumes, runner.Spec.Volumes...)
|
if dockerdContainerIndex == -1 {
|
||||||
}
|
pod.Spec.Containers = append(pod.Spec.Containers, *dockerdContainer)
|
||||||
if len(runner.Spec.InitContainers) != 0 {
|
} else {
|
||||||
pod.Spec.InitContainers = append(pod.Spec.InitContainers, runner.Spec.InitContainers...)
|
pod.Spec.Containers[dockerdContainerIndex] = *dockerdContainer
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if runner.Spec.NodeSelector != nil {
|
return *pod, nil
|
||||||
pod.Spec.NodeSelector = runner.Spec.NodeSelector
|
|
||||||
}
|
|
||||||
if runner.Spec.ServiceAccountName != "" {
|
|
||||||
pod.Spec.ServiceAccountName = runner.Spec.ServiceAccountName
|
|
||||||
}
|
|
||||||
if runner.Spec.AutomountServiceAccountToken != nil {
|
|
||||||
pod.Spec.AutomountServiceAccountToken = runner.Spec.AutomountServiceAccountToken
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(runner.Spec.SidecarContainers) != 0 {
|
|
||||||
pod.Spec.Containers = append(pod.Spec.Containers, runner.Spec.SidecarContainers...)
|
|
||||||
}
|
|
||||||
|
|
||||||
if runner.Spec.SecurityContext != nil {
|
|
||||||
pod.Spec.SecurityContext = runner.Spec.SecurityContext
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(runner.Spec.ImagePullSecrets) != 0 {
|
|
||||||
pod.Spec.ImagePullSecrets = runner.Spec.ImagePullSecrets
|
|
||||||
}
|
|
||||||
|
|
||||||
if runner.Spec.Affinity != nil {
|
|
||||||
pod.Spec.Affinity = runner.Spec.Affinity
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(runner.Spec.Tolerations) != 0 {
|
|
||||||
pod.Spec.Tolerations = runner.Spec.Tolerations
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(runner.Spec.EphemeralContainers) != 0 {
|
|
||||||
pod.Spec.EphemeralContainers = runner.Spec.EphemeralContainers
|
|
||||||
}
|
|
||||||
|
|
||||||
if runner.Spec.TerminationGracePeriodSeconds != nil {
|
|
||||||
pod.Spec.TerminationGracePeriodSeconds = runner.Spec.TerminationGracePeriodSeconds
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := ctrl.SetControllerReference(&runner, &pod, r.Scheme); err != nil {
|
|
||||||
return pod, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return pod, nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *RunnerReconciler) SetupWithManager(mgr ctrl.Manager) error {
|
func (r *RunnerReconciler) SetupWithManager(mgr ctrl.Manager) error {
|
||||||
@@ -835,7 +1062,7 @@ func (r *RunnerReconciler) SetupWithManager(mgr ctrl.Manager) error {
|
|||||||
Complete(r)
|
Complete(r)
|
||||||
}
|
}
|
||||||
|
|
||||||
func addFinalizer(finalizers []string) ([]string, bool) {
|
func addFinalizer(finalizers []string, finalizerName string) ([]string, bool) {
|
||||||
exists := false
|
exists := false
|
||||||
for _, name := range finalizers {
|
for _, name := range finalizers {
|
||||||
if name == finalizerName {
|
if name == finalizerName {
|
||||||
@@ -850,7 +1077,7 @@ func addFinalizer(finalizers []string) ([]string, bool) {
|
|||||||
return append(finalizers, finalizerName), true
|
return append(finalizers, finalizerName), true
|
||||||
}
|
}
|
||||||
|
|
||||||
func removeFinalizer(finalizers []string) ([]string, bool) {
|
func removeFinalizer(finalizers []string, finalizerName string) ([]string, bool) {
|
||||||
removed := false
|
removed := false
|
||||||
result := []string{}
|
result := []string{}
|
||||||
|
|
||||||
|
|||||||
431
controllers/runner_pod_controller.go
Normal file
431
controllers/runner_pod_controller.go
Normal file
@@ -0,0 +1,431 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2020 The actions-runner-controller authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package controllers
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
gogithub "github.com/google/go-github/v37/github"
|
||||||
|
"k8s.io/apimachinery/pkg/util/wait"
|
||||||
|
|
||||||
|
"github.com/go-logr/logr"
|
||||||
|
kerrors "k8s.io/apimachinery/pkg/api/errors"
|
||||||
|
"k8s.io/apimachinery/pkg/runtime"
|
||||||
|
"k8s.io/client-go/tools/record"
|
||||||
|
ctrl "sigs.k8s.io/controller-runtime"
|
||||||
|
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||||
|
|
||||||
|
corev1 "k8s.io/api/core/v1"
|
||||||
|
|
||||||
|
"github.com/actions-runner-controller/actions-runner-controller/github"
|
||||||
|
)
|
||||||
|
|
||||||
|
// RunnerPodReconciler reconciles a Runner object
|
||||||
|
type RunnerPodReconciler struct {
|
||||||
|
client.Client
|
||||||
|
Log logr.Logger
|
||||||
|
Recorder record.EventRecorder
|
||||||
|
Scheme *runtime.Scheme
|
||||||
|
GitHubClient *github.Client
|
||||||
|
Name string
|
||||||
|
RegistrationRecheckInterval time.Duration
|
||||||
|
RegistrationRecheckJitter time.Duration
|
||||||
|
}
|
||||||
|
|
||||||
|
const (
|
||||||
|
// This names requires at leaset one slash to work.
|
||||||
|
// See https://github.com/google/knative-gcp/issues/378
|
||||||
|
runnerPodFinalizerName = "actions.summerwind.dev/runner-pod"
|
||||||
|
|
||||||
|
AnnotationKeyLastRegistrationCheckTime = "actions-runner-controller/last-registration-check-time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// +kubebuilder:rbac:groups=core,resources=pods,verbs=get;list;watch;update;patch;delete
|
||||||
|
// +kubebuilder:rbac:groups=core,resources=events,verbs=create;patch
|
||||||
|
|
||||||
|
func (r *RunnerPodReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
|
||||||
|
log := r.Log.WithValues("runnerpod", req.NamespacedName)
|
||||||
|
|
||||||
|
var runnerPod corev1.Pod
|
||||||
|
if err := r.Get(ctx, req.NamespacedName, &runnerPod); err != nil {
|
||||||
|
return ctrl.Result{}, client.IgnoreNotFound(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
_, isRunnerPod := runnerPod.Labels[LabelKeyRunnerSetName]
|
||||||
|
if !isRunnerPod {
|
||||||
|
return ctrl.Result{}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var enterprise, org, repo string
|
||||||
|
|
||||||
|
envvars := runnerPod.Spec.Containers[0].Env
|
||||||
|
for _, e := range envvars {
|
||||||
|
switch e.Name {
|
||||||
|
case EnvVarEnterprise:
|
||||||
|
enterprise = e.Value
|
||||||
|
case EnvVarOrg:
|
||||||
|
org = e.Value
|
||||||
|
case EnvVarRepo:
|
||||||
|
repo = e.Value
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if runnerPod.ObjectMeta.DeletionTimestamp.IsZero() {
|
||||||
|
finalizers, added := addFinalizer(runnerPod.ObjectMeta.Finalizers, runnerPodFinalizerName)
|
||||||
|
|
||||||
|
if added {
|
||||||
|
newRunner := runnerPod.DeepCopy()
|
||||||
|
newRunner.ObjectMeta.Finalizers = finalizers
|
||||||
|
|
||||||
|
if err := r.Patch(ctx, newRunner, client.MergeFrom(&runnerPod)); err != nil {
|
||||||
|
log.Error(err, "Failed to update runner")
|
||||||
|
return ctrl.Result{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return ctrl.Result{}, nil
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
finalizers, removed := removeFinalizer(runnerPod.ObjectMeta.Finalizers, runnerPodFinalizerName)
|
||||||
|
|
||||||
|
if removed {
|
||||||
|
ok, err := r.unregisterRunner(ctx, enterprise, org, repo, runnerPod.Name)
|
||||||
|
if err != nil {
|
||||||
|
if errors.Is(err, &gogithub.RateLimitError{}) {
|
||||||
|
// We log the underlying error when we failed calling GitHub API to list or unregisters,
|
||||||
|
// or the runner is still busy.
|
||||||
|
log.Error(
|
||||||
|
err,
|
||||||
|
fmt.Sprintf(
|
||||||
|
"Failed to unregister runner due to GitHub API rate limits. Delaying retry for %s to avoid excessive GitHub API calls",
|
||||||
|
retryDelayOnGitHubAPIRateLimitError,
|
||||||
|
),
|
||||||
|
)
|
||||||
|
|
||||||
|
return ctrl.Result{RequeueAfter: retryDelayOnGitHubAPIRateLimitError}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return ctrl.Result{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if !ok {
|
||||||
|
log.V(1).Info("Runner no longer exists on GitHub")
|
||||||
|
}
|
||||||
|
|
||||||
|
newRunner := runnerPod.DeepCopy()
|
||||||
|
newRunner.ObjectMeta.Finalizers = finalizers
|
||||||
|
|
||||||
|
if err := r.Patch(ctx, newRunner, client.MergeFrom(&runnerPod)); err != nil {
|
||||||
|
log.Error(err, "Failed to update runner for finalizer removal")
|
||||||
|
return ctrl.Result{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Info("Removed runner from GitHub", "repository", repo, "organization", org)
|
||||||
|
}
|
||||||
|
|
||||||
|
deletionTimeout := 1 * time.Minute
|
||||||
|
currentTime := time.Now()
|
||||||
|
deletionDidTimeout := currentTime.Sub(runnerPod.DeletionTimestamp.Add(deletionTimeout)) > 0
|
||||||
|
|
||||||
|
if deletionDidTimeout {
|
||||||
|
log.Info(
|
||||||
|
fmt.Sprintf("Failed to delete pod within %s. ", deletionTimeout)+
|
||||||
|
"This is typically the case when a Kubernetes node became unreachable "+
|
||||||
|
"and the kube controller started evicting nodes. Forcefully deleting the pod to not get stuck.",
|
||||||
|
"podDeletionTimestamp", runnerPod.DeletionTimestamp,
|
||||||
|
"currentTime", currentTime,
|
||||||
|
"configuredDeletionTimeout", deletionTimeout,
|
||||||
|
)
|
||||||
|
|
||||||
|
var force int64 = 0
|
||||||
|
// forcefully delete runner as we would otherwise get stuck if the node stays unreachable
|
||||||
|
if err := r.Delete(ctx, &runnerPod, &client.DeleteOptions{GracePeriodSeconds: &force}); err != nil {
|
||||||
|
// probably
|
||||||
|
if !kerrors.IsNotFound(err) {
|
||||||
|
log.Error(err, "Failed to forcefully delete pod resource ...")
|
||||||
|
return ctrl.Result{}, err
|
||||||
|
}
|
||||||
|
// forceful deletion finally succeeded
|
||||||
|
return ctrl.Result{Requeue: true}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
r.Recorder.Event(&runnerPod, corev1.EventTypeNormal, "PodDeleted", fmt.Sprintf("Forcefully deleted pod '%s'", runnerPod.Name))
|
||||||
|
log.Info("Forcefully deleted runner pod", "repository", repo)
|
||||||
|
// give kube manager a little time to forcefully delete the stuck pod
|
||||||
|
return ctrl.Result{RequeueAfter: 3 * time.Second}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return ctrl.Result{}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// If pod has ended up succeeded we need to restart it
|
||||||
|
// Happens e.g. when dind is in runner and run completes
|
||||||
|
stopped := runnerPod.Status.Phase == corev1.PodSucceeded
|
||||||
|
|
||||||
|
if !stopped {
|
||||||
|
if runnerPod.Status.Phase == corev1.PodRunning {
|
||||||
|
for _, status := range runnerPod.Status.ContainerStatuses {
|
||||||
|
if status.Name != containerName {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if status.State.Terminated != nil && status.State.Terminated.ExitCode == 0 {
|
||||||
|
stopped = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
restart := stopped
|
||||||
|
|
||||||
|
var registrationRecheckDelay time.Duration
|
||||||
|
|
||||||
|
// all checks done below only decide whether a restart is needed
|
||||||
|
// if a restart was already decided before, there is no need for the checks
|
||||||
|
// saving API calls and scary log messages
|
||||||
|
if !restart {
|
||||||
|
registrationCheckInterval := time.Minute
|
||||||
|
if r.RegistrationRecheckInterval > 0 {
|
||||||
|
registrationCheckInterval = r.RegistrationRecheckInterval
|
||||||
|
}
|
||||||
|
|
||||||
|
lastCheckTimeStr := runnerPod.Annotations[AnnotationKeyLastRegistrationCheckTime]
|
||||||
|
|
||||||
|
var lastCheckTime *time.Time
|
||||||
|
|
||||||
|
if lastCheckTimeStr != "" {
|
||||||
|
t, err := time.Parse(time.RFC3339, lastCheckTimeStr)
|
||||||
|
if err != nil {
|
||||||
|
log.Error(err, "failed to parase last check time %q", lastCheckTimeStr)
|
||||||
|
return ctrl.Result{}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
lastCheckTime = &t
|
||||||
|
}
|
||||||
|
|
||||||
|
// We want to call ListRunners GitHub Actions API only once per runner per minute.
|
||||||
|
// This if block, in conjunction with:
|
||||||
|
// return ctrl.Result{RequeueAfter: registrationRecheckDelay}, nil
|
||||||
|
// achieves that.
|
||||||
|
if lastCheckTime != nil {
|
||||||
|
nextCheckTime := lastCheckTime.Add(registrationCheckInterval)
|
||||||
|
now := time.Now()
|
||||||
|
|
||||||
|
// Requeue scheduled by RequeueAfter can happen a bit earlier (like dozens of milliseconds)
|
||||||
|
// so to avoid excessive, in-effective retry, we heuristically ignore the remaining delay in case it is
|
||||||
|
// shorter than 1s
|
||||||
|
requeueAfter := nextCheckTime.Sub(now) - time.Second
|
||||||
|
if requeueAfter > 0 {
|
||||||
|
log.Info(
|
||||||
|
fmt.Sprintf("Skipped registration check because it's deferred until %s. Retrying in %s at latest", nextCheckTime, requeueAfter),
|
||||||
|
"lastRegistrationCheckTime", lastCheckTime,
|
||||||
|
"registrationCheckInterval", registrationCheckInterval,
|
||||||
|
)
|
||||||
|
|
||||||
|
// Without RequeueAfter, the controller may not retry on scheduled. Instead, it must wait until the
|
||||||
|
// next sync period passes, which can be too much later than nextCheckTime.
|
||||||
|
//
|
||||||
|
// We need to requeue on this reconcilation even though we have already scheduled the initial
|
||||||
|
// requeue previously with `return ctrl.Result{RequeueAfter: registrationRecheckDelay}, nil`.
|
||||||
|
// Apparently, the workqueue used by controller-runtime seems to deduplicate and resets the delay on
|
||||||
|
// other requeues- so the initial scheduled requeue may have been reset due to requeue on
|
||||||
|
// spec/status change.
|
||||||
|
return ctrl.Result{RequeueAfter: requeueAfter}, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
notFound := false
|
||||||
|
offline := false
|
||||||
|
|
||||||
|
_, err := r.GitHubClient.IsRunnerBusy(ctx, enterprise, org, repo, runnerPod.Name)
|
||||||
|
|
||||||
|
currentTime := time.Now()
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
var notFoundException *github.RunnerNotFound
|
||||||
|
var offlineException *github.RunnerOffline
|
||||||
|
if errors.As(err, ¬FoundException) {
|
||||||
|
notFound = true
|
||||||
|
} else if errors.As(err, &offlineException) {
|
||||||
|
offline = true
|
||||||
|
} else {
|
||||||
|
var e *gogithub.RateLimitError
|
||||||
|
if errors.As(err, &e) {
|
||||||
|
// We log the underlying error when we failed calling GitHub API to list or unregisters,
|
||||||
|
// or the runner is still busy.
|
||||||
|
log.Error(
|
||||||
|
err,
|
||||||
|
fmt.Sprintf(
|
||||||
|
"Failed to check if runner is busy due to Github API rate limit. Retrying in %s to avoid excessive GitHub API calls",
|
||||||
|
retryDelayOnGitHubAPIRateLimitError,
|
||||||
|
),
|
||||||
|
)
|
||||||
|
|
||||||
|
return ctrl.Result{RequeueAfter: retryDelayOnGitHubAPIRateLimitError}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return ctrl.Result{}, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
registrationTimeout := 10 * time.Minute
|
||||||
|
durationAfterRegistrationTimeout := currentTime.Sub(runnerPod.CreationTimestamp.Add(registrationTimeout))
|
||||||
|
registrationDidTimeout := durationAfterRegistrationTimeout > 0
|
||||||
|
|
||||||
|
if notFound {
|
||||||
|
if registrationDidTimeout {
|
||||||
|
log.Info(
|
||||||
|
"Runner failed to register itself to GitHub in timely manner. "+
|
||||||
|
"Recreating the pod to see if it resolves the issue. "+
|
||||||
|
"CAUTION: If you see this a lot, you should investigate the root cause. "+
|
||||||
|
"See https://github.com/actions-runner-controller/actions-runner-controller/issues/288",
|
||||||
|
"podCreationTimestamp", runnerPod.CreationTimestamp,
|
||||||
|
"currentTime", currentTime,
|
||||||
|
"configuredRegistrationTimeout", registrationTimeout,
|
||||||
|
)
|
||||||
|
|
||||||
|
restart = true
|
||||||
|
} else {
|
||||||
|
log.V(1).Info(
|
||||||
|
"Runner pod exists but we failed to check if runner is busy. Apparently it still needs more time.",
|
||||||
|
"runnerName", runnerPod.Name,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
} else if offline {
|
||||||
|
if registrationDidTimeout {
|
||||||
|
log.Info(
|
||||||
|
"Already existing GitHub runner still appears offline . "+
|
||||||
|
"Recreating the pod to see if it resolves the issue. "+
|
||||||
|
"CAUTION: If you see this a lot, you should investigate the root cause. ",
|
||||||
|
"podCreationTimestamp", runnerPod.CreationTimestamp,
|
||||||
|
"currentTime", currentTime,
|
||||||
|
"configuredRegistrationTimeout", registrationTimeout,
|
||||||
|
)
|
||||||
|
|
||||||
|
restart = true
|
||||||
|
} else {
|
||||||
|
log.V(1).Info(
|
||||||
|
"Runner pod exists but the GitHub runner appears to be still offline. Waiting for runner to get online ...",
|
||||||
|
"runnerName", runnerPod.Name,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (notFound || offline) && !registrationDidTimeout {
|
||||||
|
registrationRecheckJitter := 10 * time.Second
|
||||||
|
if r.RegistrationRecheckJitter > 0 {
|
||||||
|
registrationRecheckJitter = r.RegistrationRecheckJitter
|
||||||
|
}
|
||||||
|
|
||||||
|
registrationRecheckDelay = registrationCheckInterval + wait.Jitter(registrationRecheckJitter, 0.1)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Don't do anything if there's no need to restart the runner
|
||||||
|
if !restart {
|
||||||
|
// This guard enables us to update runner.Status.Phase to `Running` only after
|
||||||
|
// the runner is registered to GitHub.
|
||||||
|
if registrationRecheckDelay > 0 {
|
||||||
|
log.V(1).Info(fmt.Sprintf("Rechecking the runner registration in %s", registrationRecheckDelay))
|
||||||
|
|
||||||
|
updated := runnerPod.DeepCopy()
|
||||||
|
t := time.Now().Format(time.RFC3339)
|
||||||
|
updated.Annotations[AnnotationKeyLastRegistrationCheckTime] = t
|
||||||
|
|
||||||
|
if err := r.Patch(ctx, updated, client.MergeFrom(&runnerPod)); err != nil {
|
||||||
|
log.Error(err, "Failed to update runner pod annotation for LastRegistrationCheckTime")
|
||||||
|
return ctrl.Result{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return ctrl.Result{RequeueAfter: registrationRecheckDelay}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Seeing this message, you can expect the runner to become `Running` soon.
|
||||||
|
log.Info(
|
||||||
|
"Runner appears to have registered and running.",
|
||||||
|
"podCreationTimestamp", runnerPod.CreationTimestamp,
|
||||||
|
)
|
||||||
|
|
||||||
|
return ctrl.Result{}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Delete current pod if recreation is needed
|
||||||
|
if err := r.Delete(ctx, &runnerPod); err != nil {
|
||||||
|
log.Error(err, "Failed to delete pod resource")
|
||||||
|
return ctrl.Result{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
r.Recorder.Event(&runnerPod, corev1.EventTypeNormal, "PodDeleted", fmt.Sprintf("Deleted pod '%s'", runnerPod.Name))
|
||||||
|
log.Info("Deleted runner pod", "name", runnerPod.Name)
|
||||||
|
|
||||||
|
return ctrl.Result{}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *RunnerPodReconciler) unregisterRunner(ctx context.Context, enterprise, org, repo, name string) (bool, error) {
|
||||||
|
runners, err := r.GitHubClient.ListRunners(ctx, enterprise, org, repo)
|
||||||
|
if err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
var busy bool
|
||||||
|
|
||||||
|
id := int64(0)
|
||||||
|
for _, runner := range runners {
|
||||||
|
if runner.GetName() == name {
|
||||||
|
// Sometimes a runner can stuck "busy" even though it is already "offline".
|
||||||
|
// Thus removing the condition on status can block the runner pod from being terminated forever.
|
||||||
|
busy = runner.GetBusy()
|
||||||
|
if runner.GetStatus() != "offline" && busy {
|
||||||
|
r.Log.Info("This runner will delay the runner pod deletion and the runner deregistration until it becomes either offline or non-busy", "name", runner.GetName(), "status", runner.GetStatus(), "busy", runner.GetBusy())
|
||||||
|
return false, fmt.Errorf("runner is busy")
|
||||||
|
}
|
||||||
|
id = runner.GetID()
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if id == int64(0) {
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sometimes a runner can stuck "busy" even though it is already "offline".
|
||||||
|
// Trying to remove the offline but busy runner can result in errors like the following:
|
||||||
|
// failed to remove runner: DELETE https://api.github.com/repos/actions-runner-controller/mumoshu-actions-test/actions/runners/47: 422 Bad request - Runner \"example-runnerset-0\" is still running a job\" []
|
||||||
|
if !busy {
|
||||||
|
if err := r.GitHubClient.RemoveRunner(ctx, enterprise, org, repo, id); err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *RunnerPodReconciler) SetupWithManager(mgr ctrl.Manager) error {
|
||||||
|
name := "runnerpod-controller"
|
||||||
|
if r.Name != "" {
|
||||||
|
name = r.Name
|
||||||
|
}
|
||||||
|
|
||||||
|
r.Recorder = mgr.GetEventRecorderFor(name)
|
||||||
|
|
||||||
|
return ctrl.NewControllerManagedBy(mgr).
|
||||||
|
For(&corev1.Pod{}).
|
||||||
|
Named(name).
|
||||||
|
Complete(r)
|
||||||
|
}
|
||||||
@@ -37,8 +37,8 @@ import (
|
|||||||
corev1 "k8s.io/api/core/v1"
|
corev1 "k8s.io/api/core/v1"
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
|
|
||||||
"github.com/summerwind/actions-runner-controller/api/v1alpha1"
|
"github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
|
||||||
"github.com/summerwind/actions-runner-controller/controllers/metrics"
|
"github.com/actions-runner-controller/actions-runner-controller/controllers/metrics"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
@@ -65,8 +65,7 @@ type RunnerDeploymentReconciler struct {
|
|||||||
// +kubebuilder:rbac:groups=actions.summerwind.dev,resources=runnerreplicasets/status,verbs=get;update;patch
|
// +kubebuilder:rbac:groups=actions.summerwind.dev,resources=runnerreplicasets/status,verbs=get;update;patch
|
||||||
// +kubebuilder:rbac:groups=core,resources=events,verbs=create;patch
|
// +kubebuilder:rbac:groups=core,resources=events,verbs=create;patch
|
||||||
|
|
||||||
func (r *RunnerDeploymentReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
|
func (r *RunnerDeploymentReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
|
||||||
ctx := context.Background()
|
|
||||||
log := r.Log.WithValues("runnerdeployment", req.NamespacedName)
|
log := r.Log.WithValues("runnerdeployment", req.NamespacedName)
|
||||||
|
|
||||||
var rd v1alpha1.RunnerDeployment
|
var rd v1alpha1.RunnerDeployment
|
||||||
@@ -155,7 +154,7 @@ func (r *RunnerDeploymentReconciler) Reconcile(req ctrl.Request) (ctrl.Result, e
|
|||||||
// A selector update change doesn't trigger replicaset replacement,
|
// A selector update change doesn't trigger replicaset replacement,
|
||||||
// but we still need to update the existing replicaset with it.
|
// but we still need to update the existing replicaset with it.
|
||||||
// Otherwise selector-based runner query will never work on replicasets created before the controller v0.17.0
|
// Otherwise selector-based runner query will never work on replicasets created before the controller v0.17.0
|
||||||
// See https://github.com/summerwind/actions-runner-controller/pull/355#discussion_r585379259
|
// See https://github.com/actions-runner-controller/actions-runner-controller/pull/355#discussion_r585379259
|
||||||
if err := r.Client.Update(ctx, updateSet); err != nil {
|
if err := r.Client.Update(ctx, updateSet); err != nil {
|
||||||
log.Error(err, "Failed to update runnerreplicaset resource")
|
log.Error(err, "Failed to update runnerreplicaset resource")
|
||||||
|
|
||||||
@@ -188,9 +187,12 @@ func (r *RunnerDeploymentReconciler) Reconcile(req ctrl.Request) (ctrl.Result, e
|
|||||||
return ctrl.Result{}, err
|
return ctrl.Result{}, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// Do we old runner replica sets that should eventually deleted?
|
// Do we have old runner replica sets that should eventually deleted?
|
||||||
if len(oldSets) > 0 {
|
if len(oldSets) > 0 {
|
||||||
readyReplicas := newestSet.Status.ReadyReplicas
|
var readyReplicas int
|
||||||
|
if newestSet.Status.ReadyReplicas != nil {
|
||||||
|
readyReplicas = *newestSet.Status.ReadyReplicas
|
||||||
|
}
|
||||||
|
|
||||||
oldSetsCount := len(oldSets)
|
oldSetsCount := len(oldSets)
|
||||||
|
|
||||||
@@ -231,14 +233,49 @@ func (r *RunnerDeploymentReconciler) Reconcile(req ctrl.Request) (ctrl.Result, e
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if rd.Spec.Replicas == nil && desiredRS.Spec.Replicas != nil {
|
var replicaSets []v1alpha1.RunnerReplicaSet
|
||||||
|
|
||||||
|
replicaSets = append(replicaSets, *newestSet)
|
||||||
|
replicaSets = append(replicaSets, oldSets...)
|
||||||
|
|
||||||
|
var totalCurrentReplicas, totalStatusAvailableReplicas, updatedReplicas int
|
||||||
|
|
||||||
|
for _, rs := range replicaSets {
|
||||||
|
var current, available int
|
||||||
|
|
||||||
|
if rs.Status.Replicas != nil {
|
||||||
|
current = *rs.Status.Replicas
|
||||||
|
}
|
||||||
|
|
||||||
|
if rs.Status.AvailableReplicas != nil {
|
||||||
|
available = *rs.Status.AvailableReplicas
|
||||||
|
}
|
||||||
|
|
||||||
|
totalCurrentReplicas += current
|
||||||
|
totalStatusAvailableReplicas += available
|
||||||
|
}
|
||||||
|
|
||||||
|
if newestSet.Status.Replicas != nil {
|
||||||
|
updatedReplicas = *newestSet.Status.Replicas
|
||||||
|
}
|
||||||
|
|
||||||
|
var status v1alpha1.RunnerDeploymentStatus
|
||||||
|
|
||||||
|
status.AvailableReplicas = &totalStatusAvailableReplicas
|
||||||
|
status.ReadyReplicas = &totalStatusAvailableReplicas
|
||||||
|
status.DesiredReplicas = &newDesiredReplicas
|
||||||
|
status.Replicas = &totalCurrentReplicas
|
||||||
|
status.UpdatedReplicas = &updatedReplicas
|
||||||
|
|
||||||
|
if !reflect.DeepEqual(rd.Status, status) {
|
||||||
updated := rd.DeepCopy()
|
updated := rd.DeepCopy()
|
||||||
updated.Status.Replicas = desiredRS.Spec.Replicas
|
updated.Status = status
|
||||||
|
|
||||||
if err := r.Status().Update(ctx, updated); err != nil {
|
if err := r.Status().Patch(ctx, updated, client.MergeFrom(&rd)); err != nil {
|
||||||
log.Error(err, "Failed to update runnerdeployment status")
|
log.Info("Failed to patch runnerdeployment status. Retrying immediately", "error", err.Error())
|
||||||
|
return ctrl.Result{
|
||||||
return ctrl.Result{}, err
|
Requeue: true,
|
||||||
|
}, nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -401,7 +438,7 @@ func (r *RunnerDeploymentReconciler) SetupWithManager(mgr ctrl.Manager) error {
|
|||||||
|
|
||||||
r.Recorder = mgr.GetEventRecorderFor(name)
|
r.Recorder = mgr.GetEventRecorderFor(name)
|
||||||
|
|
||||||
if err := mgr.GetFieldIndexer().IndexField(&v1alpha1.RunnerReplicaSet{}, runnerSetOwnerKey, func(rawObj runtime.Object) []string {
|
if err := mgr.GetFieldIndexer().IndexField(context.TODO(), &v1alpha1.RunnerReplicaSet{}, runnerSetOwnerKey, func(rawObj client.Object) []string {
|
||||||
runnerSet := rawObj.(*v1alpha1.RunnerReplicaSet)
|
runnerSet := rawObj.(*v1alpha1.RunnerReplicaSet)
|
||||||
owner := metav1.GetControllerOf(runnerSet)
|
owner := metav1.GetControllerOf(runnerSet)
|
||||||
if owner == nil {
|
if owner == nil {
|
||||||
|
|||||||
@@ -20,7 +20,7 @@ import (
|
|||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||||
|
|
||||||
actionsv1alpha1 "github.com/summerwind/actions-runner-controller/api/v1alpha1"
|
actionsv1alpha1 "github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestNewRunnerReplicaSet(t *testing.T) {
|
func TestNewRunnerReplicaSet(t *testing.T) {
|
||||||
@@ -50,7 +50,9 @@ func TestNewRunnerReplicaSet(t *testing.T) {
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
Spec: actionsv1alpha1.RunnerSpec{
|
Spec: actionsv1alpha1.RunnerSpec{
|
||||||
Labels: []string{"project1"},
|
RunnerConfig: actionsv1alpha1.RunnerConfig{
|
||||||
|
Labels: []string{"project1"},
|
||||||
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
@@ -126,12 +128,13 @@ func TestNewRunnerReplicaSet(t *testing.T) {
|
|||||||
// * starting the 'RunnerDeploymentReconciler'
|
// * starting the 'RunnerDeploymentReconciler'
|
||||||
// * stopping the 'RunnerDeploymentReconciler" after the test ends
|
// * stopping the 'RunnerDeploymentReconciler" after the test ends
|
||||||
// Call this function at the start of each of your tests.
|
// Call this function at the start of each of your tests.
|
||||||
func SetupDeploymentTest(ctx context.Context) *corev1.Namespace {
|
func SetupDeploymentTest(ctx2 context.Context) *corev1.Namespace {
|
||||||
var stopCh chan struct{}
|
var ctx context.Context
|
||||||
|
var cancel func()
|
||||||
ns := &corev1.Namespace{}
|
ns := &corev1.Namespace{}
|
||||||
|
|
||||||
BeforeEach(func() {
|
BeforeEach(func() {
|
||||||
stopCh = make(chan struct{})
|
ctx, cancel = context.WithCancel(ctx2)
|
||||||
*ns = corev1.Namespace{
|
*ns = corev1.Namespace{
|
||||||
ObjectMeta: metav1.ObjectMeta{Name: "testns-" + randStringRunes(5)},
|
ObjectMeta: metav1.ObjectMeta{Name: "testns-" + randStringRunes(5)},
|
||||||
}
|
}
|
||||||
@@ -157,13 +160,13 @@ func SetupDeploymentTest(ctx context.Context) *corev1.Namespace {
|
|||||||
go func() {
|
go func() {
|
||||||
defer GinkgoRecover()
|
defer GinkgoRecover()
|
||||||
|
|
||||||
err := mgr.Start(stopCh)
|
err := mgr.Start(ctx)
|
||||||
Expect(err).NotTo(HaveOccurred(), "failed to start manager")
|
Expect(err).NotTo(HaveOccurred(), "failed to start manager")
|
||||||
}()
|
}()
|
||||||
})
|
})
|
||||||
|
|
||||||
AfterEach(func() {
|
AfterEach(func() {
|
||||||
close(stopCh)
|
defer cancel()
|
||||||
|
|
||||||
err := k8sClient.Delete(ctx, ns)
|
err := k8sClient.Delete(ctx, ns)
|
||||||
Expect(err).NotTo(HaveOccurred(), "failed to delete test namespace")
|
Expect(err).NotTo(HaveOccurred(), "failed to delete test namespace")
|
||||||
@@ -201,10 +204,14 @@ var _ = Context("Inside of a new namespace", func() {
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
Spec: actionsv1alpha1.RunnerSpec{
|
Spec: actionsv1alpha1.RunnerSpec{
|
||||||
Repository: "test/valid",
|
RunnerConfig: actionsv1alpha1.RunnerConfig{
|
||||||
Image: "bar",
|
Repository: "test/valid",
|
||||||
Env: []corev1.EnvVar{
|
Image: "bar",
|
||||||
{Name: "FOO", Value: "FOOVALUE"},
|
},
|
||||||
|
RunnerPodSpec: actionsv1alpha1.RunnerPodSpec{
|
||||||
|
Env: []corev1.EnvVar{
|
||||||
|
{Name: "FOO", Value: "FOOVALUE"},
|
||||||
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
@@ -297,10 +304,14 @@ var _ = Context("Inside of a new namespace", func() {
|
|||||||
Replicas: intPtr(1),
|
Replicas: intPtr(1),
|
||||||
Template: actionsv1alpha1.RunnerTemplate{
|
Template: actionsv1alpha1.RunnerTemplate{
|
||||||
Spec: actionsv1alpha1.RunnerSpec{
|
Spec: actionsv1alpha1.RunnerSpec{
|
||||||
Repository: "test/valid",
|
RunnerConfig: actionsv1alpha1.RunnerConfig{
|
||||||
Image: "bar",
|
Repository: "test/valid",
|
||||||
Env: []corev1.EnvVar{
|
Image: "bar",
|
||||||
{Name: "FOO", Value: "FOOVALUE"},
|
},
|
||||||
|
RunnerPodSpec: actionsv1alpha1.RunnerPodSpec{
|
||||||
|
Env: []corev1.EnvVar{
|
||||||
|
{Name: "FOO", Value: "FOOVALUE"},
|
||||||
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
@@ -393,10 +404,14 @@ var _ = Context("Inside of a new namespace", func() {
|
|||||||
Replicas: intPtr(1),
|
Replicas: intPtr(1),
|
||||||
Template: actionsv1alpha1.RunnerTemplate{
|
Template: actionsv1alpha1.RunnerTemplate{
|
||||||
Spec: actionsv1alpha1.RunnerSpec{
|
Spec: actionsv1alpha1.RunnerSpec{
|
||||||
Repository: "test/valid",
|
RunnerConfig: actionsv1alpha1.RunnerConfig{
|
||||||
Image: "bar",
|
Repository: "test/valid",
|
||||||
Env: []corev1.EnvVar{
|
Image: "bar",
|
||||||
{Name: "FOO", Value: "FOOVALUE"},
|
},
|
||||||
|
RunnerPodSpec: actionsv1alpha1.RunnerPodSpec{
|
||||||
|
Env: []corev1.EnvVar{
|
||||||
|
{Name: "FOO", Value: "FOOVALUE"},
|
||||||
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
|||||||
@@ -20,9 +20,10 @@ import (
|
|||||||
"context"
|
"context"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"reflect"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
gogithub "github.com/google/go-github/v33/github"
|
gogithub "github.com/google/go-github/v37/github"
|
||||||
|
|
||||||
"github.com/go-logr/logr"
|
"github.com/go-logr/logr"
|
||||||
kerrors "k8s.io/apimachinery/pkg/api/errors"
|
kerrors "k8s.io/apimachinery/pkg/api/errors"
|
||||||
@@ -34,8 +35,8 @@ import (
|
|||||||
corev1 "k8s.io/api/core/v1"
|
corev1 "k8s.io/api/core/v1"
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
|
|
||||||
"github.com/summerwind/actions-runner-controller/api/v1alpha1"
|
"github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
|
||||||
"github.com/summerwind/actions-runner-controller/github"
|
"github.com/actions-runner-controller/actions-runner-controller/github"
|
||||||
)
|
)
|
||||||
|
|
||||||
// RunnerReplicaSetReconciler reconciles a Runner object
|
// RunnerReplicaSetReconciler reconciles a Runner object
|
||||||
@@ -55,8 +56,7 @@ type RunnerReplicaSetReconciler struct {
|
|||||||
// +kubebuilder:rbac:groups=actions.summerwind.dev,resources=runners/status,verbs=get;update;patch
|
// +kubebuilder:rbac:groups=actions.summerwind.dev,resources=runners/status,verbs=get;update;patch
|
||||||
// +kubebuilder:rbac:groups=core,resources=events,verbs=create;patch
|
// +kubebuilder:rbac:groups=core,resources=events,verbs=create;patch
|
||||||
|
|
||||||
func (r *RunnerReplicaSetReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
|
func (r *RunnerReplicaSetReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
|
||||||
ctx := context.Background()
|
|
||||||
log := r.Log.WithValues("runnerreplicaset", req.NamespacedName)
|
log := r.Log.WithValues("runnerreplicaset", req.NamespacedName)
|
||||||
|
|
||||||
var rs v1alpha1.RunnerReplicaSet
|
var rs v1alpha1.RunnerReplicaSet
|
||||||
@@ -88,20 +88,23 @@ func (r *RunnerReplicaSetReconciler) Reconcile(req ctrl.Request) (ctrl.Result, e
|
|||||||
var myRunners []v1alpha1.Runner
|
var myRunners []v1alpha1.Runner
|
||||||
|
|
||||||
var (
|
var (
|
||||||
available int
|
current int
|
||||||
ready int
|
ready int
|
||||||
|
available int
|
||||||
)
|
)
|
||||||
|
|
||||||
for _, r := range allRunners.Items {
|
for _, r := range allRunners.Items {
|
||||||
// This guard is required to avoid the RunnerReplicaSet created by the controller v0.17.0 or before
|
// This guard is required to avoid the RunnerReplicaSet created by the controller v0.17.0 or before
|
||||||
// to not treat all the runners in the namespace as its children.
|
// to not treat all the runners in the namespace as its children.
|
||||||
if metav1.IsControlledBy(&r, &rs) {
|
if metav1.IsControlledBy(&r, &rs) && !metav1.HasAnnotation(r.ObjectMeta, annotationKeyRegistrationOnly) {
|
||||||
myRunners = append(myRunners, r)
|
myRunners = append(myRunners, r)
|
||||||
|
|
||||||
available += 1
|
current += 1
|
||||||
|
|
||||||
if r.Status.Phase == string(corev1.PodRunning) {
|
if r.Status.Phase == string(corev1.PodRunning) {
|
||||||
ready += 1
|
ready += 1
|
||||||
|
// available is currently the same as ready, as we don't yet have minReadySeconds for runners
|
||||||
|
available += 1
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -114,10 +117,75 @@ func (r *RunnerReplicaSetReconciler) Reconcile(req ctrl.Request) (ctrl.Result, e
|
|||||||
desired = 1
|
desired = 1
|
||||||
}
|
}
|
||||||
|
|
||||||
if available > desired {
|
registrationOnlyRunnerNsName := req.NamespacedName
|
||||||
n := available - desired
|
registrationOnlyRunnerNsName.Name = registrationOnlyRunnerNameFor(rs.Name)
|
||||||
|
registrationOnlyRunner := v1alpha1.Runner{}
|
||||||
|
registrationOnlyRunnerExists := false
|
||||||
|
if err := r.Get(
|
||||||
|
ctx,
|
||||||
|
registrationOnlyRunnerNsName,
|
||||||
|
®istrationOnlyRunner,
|
||||||
|
); err != nil {
|
||||||
|
if !kerrors.IsNotFound(err) {
|
||||||
|
return ctrl.Result{}, err
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
registrationOnlyRunnerExists = true
|
||||||
|
}
|
||||||
|
|
||||||
log.V(0).Info(fmt.Sprintf("Deleting %d runners", n), "desired", desired, "available", available, "ready", ready)
|
// On scale to zero, we must have fully registered registration-only runner before we start deleting other runners, hence `desired == 0`
|
||||||
|
// On scale from zero, we must retain the registration-only runner until one or more other runners get registered, hence `registrationOnlyRunnerExists && available == 0`.
|
||||||
|
// On RunnerReplicaSet creation, it always has 0 replicas and no registration-only runner.
|
||||||
|
// In this case we don't need to bother creating a registration-only runner which gets deleted soon after we have 1 or more available replicas,
|
||||||
|
// hence it's not `available == 0`, but `registrationOnlyRunnerExists && available == 0`.
|
||||||
|
// See https://github.com/actions-runner-controller/actions-runner-controller/issues/516
|
||||||
|
registrationOnlyRunnerNeeded := desired == 0 || (registrationOnlyRunnerExists && current == 0)
|
||||||
|
|
||||||
|
if registrationOnlyRunnerNeeded {
|
||||||
|
if registrationOnlyRunnerExists {
|
||||||
|
if registrationOnlyRunner.Status.Phase == "" {
|
||||||
|
log.Info("Still waiting for the registration-only runner to be registered")
|
||||||
|
|
||||||
|
return ctrl.Result{}, nil
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// A registration-only runner does not exist and is needed, hence create it.
|
||||||
|
|
||||||
|
runnerForScaleFromToZero, err := r.newRunner(rs)
|
||||||
|
if err != nil {
|
||||||
|
return ctrl.Result{}, fmt.Errorf("failed to create runner for scale from/to zero: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
runnerForScaleFromToZero.ObjectMeta.Name = registrationOnlyRunnerNsName.Name
|
||||||
|
runnerForScaleFromToZero.ObjectMeta.GenerateName = ""
|
||||||
|
runnerForScaleFromToZero.ObjectMeta.Labels = nil
|
||||||
|
metav1.SetMetaDataAnnotation(&runnerForScaleFromToZero.ObjectMeta, annotationKeyRegistrationOnly, "true")
|
||||||
|
|
||||||
|
if err := r.Client.Create(ctx, &runnerForScaleFromToZero); err != nil {
|
||||||
|
log.Error(err, "Failed to create runner for scale from/to zero")
|
||||||
|
|
||||||
|
return ctrl.Result{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// We can continue to deleting runner pods only after the
|
||||||
|
// registration-only runner gets registered.
|
||||||
|
return ctrl.Result{}, nil
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// A registration-only runner exists and is not needed, hence delete it.
|
||||||
|
if registrationOnlyRunnerExists {
|
||||||
|
if err := r.Client.Delete(ctx, ®istrationOnlyRunner); err != nil {
|
||||||
|
log.Error(err, "Retrying soon because we failed to delete registration-only runner")
|
||||||
|
|
||||||
|
return ctrl.Result{Requeue: true}, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if current > desired {
|
||||||
|
n := current - desired
|
||||||
|
|
||||||
|
log.V(0).Info(fmt.Sprintf("Deleting %d runners", n), "desired", desired, "current", current, "ready", ready)
|
||||||
|
|
||||||
// get runners that are currently offline/not busy/timed-out to register
|
// get runners that are currently offline/not busy/timed-out to register
|
||||||
var deletionCandidates []v1alpha1.Runner
|
var deletionCandidates []v1alpha1.Runner
|
||||||
@@ -163,7 +231,7 @@ func (r *RunnerReplicaSetReconciler) Reconcile(req ctrl.Request) (ctrl.Result, e
|
|||||||
"Runner failed to register itself to GitHub in timely manner. "+
|
"Runner failed to register itself to GitHub in timely manner. "+
|
||||||
"Marking the runner for scale down. "+
|
"Marking the runner for scale down. "+
|
||||||
"CAUTION: If you see this a lot, you should investigate the root cause. "+
|
"CAUTION: If you see this a lot, you should investigate the root cause. "+
|
||||||
"See https://github.com/summerwind/actions-runner-controller/issues/288",
|
"See https://github.com/actions-runner-controller/actions-runner-controller/issues/288",
|
||||||
"runnerCreationTimestamp", runner.CreationTimestamp,
|
"runnerCreationTimestamp", runner.CreationTimestamp,
|
||||||
"currentTime", currentTime,
|
"currentTime", currentTime,
|
||||||
"configuredRegistrationTimeout", registrationTimeout,
|
"configuredRegistrationTimeout", registrationTimeout,
|
||||||
@@ -185,6 +253,8 @@ func (r *RunnerReplicaSetReconciler) Reconcile(req ctrl.Request) (ctrl.Result, e
|
|||||||
n = len(deletionCandidates)
|
n = len(deletionCandidates)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
log.V(0).Info(fmt.Sprintf("Deleting %d runner(s)", n), "desired", desired, "current", current, "ready", ready)
|
||||||
|
|
||||||
for i := 0; i < n; i++ {
|
for i := 0; i < n; i++ {
|
||||||
if err := r.Client.Delete(ctx, &deletionCandidates[i]); client.IgnoreNotFound(err) != nil {
|
if err := r.Client.Delete(ctx, &deletionCandidates[i]); client.IgnoreNotFound(err) != nil {
|
||||||
log.Error(err, "Failed to delete runner resource")
|
log.Error(err, "Failed to delete runner resource")
|
||||||
@@ -195,10 +265,10 @@ func (r *RunnerReplicaSetReconciler) Reconcile(req ctrl.Request) (ctrl.Result, e
|
|||||||
r.Recorder.Event(&rs, corev1.EventTypeNormal, "RunnerDeleted", fmt.Sprintf("Deleted runner '%s'", deletionCandidates[i].Name))
|
r.Recorder.Event(&rs, corev1.EventTypeNormal, "RunnerDeleted", fmt.Sprintf("Deleted runner '%s'", deletionCandidates[i].Name))
|
||||||
log.Info("Deleted runner")
|
log.Info("Deleted runner")
|
||||||
}
|
}
|
||||||
} else if desired > available {
|
} else if desired > current {
|
||||||
n := desired - available
|
n := desired - current
|
||||||
|
|
||||||
log.V(0).Info(fmt.Sprintf("Creating %d runner(s)", n), "desired", desired, "available", available, "ready", ready)
|
log.V(0).Info(fmt.Sprintf("Creating %d runner(s)", n), "desired", desired, "available", current, "ready", ready)
|
||||||
|
|
||||||
for i := 0; i < n; i++ {
|
for i := 0; i < n; i++ {
|
||||||
newRunner, err := r.newRunner(rs)
|
newRunner, err := r.newRunner(rs)
|
||||||
@@ -216,13 +286,18 @@ func (r *RunnerReplicaSetReconciler) Reconcile(req ctrl.Request) (ctrl.Result, e
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if rs.Status.AvailableReplicas != available || rs.Status.ReadyReplicas != ready {
|
var status v1alpha1.RunnerReplicaSetStatus
|
||||||
updated := rs.DeepCopy()
|
|
||||||
updated.Status.AvailableReplicas = available
|
|
||||||
updated.Status.ReadyReplicas = ready
|
|
||||||
|
|
||||||
if err := r.Status().Update(ctx, updated); err != nil {
|
status.Replicas = ¤t
|
||||||
log.Info("Failed to update status. Retrying immediately", "error", err.Error())
|
status.AvailableReplicas = &available
|
||||||
|
status.ReadyReplicas = &ready
|
||||||
|
|
||||||
|
if !reflect.DeepEqual(rs.Status, status) {
|
||||||
|
updated := rs.DeepCopy()
|
||||||
|
updated.Status = status
|
||||||
|
|
||||||
|
if err := r.Status().Patch(ctx, updated, client.MergeFrom(&rs)); err != nil {
|
||||||
|
log.Info("Failed to update runnerreplicaset status. Retrying immediately", "error", err.Error())
|
||||||
return ctrl.Result{
|
return ctrl.Result{
|
||||||
Requeue: true,
|
Requeue: true,
|
||||||
}, nil
|
}, nil
|
||||||
@@ -265,3 +340,7 @@ func (r *RunnerReplicaSetReconciler) SetupWithManager(mgr ctrl.Manager) error {
|
|||||||
Named(name).
|
Named(name).
|
||||||
Complete(r)
|
Complete(r)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func registrationOnlyRunnerNameFor(rsName string) string {
|
||||||
|
return rsName + "-registration-only"
|
||||||
|
}
|
||||||
|
|||||||
@@ -2,15 +2,14 @@ package controllers
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"fmt"
|
||||||
"math/rand"
|
"math/rand"
|
||||||
"net/http/httptest"
|
"net/http/httptest"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/google/go-github/v33/github"
|
|
||||||
corev1 "k8s.io/api/core/v1"
|
corev1 "k8s.io/api/core/v1"
|
||||||
"k8s.io/apimachinery/pkg/types"
|
"k8s.io/apimachinery/pkg/types"
|
||||||
"k8s.io/client-go/kubernetes/scheme"
|
"k8s.io/client-go/kubernetes/scheme"
|
||||||
"k8s.io/utils/pointer"
|
|
||||||
ctrl "sigs.k8s.io/controller-runtime"
|
ctrl "sigs.k8s.io/controller-runtime"
|
||||||
logf "sigs.k8s.io/controller-runtime/pkg/log"
|
logf "sigs.k8s.io/controller-runtime/pkg/log"
|
||||||
|
|
||||||
@@ -19,8 +18,8 @@ import (
|
|||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||||
|
|
||||||
actionsv1alpha1 "github.com/summerwind/actions-runner-controller/api/v1alpha1"
|
actionsv1alpha1 "github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
|
||||||
"github.com/summerwind/actions-runner-controller/github/fake"
|
"github.com/actions-runner-controller/actions-runner-controller/github/fake"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
@@ -34,12 +33,13 @@ var (
|
|||||||
// * starting the 'RunnerReconciler'
|
// * starting the 'RunnerReconciler'
|
||||||
// * stopping the 'RunnerReplicaSetReconciler" after the test ends
|
// * stopping the 'RunnerReplicaSetReconciler" after the test ends
|
||||||
// Call this function at the start of each of your tests.
|
// Call this function at the start of each of your tests.
|
||||||
func SetupTest(ctx context.Context) *corev1.Namespace {
|
func SetupTest(ctx2 context.Context) *corev1.Namespace {
|
||||||
var stopCh chan struct{}
|
var ctx context.Context
|
||||||
|
var cancel func()
|
||||||
ns := &corev1.Namespace{}
|
ns := &corev1.Namespace{}
|
||||||
|
|
||||||
BeforeEach(func() {
|
BeforeEach(func() {
|
||||||
stopCh = make(chan struct{})
|
ctx, cancel = context.WithCancel(ctx2)
|
||||||
*ns = corev1.Namespace{
|
*ns = corev1.Namespace{
|
||||||
ObjectMeta: metav1.ObjectMeta{Name: "testns-" + randStringRunes(5)},
|
ObjectMeta: metav1.ObjectMeta{Name: "testns-" + randStringRunes(5)},
|
||||||
}
|
}
|
||||||
@@ -70,13 +70,13 @@ func SetupTest(ctx context.Context) *corev1.Namespace {
|
|||||||
go func() {
|
go func() {
|
||||||
defer GinkgoRecover()
|
defer GinkgoRecover()
|
||||||
|
|
||||||
err := mgr.Start(stopCh)
|
err := mgr.Start(ctx)
|
||||||
Expect(err).NotTo(HaveOccurred(), "failed to start manager")
|
Expect(err).NotTo(HaveOccurred(), "failed to start manager")
|
||||||
}()
|
}()
|
||||||
})
|
})
|
||||||
|
|
||||||
AfterEach(func() {
|
AfterEach(func() {
|
||||||
close(stopCh)
|
defer cancel()
|
||||||
|
|
||||||
server.Close()
|
server.Close()
|
||||||
err := k8sClient.Delete(ctx, ns)
|
err := k8sClient.Delete(ctx, ns)
|
||||||
@@ -129,10 +129,14 @@ var _ = Context("Inside of a new namespace", func() {
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
Spec: actionsv1alpha1.RunnerSpec{
|
Spec: actionsv1alpha1.RunnerSpec{
|
||||||
Repository: "test/valid",
|
RunnerConfig: actionsv1alpha1.RunnerConfig{
|
||||||
Image: "bar",
|
Repository: "test/valid",
|
||||||
Env: []corev1.EnvVar{
|
Image: "bar",
|
||||||
{Name: "FOO", Value: "FOOVALUE"},
|
},
|
||||||
|
RunnerPodSpec: actionsv1alpha1.RunnerPodSpec{
|
||||||
|
Env: []corev1.EnvVar{
|
||||||
|
{Name: "FOO", Value: "FOOVALUE"},
|
||||||
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
@@ -169,15 +173,7 @@ var _ = Context("Inside of a new namespace", func() {
|
|||||||
return -1
|
return -1
|
||||||
}
|
}
|
||||||
|
|
||||||
for i, runner := range runners.Items {
|
runnersList.Sync(runners.Items)
|
||||||
runnersList.Add(&github.Runner{
|
|
||||||
ID: pointer.Int64Ptr(int64(i) + 1),
|
|
||||||
Name: pointer.StringPtr(runner.Name),
|
|
||||||
OS: pointer.StringPtr("linux"),
|
|
||||||
Status: pointer.StringPtr("online"),
|
|
||||||
Busy: pointer.BoolPtr(false),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
return len(runners.Items)
|
return len(runners.Items)
|
||||||
},
|
},
|
||||||
@@ -226,15 +222,7 @@ var _ = Context("Inside of a new namespace", func() {
|
|||||||
logf.Log.Error(err, "list runners")
|
logf.Log.Error(err, "list runners")
|
||||||
}
|
}
|
||||||
|
|
||||||
for i, runner := range runners.Items {
|
runnersList.Sync(runners.Items)
|
||||||
runnersList.Add(&github.Runner{
|
|
||||||
ID: pointer.Int64Ptr(int64(i) + 1),
|
|
||||||
Name: pointer.StringPtr(runner.Name),
|
|
||||||
OS: pointer.StringPtr("linux"),
|
|
||||||
Status: pointer.StringPtr("online"),
|
|
||||||
Busy: pointer.BoolPtr(false),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
return len(runners.Items)
|
return len(runners.Items)
|
||||||
},
|
},
|
||||||
@@ -262,21 +250,35 @@ var _ = Context("Inside of a new namespace", func() {
|
|||||||
|
|
||||||
Eventually(
|
Eventually(
|
||||||
func() int {
|
func() int {
|
||||||
err := k8sClient.List(ctx, &runners, client.InNamespace(ns.Name))
|
selector, err := metav1.LabelSelectorAsSelector(&metav1.LabelSelector{
|
||||||
if err != nil {
|
MatchLabels: map[string]string{
|
||||||
|
"foo": "bar",
|
||||||
|
},
|
||||||
|
})
|
||||||
|
Expect(err).ToNot(HaveOccurred())
|
||||||
|
|
||||||
|
var regOnly actionsv1alpha1.Runner
|
||||||
|
if err := k8sClient.Get(ctx, types.NamespacedName{Namespace: ns.Name, Name: registrationOnlyRunnerNameFor(name)}, ®Only); err != nil {
|
||||||
|
logf.Log.Info(fmt.Sprintf("Failed getting registration-only runner in test: %v", err))
|
||||||
|
return -1
|
||||||
|
} else {
|
||||||
|
updated := regOnly.DeepCopy()
|
||||||
|
updated.Status.Phase = "Completed"
|
||||||
|
|
||||||
|
if err := k8sClient.Status().Patch(ctx, updated, client.MergeFrom(®Only)); err != nil {
|
||||||
|
logf.Log.Info(fmt.Sprintf("Failed updating registration-only runner in test: %v", err))
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
|
||||||
|
runnersList.AddOffline([]actionsv1alpha1.Runner{*updated})
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := k8sClient.List(ctx, &runners, client.InNamespace(ns.Name), client.MatchingLabelsSelector{Selector: selector}); err != nil {
|
||||||
logf.Log.Error(err, "list runners")
|
logf.Log.Error(err, "list runners")
|
||||||
return -1
|
return -1
|
||||||
}
|
}
|
||||||
|
|
||||||
for i, runner := range runners.Items {
|
runnersList.Sync(runners.Items)
|
||||||
runnersList.Add(&github.Runner{
|
|
||||||
ID: pointer.Int64Ptr(int64(i) + 1),
|
|
||||||
Name: pointer.StringPtr(runner.Name),
|
|
||||||
OS: pointer.StringPtr("linux"),
|
|
||||||
Status: pointer.StringPtr("online"),
|
|
||||||
Busy: pointer.BoolPtr(false),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
return len(runners.Items)
|
return len(runners.Items)
|
||||||
},
|
},
|
||||||
|
|||||||
318
controllers/runnerset_controller.go
Normal file
318
controllers/runnerset_controller.go
Normal file
@@ -0,0 +1,318 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2021 The actions-runner-controller authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package controllers
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"reflect"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
appsv1 "k8s.io/api/apps/v1"
|
||||||
|
"k8s.io/apimachinery/pkg/api/errors"
|
||||||
|
"k8s.io/apimachinery/pkg/types"
|
||||||
|
|
||||||
|
"github.com/go-logr/logr"
|
||||||
|
"k8s.io/apimachinery/pkg/runtime"
|
||||||
|
"k8s.io/client-go/tools/record"
|
||||||
|
ctrl "sigs.k8s.io/controller-runtime"
|
||||||
|
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||||
|
|
||||||
|
corev1 "k8s.io/api/core/v1"
|
||||||
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
|
|
||||||
|
"github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
|
||||||
|
"github.com/actions-runner-controller/actions-runner-controller/controllers/metrics"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
LabelKeyRunnerSetName = "runnerset-name"
|
||||||
|
)
|
||||||
|
|
||||||
|
// RunnerSetReconciler reconciles a Runner object
|
||||||
|
type RunnerSetReconciler struct {
|
||||||
|
Name string
|
||||||
|
|
||||||
|
client.Client
|
||||||
|
Log logr.Logger
|
||||||
|
Recorder record.EventRecorder
|
||||||
|
Scheme *runtime.Scheme
|
||||||
|
|
||||||
|
CommonRunnerLabels []string
|
||||||
|
GitHubBaseURL string
|
||||||
|
RunnerImage string
|
||||||
|
DockerImage string
|
||||||
|
DockerRegistryMirror string
|
||||||
|
}
|
||||||
|
|
||||||
|
// +kubebuilder:rbac:groups=actions.summerwind.dev,resources=runnersets,verbs=get;list;watch;create;update;patch;delete
|
||||||
|
// +kubebuilder:rbac:groups=actions.summerwind.dev,resources=runnersets/finalizers,verbs=get;list;watch;create;update;patch;delete
|
||||||
|
// +kubebuilder:rbac:groups=actions.summerwind.dev,resources=runnersets/status,verbs=get;update;patch
|
||||||
|
// +kubebuilder:rbac:groups=apps,resources=statefulsets,verbs=get;list;watch;create;update;patch;delete
|
||||||
|
// +kubebuilder:rbac:groups=apps,resources=statefulsets/status,verbs=get;update;patch
|
||||||
|
// +kubebuilder:rbac:groups=core,resources=events,verbs=create;patch
|
||||||
|
// +kubebuilder:rbac:groups=coordination.k8s.io,resources=leases,verbs=get;list;create;update
|
||||||
|
|
||||||
|
// Note that coordination.k8s.io/leases permission must be added to any of the controllers to avoid the following error:
|
||||||
|
// E0613 07:02:08.004278 1 leaderelection.go:325] error retrieving resource lock actions-runner-system/actions-runner-controller: leases.coordination.k8s.io "actions-runner-controller" is forbidden: User "system:serviceaccount:actions-runner-system:actions-runner-controller" cannot get resource "leases" in API group "coordination.k8s.io" in the namespace "actions-runner-system"
|
||||||
|
|
||||||
|
func (r *RunnerSetReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
|
||||||
|
log := r.Log.WithValues("runnerset", req.NamespacedName)
|
||||||
|
|
||||||
|
runnerSet := &v1alpha1.RunnerSet{}
|
||||||
|
if err := r.Get(ctx, req.NamespacedName, runnerSet); err != nil {
|
||||||
|
err = client.IgnoreNotFound(err)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
log.Error(err, "Could not get RunnerSet")
|
||||||
|
}
|
||||||
|
|
||||||
|
return ctrl.Result{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if !runnerSet.ObjectMeta.DeletionTimestamp.IsZero() {
|
||||||
|
return ctrl.Result{}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
metrics.SetRunnerSet(*runnerSet)
|
||||||
|
|
||||||
|
desiredStatefulSet, err := r.newStatefulSet(runnerSet)
|
||||||
|
if err != nil {
|
||||||
|
r.Recorder.Event(runnerSet, corev1.EventTypeNormal, "RunnerAutoscalingFailure", err.Error())
|
||||||
|
|
||||||
|
log.Error(err, "Could not create statefulset")
|
||||||
|
|
||||||
|
return ctrl.Result{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
liveStatefulSet := &appsv1.StatefulSet{}
|
||||||
|
if err := r.Get(ctx, types.NamespacedName{Namespace: runnerSet.Namespace, Name: runnerSet.Name}, liveStatefulSet); err != nil {
|
||||||
|
if !errors.IsNotFound(err) {
|
||||||
|
log.Error(err, "Failed to get live statefulset")
|
||||||
|
|
||||||
|
return ctrl.Result{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := r.Client.Create(ctx, desiredStatefulSet); err != nil {
|
||||||
|
log.Error(err, "Failed to create statefulset resource")
|
||||||
|
|
||||||
|
return ctrl.Result{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return ctrl.Result{}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
liveTemplateHash, ok := getStatefulSetTemplateHash(liveStatefulSet)
|
||||||
|
if !ok {
|
||||||
|
log.Info("Failed to get template hash of newest statefulset resource. It must be in an invalid state. Please manually delete the statefulset so that it is recreated")
|
||||||
|
|
||||||
|
return ctrl.Result{}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
desiredTemplateHash, ok := getStatefulSetTemplateHash(desiredStatefulSet)
|
||||||
|
if !ok {
|
||||||
|
log.Info("Failed to get template hash of desired statefulset. It must be in an invalid state. Please manually delete the statefulset so that it is recreated")
|
||||||
|
|
||||||
|
return ctrl.Result{}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if liveTemplateHash != desiredTemplateHash {
|
||||||
|
copy := liveStatefulSet.DeepCopy()
|
||||||
|
copy.Spec = desiredStatefulSet.Spec
|
||||||
|
|
||||||
|
if err := r.Client.Patch(ctx, copy, client.MergeFrom(liveStatefulSet)); err != nil {
|
||||||
|
log.Error(err, "Failed to patch statefulset", "reason", errors.ReasonForError(err))
|
||||||
|
|
||||||
|
if errors.IsInvalid(err) {
|
||||||
|
// NOTE: This might not be ideal but deal the forbidden error by recreating the statefulset
|
||||||
|
// Probably we'd better create a registration-only runner to prevent queued jobs from immediately failing.
|
||||||
|
//
|
||||||
|
// 2021-06-13T07:19:52.760Z ERROR actions-runner-controller.runnerset Failed to patch statefulset
|
||||||
|
// {"runnerset": "default/example-runnerset", "error": "StatefulSet.apps \"example-runnerset\" is invalid: s
|
||||||
|
// pec: Forbidden: updates to statefulset spec for fields other than 'replicas', 'template', and 'updateStrategy'
|
||||||
|
// are forbidden"}
|
||||||
|
//
|
||||||
|
// Even though the error message includes "Forbidden", this error's reason is "Invalid".
|
||||||
|
// That's why we're using errors.IsInvalid above.
|
||||||
|
|
||||||
|
if err := r.Client.Delete(ctx, liveStatefulSet); err != nil {
|
||||||
|
log.Error(err, "Failed to delete statefulset for force-update")
|
||||||
|
return ctrl.Result{}, err
|
||||||
|
}
|
||||||
|
log.Info("Deleted statefulset for force-update")
|
||||||
|
}
|
||||||
|
|
||||||
|
return ctrl.Result{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// We requeue in order to clean up old runner replica sets later.
|
||||||
|
// Otherwise, they aren't cleaned up until the next re-sync interval.
|
||||||
|
return ctrl.Result{RequeueAfter: 5 * time.Second}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
const defaultReplicas = 1
|
||||||
|
|
||||||
|
var replicasOfLiveStatefulSet *int
|
||||||
|
if liveStatefulSet.Spec.Replicas != nil {
|
||||||
|
v := int(*liveStatefulSet.Spec.Replicas)
|
||||||
|
replicasOfLiveStatefulSet = &v
|
||||||
|
}
|
||||||
|
|
||||||
|
var replicasOfDesiredStatefulSet *int
|
||||||
|
if desiredStatefulSet.Spec.Replicas != nil {
|
||||||
|
v := int(*desiredStatefulSet.Spec.Replicas)
|
||||||
|
replicasOfDesiredStatefulSet = &v
|
||||||
|
}
|
||||||
|
|
||||||
|
currentDesiredReplicas := getIntOrDefault(replicasOfLiveStatefulSet, defaultReplicas)
|
||||||
|
newDesiredReplicas := getIntOrDefault(replicasOfDesiredStatefulSet, defaultReplicas)
|
||||||
|
|
||||||
|
// Please add more conditions that we can in-place update the newest runnerreplicaset without disruption
|
||||||
|
if currentDesiredReplicas != newDesiredReplicas {
|
||||||
|
v := int32(newDesiredReplicas)
|
||||||
|
|
||||||
|
updated := liveStatefulSet.DeepCopy()
|
||||||
|
updated.Spec.Replicas = &v
|
||||||
|
|
||||||
|
if err := r.Client.Patch(ctx, updated, client.MergeFrom(liveStatefulSet)); err != nil {
|
||||||
|
log.Error(err, "Failed to update statefulset")
|
||||||
|
|
||||||
|
return ctrl.Result{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return ctrl.Result{}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
statusReplicas := int(liveStatefulSet.Status.Replicas)
|
||||||
|
statusReadyReplicas := int(liveStatefulSet.Status.ReadyReplicas)
|
||||||
|
totalCurrentReplicas := int(liveStatefulSet.Status.CurrentReplicas)
|
||||||
|
updatedReplicas := int(liveStatefulSet.Status.UpdatedReplicas)
|
||||||
|
|
||||||
|
status := runnerSet.Status.DeepCopy()
|
||||||
|
|
||||||
|
status.CurrentReplicas = &totalCurrentReplicas
|
||||||
|
status.ReadyReplicas = &statusReadyReplicas
|
||||||
|
status.DesiredReplicas = &newDesiredReplicas
|
||||||
|
status.Replicas = &statusReplicas
|
||||||
|
status.UpdatedReplicas = &updatedReplicas
|
||||||
|
|
||||||
|
if !reflect.DeepEqual(runnerSet.Status, status) {
|
||||||
|
updated := runnerSet.DeepCopy()
|
||||||
|
updated.Status = *status
|
||||||
|
|
||||||
|
if err := r.Status().Patch(ctx, updated, client.MergeFrom(runnerSet)); err != nil {
|
||||||
|
log.Info("Failed to patch runnerset status. Retrying immediately", "error", err.Error())
|
||||||
|
return ctrl.Result{
|
||||||
|
Requeue: true,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return ctrl.Result{}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func getStatefulSetTemplateHash(rs *appsv1.StatefulSet) (string, bool) {
|
||||||
|
hash, ok := rs.Labels[LabelKeyRunnerTemplateHash]
|
||||||
|
|
||||||
|
return hash, ok
|
||||||
|
}
|
||||||
|
|
||||||
|
func getRunnerSetSelector(runnerSet *v1alpha1.RunnerSet) *metav1.LabelSelector {
|
||||||
|
selector := runnerSet.Spec.Selector
|
||||||
|
if selector == nil {
|
||||||
|
selector = &metav1.LabelSelector{MatchLabels: map[string]string{LabelKeyRunnerSetName: runnerSet.Name}}
|
||||||
|
}
|
||||||
|
|
||||||
|
return selector
|
||||||
|
}
|
||||||
|
|
||||||
|
var LabelKeyPodMutation = "actions-runner-controller/inject-registration-token"
|
||||||
|
var LabelValuePodMutation = "true"
|
||||||
|
|
||||||
|
func (r *RunnerSetReconciler) newStatefulSet(runnerSet *v1alpha1.RunnerSet) (*appsv1.StatefulSet, error) {
|
||||||
|
runnerSetWithOverrides := *runnerSet.Spec.DeepCopy()
|
||||||
|
|
||||||
|
for _, l := range r.CommonRunnerLabels {
|
||||||
|
runnerSetWithOverrides.Labels = append(runnerSetWithOverrides.Labels, l)
|
||||||
|
}
|
||||||
|
|
||||||
|
// This label selector is used by default when rd.Spec.Selector is empty.
|
||||||
|
runnerSetWithOverrides.Template.ObjectMeta.Labels = CloneAndAddLabel(runnerSetWithOverrides.Template.ObjectMeta.Labels, LabelKeyRunnerSetName, runnerSet.Name)
|
||||||
|
|
||||||
|
runnerSetWithOverrides.Template.ObjectMeta.Labels = CloneAndAddLabel(runnerSetWithOverrides.Template.ObjectMeta.Labels, LabelKeyPodMutation, LabelValuePodMutation)
|
||||||
|
|
||||||
|
template := corev1.Pod{
|
||||||
|
ObjectMeta: runnerSetWithOverrides.StatefulSetSpec.Template.ObjectMeta,
|
||||||
|
Spec: runnerSetWithOverrides.StatefulSetSpec.Template.Spec,
|
||||||
|
}
|
||||||
|
|
||||||
|
pod, err := newRunnerPod(template, runnerSet.Spec.RunnerConfig, r.RunnerImage, r.DockerImage, r.DockerRegistryMirror, r.GitHubBaseURL, false)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
runnerSetWithOverrides.StatefulSetSpec.Template.ObjectMeta = pod.ObjectMeta
|
||||||
|
runnerSetWithOverrides.StatefulSetSpec.Template.Spec = pod.Spec
|
||||||
|
// NOTE: Seems like the only supported restart policy for statefulset is "Always"?
|
||||||
|
// I got errosr like the below when tried to use "OnFailure":
|
||||||
|
// StatefulSet.apps \"example-runnersetpg9rx\" is invalid: [spec.template.metadata.labels: Invalid value: map[string]string{\"runner-template-hash\"
|
||||||
|
// :\"85d7578bd6\", \"runnerset-name\":\"example-runnerset\"}: `selector` does not match template `labels`, spec.
|
||||||
|
// template.spec.restartPolicy: Unsupported value: \"OnFailure\": supported values: \"Always\"]
|
||||||
|
runnerSetWithOverrides.StatefulSetSpec.Template.Spec.RestartPolicy = corev1.RestartPolicyAlways
|
||||||
|
|
||||||
|
templateHash := ComputeHash(pod.Spec)
|
||||||
|
|
||||||
|
// Add template hash label to selector.
|
||||||
|
runnerSetWithOverrides.Template.ObjectMeta.Labels = CloneAndAddLabel(runnerSetWithOverrides.Template.ObjectMeta.Labels, LabelKeyRunnerTemplateHash, templateHash)
|
||||||
|
|
||||||
|
selector := getRunnerSetSelector(runnerSet)
|
||||||
|
selector = CloneSelectorAndAddLabel(selector, LabelKeyRunnerTemplateHash, templateHash)
|
||||||
|
selector = CloneSelectorAndAddLabel(selector, LabelKeyRunnerSetName, runnerSet.Name)
|
||||||
|
selector = CloneSelectorAndAddLabel(selector, LabelKeyPodMutation, LabelValuePodMutation)
|
||||||
|
|
||||||
|
runnerSetWithOverrides.StatefulSetSpec.Selector = selector
|
||||||
|
|
||||||
|
rs := appsv1.StatefulSet{
|
||||||
|
TypeMeta: metav1.TypeMeta{},
|
||||||
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
|
Name: runnerSet.ObjectMeta.Name,
|
||||||
|
Namespace: runnerSet.ObjectMeta.Namespace,
|
||||||
|
Labels: CloneAndAddLabel(runnerSet.ObjectMeta.Labels, LabelKeyRunnerTemplateHash, templateHash),
|
||||||
|
},
|
||||||
|
Spec: runnerSetWithOverrides.StatefulSetSpec,
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := ctrl.SetControllerReference(runnerSet, &rs, r.Scheme); err != nil {
|
||||||
|
return &rs, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return &rs, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *RunnerSetReconciler) SetupWithManager(mgr ctrl.Manager) error {
|
||||||
|
name := "runnerset-controller"
|
||||||
|
if r.Name != "" {
|
||||||
|
name = r.Name
|
||||||
|
}
|
||||||
|
|
||||||
|
r.Recorder = mgr.GetEventRecorderFor(name)
|
||||||
|
|
||||||
|
return ctrl.NewControllerManagedBy(mgr).
|
||||||
|
For(&v1alpha1.RunnerSet{}).
|
||||||
|
Owns(&appsv1.StatefulSet{}).
|
||||||
|
Named(name).
|
||||||
|
Complete(r)
|
||||||
|
}
|
||||||
122
controllers/schedule.go
Normal file
122
controllers/schedule.go
Normal file
@@ -0,0 +1,122 @@
|
|||||||
|
package controllers
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/teambition/rrule-go"
|
||||||
|
)
|
||||||
|
|
||||||
|
type RecurrenceRule struct {
|
||||||
|
Frequency string
|
||||||
|
UntilTime time.Time
|
||||||
|
}
|
||||||
|
|
||||||
|
type Period struct {
|
||||||
|
StartTime time.Time
|
||||||
|
EndTime time.Time
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *Period) String() string {
|
||||||
|
if r == nil {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
return r.StartTime.Format(time.RFC3339) + "-" + r.EndTime.Format(time.RFC3339)
|
||||||
|
}
|
||||||
|
|
||||||
|
func MatchSchedule(now time.Time, startTime, endTime time.Time, recurrenceRule RecurrenceRule) (*Period, *Period, error) {
|
||||||
|
return calculateActiveAndUpcomingRecurringPeriods(
|
||||||
|
now,
|
||||||
|
startTime,
|
||||||
|
endTime,
|
||||||
|
recurrenceRule.Frequency,
|
||||||
|
recurrenceRule.UntilTime,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
func calculateActiveAndUpcomingRecurringPeriods(now, startTime, endTime time.Time, frequency string, untilTime time.Time) (*Period, *Period, error) {
|
||||||
|
var freqValue rrule.Frequency
|
||||||
|
|
||||||
|
var freqDurationDay int
|
||||||
|
var freqDurationMonth int
|
||||||
|
var freqDurationYear int
|
||||||
|
|
||||||
|
switch frequency {
|
||||||
|
case "Daily":
|
||||||
|
freqValue = rrule.DAILY
|
||||||
|
freqDurationDay = 1
|
||||||
|
case "Weekly":
|
||||||
|
freqValue = rrule.WEEKLY
|
||||||
|
freqDurationDay = 7
|
||||||
|
case "Monthly":
|
||||||
|
freqValue = rrule.MONTHLY
|
||||||
|
freqDurationMonth = 1
|
||||||
|
case "Yearly":
|
||||||
|
freqValue = rrule.YEARLY
|
||||||
|
freqDurationYear = 1
|
||||||
|
case "":
|
||||||
|
if now.Before(startTime) {
|
||||||
|
return nil, &Period{StartTime: startTime, EndTime: endTime}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if now.Before(endTime) {
|
||||||
|
return &Period{StartTime: startTime, EndTime: endTime}, nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil, nil, nil
|
||||||
|
default:
|
||||||
|
return nil, nil, fmt.Errorf(`invalid freq %q: It must be one of "Daily", "Weekly", "Monthly", and "Yearly"`, frequency)
|
||||||
|
}
|
||||||
|
|
||||||
|
freqDurationLater := time.Date(
|
||||||
|
now.Year()+freqDurationYear,
|
||||||
|
time.Month(int(now.Month())+freqDurationMonth),
|
||||||
|
now.Day()+freqDurationDay,
|
||||||
|
now.Hour(), now.Minute(), now.Second(), now.Nanosecond(), now.Location(),
|
||||||
|
)
|
||||||
|
|
||||||
|
freqDuration := freqDurationLater.Sub(now)
|
||||||
|
|
||||||
|
overrideDuration := endTime.Sub(startTime)
|
||||||
|
if overrideDuration > freqDuration {
|
||||||
|
return nil, nil, fmt.Errorf("override's duration %s must be equal to sor shorter than the duration implied by freq %q (%s)", overrideDuration, frequency, freqDuration)
|
||||||
|
}
|
||||||
|
|
||||||
|
rrule, err := rrule.NewRRule(rrule.ROption{
|
||||||
|
Freq: freqValue,
|
||||||
|
Dtstart: startTime,
|
||||||
|
Until: untilTime,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
overrideDurationBefore := now.Add(-overrideDuration + 1)
|
||||||
|
activeOverrideStarts := rrule.Between(overrideDurationBefore, now, true)
|
||||||
|
|
||||||
|
var active *Period
|
||||||
|
|
||||||
|
if len(activeOverrideStarts) > 1 {
|
||||||
|
return nil, nil, fmt.Errorf("[bug] unexpted number of active overrides found: %v", activeOverrideStarts)
|
||||||
|
} else if len(activeOverrideStarts) == 1 {
|
||||||
|
active = &Period{
|
||||||
|
StartTime: activeOverrideStarts[0],
|
||||||
|
EndTime: activeOverrideStarts[0].Add(overrideDuration),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
oneSecondLater := now.Add(1)
|
||||||
|
upcomingOverrideStarts := rrule.Between(oneSecondLater, freqDurationLater, true)
|
||||||
|
|
||||||
|
var next *Period
|
||||||
|
|
||||||
|
if len(upcomingOverrideStarts) > 0 {
|
||||||
|
next = &Period{
|
||||||
|
StartTime: upcomingOverrideStarts[0],
|
||||||
|
EndTime: upcomingOverrideStarts[0].Add(overrideDuration),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return active, next, nil
|
||||||
|
}
|
||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user